[Xen-devel] [PATCH RFC 19/59] Add basic libxl framework, get domain cpu_time
From: George Dunlap <george.dunlap@xxxxxxxxxx>
Introduce libxl "Context" class with open, close, and dominfo.
Create a global variable in xenworker.go to hold the context; open on
first worker creation, close on last worker destruction.
Add a new element to WorkerReport, Cputime, and print it out.
For now, include hard-coded link to local Xen libraries. This should
be sorted out at some point.
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
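
A minimal usage sketch of the new Context type (illustrative only;
error handling is abbreviated, and domain 1 is just an example, any
existing domid will do):

    ctx, err := NewContext()
    if err != nil {
        fmt.Printf("Opening libxl context: %v\n", err)
        return
    }
    defer ctx.Close()

    // Query a single domain and report its accumulated cpu time
    if di, err := ctx.DomainInfo(Domid(1)); err == nil {
        fmt.Printf("Domain 1 cpu_time: %v\n", di.Cpu_time)
    }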
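
Since XENLIB_PATH is assigned with ?=, the hard-coded library path can
be overridden from the command line without editing the Makefile, e.g.:

    make XENLIB_PATH=/path/to/xen/dist/install/usr/local/lib/
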
Makefile | 13 +++++--
benchmark.go | 2 ++
libxl.go | 116 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
run.go | 2 +-
xenworker.go | 26 ++++++++++++++
5 files changed, 156 insertions(+), 3 deletions(-)
create mode 100644 libxl.go
diff --git a/Makefile b/Makefile
index 2e06f87..54f2ce8 100644
--- a/Makefile
+++ b/Makefile
@@ -4,8 +4,17 @@ BINALL = $(BIN)
.PHONY: all
all: $(BIN)
-schedbench: main.go processworker.go xenworker.go benchmark.go run.go
- go build -o $@ $^
+
+
+CGO_CFLAGS = -I/build/hg/xen.git/dist/install/usr/local/include
+CGO_LIBS = -lyajl -lxenlight
+
+# FIXME
+XENLIB_PATH ?= /build/hg/xen.git/dist/install/usr/local/lib/
+CGO_LDFLAGS = -L$(XENLIB_PATH) -Wl,-rpath-link=$(XENLIB_PATH) $(CGO_LIBS)
+
+schedbench: main.go processworker.go xenworker.go benchmark.go run.go libxl.go
+ CGO_LDFLAGS="$(CGO_LDFLAGS)" CGO_CFLAGS="$(CGO_CFLAGS)" go build -o $@ $^
.PHONY: clean
clean:
diff --git a/benchmark.go b/benchmark.go
index 7fa83d2..4b2d805 100644
--- a/benchmark.go
+++ b/benchmark.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"encoding/json"
"math"
+ "time"
)
type WorkerId struct {
@@ -40,6 +41,7 @@ type WorkerReport struct {
Now int
Mops int
MaxDelta int
+ Cputime time.Duration
}
type WorkerParams struct {
diff --git a/libxl.go b/libxl.go
new file mode 100644
index 0000000..39e47ab
--- /dev/null
+++ b/libxl.go
@@ -0,0 +1,116 @@
+package main
+
+/*
+#include <libxl.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+ "fmt"
+ "time"
+)
+
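+// Context wraps a libxl_ctx handle; all libxl calls require an open
+// context.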
+type Context struct {
+ ctx *C.libxl_ctx
+}
+
+func NewContext() (Ctx *Context, err error) {
+ Ctx = &Context{}
+
+ err = Ctx.Open()
+
+ return
+}
+
+func (Ctx *Context) IsOpen() bool {
+ return Ctx.ctx != nil
+}
+
+func (Ctx *Context) Open() (err error) {
+ ret := C.libxl_ctx_alloc(&Ctx.ctx, C.LIBXL_VERSION, 0, nil)
+
+ if ret != 0 {
+ err = fmt.Errorf("Allocating libxl context: %d", ret)
+ }
+ return
+}
+
+func (Ctx *Context) Close() (err error) {
+ ret := C.libxl_ctx_free(Ctx.ctx)
+ Ctx.ctx = nil
+
+ if ret != 0 {
+ err = fmt.Errorf("Freeing libxl context: %d", ret)
+ }
+ return
+}
+
+type Domid uint32
+
+type MemKB uint64
+
+// FIXME: Use the idl to generate types
+type Dominfo struct {
+ // FIXME: uuid
+ Domid Domid
+ Running bool
+ Blocked bool
+ Paused bool
+ Shutdown bool
+ Dying bool
+ Never_stop bool
+
+ Shutdown_reason int32 // FIXME shutdown_reason enumeration
+ Outstanding_memkb MemKB
+ Current_memkb MemKB
+ Shared_memkb MemKB
+ Paged_memkb MemKB
+ Max_memkb MemKB
+ Cpu_time time.Duration
+ Vcpu_max_id uint32
+ Vcpu_online uint32
+ Cpupool uint32
+ Domain_type int32 // FIXME: libxl_domain_type enumeration
+}
+
+func (Ctx *Context) DomainInfo(Id Domid) (di *Dominfo, err error) {
+ if Ctx.ctx == nil {
+ err = fmt.Errorf("Context not opened")
+ return
+ }
+
+ var cdi C.libxl_dominfo
+
+ // libxl IDL types must be initialized before use and disposed afterwards
+ C.libxl_dominfo_init(&cdi)
+ defer C.libxl_dominfo_dispose(&cdi)
+
+ ret := C.libxl_domain_info(Ctx.ctx, &cdi, C.uint32_t(Id))
+
+ // FIXME: IsDomainNotPresentError
+ if ret != 0 {
+ err = fmt.Errorf("libxl_domain_info failed: %d", ret)
+ return
+ }
+
+ // FIXME -- use introspection to make this more robust
+ di = &Dominfo{}
+ di.Domid = Domid(cdi.domid)
+ di.Running = bool(cdi.running)
+ di.Blocked = bool(cdi.blocked)
+ di.Paused = bool(cdi.paused)
+ di.Shutdown = bool(cdi.shutdown)
+ di.Dying = bool(cdi.dying)
+ di.Never_stop = bool(cdi.never_stop)
+ di.Shutdown_reason = int32(cdi.shutdown_reason)
+ di.Outstanding_memkb = MemKB(cdi.outstanding_memkb)
+ di.Current_memkb = MemKB(cdi.current_memkb)
+ di.Shared_memkb = MemKB(cdi.shared_memkb)
+ di.Paged_memkb = MemKB(cdi.paged_memkb)
+ di.Max_memkb = MemKB(cdi.max_memkb)
+ di.Cpu_time = time.Duration(cdi.cpu_time)
+ di.Vcpu_max_id = uint32(cdi.vcpu_max_id)
+ di.Vcpu_online = uint32(cdi.vcpu_online)
+ di.Cpupool = uint32(cdi.cpupool)
+ di.Domain_type = int32(cdi.domain_type)
+ return
+}
diff --git a/run.go b/run.go
index 9f1edcf..788c541 100644
--- a/run.go
+++ b/run.go
@@ -41,7 +41,7 @@ func Report(ws *WorkerState, r WorkerReport) {
tput := Throughput(lr.Now, lr.Mops, r.Now, r.Mops)
- fmt.Printf("%v Time: %2.3f Mops: %d Tput: %4.2f\n", r.Id, time,
mops, tput);
+ fmt.Printf("%v Time: %2.3f Mops: %d Tput: %4.2f Cputime: %v\n",
r.Id, time, mops, tput, r.Cputime);
}
ws.LastReport = r
diff --git a/xenworker.go b/xenworker.go
index 4d42e5e..31af35f 100644
--- a/xenworker.go
+++ b/xenworker.go
@@ -27,8 +27,16 @@ import (
"io"
)
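+// xenGlobal holds the libxl context shared by all Xen workers,
+// opened by the first worker and closed by the last one.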
+type xenGlobal struct {
+ Ctx Context
+ count int
+}
+
+var xg xenGlobal
+
type XenWorker struct {
id WorkerId
+ Ctx Context
vmname string
domid int
consoleCmd *exec.Cmd
@@ -59,6 +67,14 @@ func (w *XenWorker) SetId(i WorkerId) {
}
func (w *XenWorker) Init(p WorkerParams, g WorkerConfig) (err error) {
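+ // Open the shared libxl context when the first worker is created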
+ if xg.count == 0 {
+ err = xg.Ctx.Open()
+ if err != nil {
+ return
+ }
+ }
+ xg.count++
+
mock := false
// Make xl config file
@@ -202,6 +218,11 @@ func (w *XenWorker) Shutdown() {
e.Stdout = os.Stdout
e.Stderr = os.Stderr
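+ // Drop our reference to the shared libxl context; the last worker's
+ // Shutdown closes it (via defer, at the end of this function)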
+ xg.count--
+ if xg.count == 0 {
+ defer xg.Ctx.Close()
+ }
+
err := e.Run()
if err != nil {
fmt.Printf("Error destroying domain: %v\n", err)
@@ -237,6 +258,11 @@ func (w *XenWorker) Process(report chan WorkerReport, done chan bool) {
var r WorkerReport
json.Unmarshal([]byte(s), &r)
r.Id = w.id
+ di, err := xg.Ctx.DomainInfo(Domid(w.domid))
+ // Ignore errors for now
+ if err == nil {
+ r.Cputime = di.Cpu_time
+ }
report <- r
} else {
if s == "START JSON" {
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel