|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH XTF v3 1/4] xtf-runner: split into logical components
Split the xtf-runner script file into multiple modules in order to
support multiple test types.
Features:
- 2 abstract types (TestInfo and TestInstance) to represent the
test information (info.json) and, respectively to implement the test
execution.
TestInfo has to implement the "all_instances" method to create the
list of TestInstance objects.
TestInstance has to implement "set_up", "run", and "clean-up"
methods.
- TestResult - represents an XTF test result (SUCCESS, SKIP, ERROR,
FAILURE, CRASH). The values should be kept in sync with the C code
from report.h
- Dynamic test class loading. Each info.json should contain a
"class_name" field which specifies the test info class describing the
test. This value defaults to "xtf.domu_test.DomuTestInfo"
- custom test info parameters. info.json can have the "extra"
field, implemented as a dictionary, which contains parameters
specific for a certain test info class.
e.g. TEST-EXTRA-INFO := arg1='--address=0x80000000 --id=4' arg2=42
- logger class (print depending on the quiet field)
- DomuTestInfo/DomuTest instance. Simple test which loads a XEN DomU
and checks the output for a specific pattern.
- toolstack abstraction using a wrapper class (e.g.
xtf.xl_domu.XLDomU)
Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
build/gen.mk | 13 ++-
build/mkinfo.py | 84 +++++++++++---
xtf-runner | 334 +++++-------------------------------------------------
xtf/__init__.py | 12 ++
xtf/domu_test.py | 179 +++++++++++++++++++++++++++++
xtf/exceptions.py | 6 +
xtf/logger.py | 23 ++++
xtf/suite.py | 97 ++++++++++++++++
xtf/test.py | 139 +++++++++++++++++++++++
xtf/xl_domu.py | 121 ++++++++++++++++++++
10 files changed, 687 insertions(+), 321 deletions(-)
create mode 100644 xtf/__init__.py
create mode 100644 xtf/domu_test.py
create mode 100644 xtf/exceptions.py
create mode 100644 xtf/logger.py
create mode 100644 xtf/suite.py
create mode 100644 xtf/test.py
create mode 100644 xtf/xl_domu.py
diff --git a/build/gen.mk b/build/gen.mk
index 8d7a6bf..c19ca6a 100644
--- a/build/gen.mk
+++ b/build/gen.mk
@@ -27,12 +27,23 @@ else
TEST-CFGS := $(foreach env,$(TEST-ENVS),test-$(env)-$(NAME).cfg)
endif
+CLASS ?= "xtf.domu_test.DomuTestInfo"
+
.PHONY: build
build: $(foreach env,$(TEST-ENVS),test-$(env)-$(NAME)) $(TEST-CFGS)
build: info.json
+MKINFO-OPTS := -n "$(NAME)"
+MKINFO-OPTS += -c "$(CLASS)"
+MKINFO-OPTS += -t "$(CATEGORY)"
+MKINFO-OPTS += -e "$(TEST-ENVS)"
+MKINFO-OPTS += -v "$(VARY-CFG)"
+ifneq (x$(TEST-EXTRA-INFO), x)
+MKINFO-OPTS += -x "$(TEST-EXTRA-INFO)"
+endif
+
info.json: $(ROOT)/build/mkinfo.py FORCE
- @$(PYTHON) $< $@.tmp "$(NAME)" "$(CATEGORY)" "$(TEST-ENVS)" "$(VARY-CFG)"
+ @$(PYTHON) $< $(MKINFO-OPTS) $@.tmp
@$(call move-if-changed,$@.tmp,$@)
.PHONY: install install-each-env
diff --git a/build/mkinfo.py b/build/mkinfo.py
index 94891a9..afa355c 100644
--- a/build/mkinfo.py
+++ b/build/mkinfo.py
@@ -1,24 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+""" mkinfo.py
-import sys, os, json
+ Generates a test info json file.
+ The script is run at the build stage using the parameters specified
+ in the test's Makefile.
+"""
-# Usage: mkcfg.py $OUT $NAME $CATEGORY $ENVS $VARIATIONS
-_, out, name, cat, envs, variations = sys.argv
+import json
+import sys
+import shlex
+from optparse import OptionParser
-template = {
- "name": name,
- "category": cat,
- "environments": [],
- "variations": [],
- }
+def main():
+ """ Main entrypoint """
+ # Avoid wrapping the epilog text
+ OptionParser.format_epilog = lambda self, formatter: self.epilog
-if envs:
- template["environments"] = envs.split(" ")
-if variations:
- template["variations"] = variations.split(" ")
+ parser = OptionParser(
+ usage = "%prog [OPTIONS] out_file",
+ description = "Xen Test Framework json generation tool",
+ )
-open(out, "w").write(
- json.dumps(template, indent=4, separators=(',', ': '))
- + "\n"
- )
+ parser.add_option("-n", "--name", action = "store",
+ dest = "name",
+ help = "Test name",
+ )
+ parser.add_option("-c", "--class", action = "store",
+ dest = "class_name",
+ help = "Test class name",
+ )
+ parser.add_option("-t", "--category", action = "store",
+ dest = "cat",
+ help = "Test category",
+ )
+ parser.add_option("-e", "--environments", action = "store",
+ dest = "envs",
+ help = "Test environments (e.g hvm64, pv64 ...)",
+ )
+ parser.add_option("-v", "--variations", action = "store",
+ dest = "variations",
+ help = "Test variations",
+ )
+ parser.add_option("-x", "--extra", action = "store",
+ dest = "extra",
+ help = "Test specific parameters",
+ )
+
+ opts, args = parser.parse_args()
+ template = {
+ "name": opts.name,
+ "class_name": opts.class_name,
+ "category": opts.cat,
+ "environments": [],
+ "variations": [],
+ "extra": {}
+ }
+
+ if opts.envs:
+ template["environments"] = opts.envs.split(" ")
+ if opts.variations:
+ template["variations"] = opts.variations.split(" ")
+ if opts.extra:
+ template["extra"] = dict([(e.split('=',1))
+ for e in shlex.split(opts.extra)])
+
+ open(args[0], "w").write(
+ json.dumps(template, indent=4, separators=(',', ': '))
+ + "\n"
+ )
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/xtf-runner b/xtf-runner
index 172cb1d..1a4901a 100755
--- a/xtf-runner
+++ b/xtf-runner
@@ -7,154 +7,30 @@
Currently assumes the presence and availability of the `xl` toolstack.
"""
-import sys, os, os.path as path
+import os
+import sys
from optparse import OptionParser
-from subprocess import Popen, PIPE, call as subproc_call
+from subprocess import Popen, PIPE
-try:
- import json
-except ImportError:
- import simplejson as json
-
-# All results of a test, keep in sync with C code report.h.
-# Notes:
-# - WARNING is not a result on its own.
-# - CRASH isn't known to the C code, but covers all cases where a valid
-# result was not found.
-all_results = ['SUCCESS', 'SKIP', 'ERROR', 'FAILURE', 'CRASH']
+from xtf import default_categories, non_default_categories, all_categories
+from xtf import pv_environments, hvm_environments, all_environments
+from xtf.exceptions import RunnerError
+from xtf.logger import Logger
+from xtf.suite import get_all_test_info, gather_all_test_info
+from xtf.test import TestResult
# Return the exit code for different states. Avoid using 1 and 2 because
# python interpreter uses them -- see document for sys.exit.
def exit_code(state):
""" Convert a test result to an xtf-runner exit code. """
- return { "SUCCESS": 0,
- "SKIP": 3,
- "ERROR": 4,
- "FAILURE": 5,
- "CRASH": 6,
+ return { TestResult.SUCCESS: 0,
+ TestResult.SKIP: 3,
+ TestResult.ERROR: 4,
+ TestResult.FAILURE: 5,
+ TestResult.CRASH: 6,
}[state]
-# All test categories
-default_categories = set(("functional", "xsa"))
-non_default_categories = set(("special", "utility", "in-development"))
-all_categories = default_categories | non_default_categories
-
-# All test environments
-pv_environments = set(("pv64", "pv32pae"))
-hvm_environments = set(("hvm64", "hvm32pae", "hvm32pse", "hvm32"))
-all_environments = pv_environments | hvm_environments
-
-
-class RunnerError(Exception):
- """ Errors relating to xtf-runner itself """
-
-class TestInstance(object):
- """ Object representing a single test. """
-
- def __init__(self, arg):
- """ Parse and verify 'arg' as a test instance. """
- self.env, self.name, self.variation = parse_test_instance_string(arg)
-
- if self.env is None:
- raise RunnerError("No environment for '%s'" % (arg, ))
-
- if self.variation is None and get_all_test_info()[self.name].variations:
- raise RunnerError("Test '%s' has variations, but none specified"
- % (self.name, ))
-
- def vm_name(self):
- """ Return the VM name as `xl` expects it. """
- return repr(self)
-
- def cfg_path(self):
- """ Return the path to the `xl` config file for this test. """
- return path.join("tests", self.name, repr(self) + ".cfg")
-
- def __repr__(self):
- if not self.variation:
- return "test-%s-%s" % (self.env, self.name)
- else:
- return "test-%s-%s~%s" % (self.env, self.name, self.variation)
-
- def __hash__(self):
- return hash(repr(self))
-
- def __cmp__(self, other):
- return cmp(repr(self), repr(other))
-
-
-class TestInfo(object):
- """ Object representing a tests info.json, in a more convenient form. """
-
- def __init__(self, test_json):
- """Parse and verify 'test_json'.
-
- May raise KeyError, TypeError or ValueError.
- """
-
- name = test_json["name"]
- if not isinstance(name, basestring):
- raise TypeError("Expected string for 'name', got '%s'"
- % (type(name), ))
- self.name = name
-
- cat = test_json["category"]
- if not isinstance(cat, basestring):
- raise TypeError("Expected string for 'category', got '%s'"
- % (type(cat), ))
- if not cat in all_categories:
- raise ValueError("Unknown category '%s'" % (cat, ))
- self.cat = cat
-
- envs = test_json["environments"]
- if not isinstance(envs, list):
- raise TypeError("Expected list for 'environments', got '%s'"
- % (type(envs), ))
- if not envs:
- raise ValueError("Expected at least one environment")
- for env in envs:
- if not env in all_environments:
- raise ValueError("Unknown environments '%s'" % (env, ))
- self.envs = envs
-
- variations = test_json["variations"]
- if not isinstance(variations, list):
- raise TypeError("Expected list for 'variations', got '%s'"
- % (type(variations), ))
- self.variations = variations
-
- def all_instances(self, env_filter = None, vary_filter = None):
- """Return a list of TestInstances, for each supported environment.
- Optionally filtered by env_filter. May return an empty list if
- the filter doesn't match any supported environment.
- """
-
- if env_filter:
- envs = set(env_filter).intersection(self.envs)
- else:
- envs = self.envs
-
- if vary_filter:
- variations = set(vary_filter).intersection(self.variations)
- else:
- variations = self.variations
-
- res = []
- if variations:
- for env in envs:
- for vary in variations:
- res.append(TestInstance("test-%s-%s~%s"
- % (env, self.name, vary)))
- else:
- res = [ TestInstance("test-%s-%s" % (env, self.name))
- for env in envs ]
- return res
-
- def __repr__(self):
- return "TestInfo(%s)" % (self.name, )
-
-
def parse_test_instance_string(arg):
"""Parse a test instance string.
@@ -221,47 +97,6 @@ def parse_test_instance_string(arg):
return env, name, variation
-
-# Cached test json from disk
-_all_test_info = {}
-
-def get_all_test_info():
- """ Open and collate each info.json """
-
- # Short circuit if already cached
- if _all_test_info:
- return _all_test_info
-
- for test in os.listdir("tests"):
-
- info_file = None
- try:
-
- # Ignore directories which don't have a info.json inside them
- try:
- info_file = open(path.join("tests", test, "info.json"))
- except IOError:
- continue
-
- # Ignore tests which have bad JSON
- try:
- test_info = TestInfo(json.load(info_file))
-
- if test_info.name != test:
- continue
-
- except (ValueError, KeyError, TypeError):
- continue
-
- _all_test_info[test] = test_info
-
- finally:
- if info_file:
- info_file.close()
-
- return _all_test_info
-
-
def tests_from_selection(cats, envs, tests):
"""Given a selection of possible categories, environment and tests, return
all tests within the provided parameters.
@@ -433,136 +268,25 @@ def list_tests(opts):
for sel in opts.selection:
print sel
-
-def interpret_result(logline):
- """ Interpret the final log line of a guest for a result """
-
- if not "Test result:" in logline:
- return "CRASH"
-
- for res in all_results:
- if res in logline:
- return res
-
- return "CRASH"
-
-
-def run_test_console(opts, test):
- """ Run a specific, obtaining results via xenconsole """
-
- cmd = ['xl', 'create', '-p', test.cfg_path()]
- if not opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
-
- create = Popen(cmd, stdout = PIPE, stderr = PIPE)
- _, stderr = create.communicate()
-
- if create.returncode:
- if opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
- print stderr
- raise RunnerError("Failed to create VM")
-
- cmd = ['xl', 'console', test.vm_name()]
- if not opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
-
- console = Popen(cmd, stdout = PIPE)
-
- cmd = ['xl', 'unpause', test.vm_name()]
- if not opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
-
- rc = subproc_call(cmd)
- if rc:
- if opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
- raise RunnerError("Failed to unpause VM")
-
- stdout, _ = console.communicate()
-
- if console.returncode:
- raise RunnerError("Failed to obtain VM console")
-
- lines = stdout.splitlines()
-
- if lines:
- if not opts.quiet:
- print "\n".join(lines)
- print ""
-
- else:
- return "CRASH"
-
- return interpret_result(lines[-1])
-
-
-def run_test_logfile(opts, test):
- """ Run a specific test, obtaining results from a logfile """
-
- logpath = path.join(opts.logfile_dir,
- opts.logfile_pattern.replace("%s", str(test)))
-
- if not opts.quiet:
- print "Using logfile '%s'" % (logpath, )
-
- fd = os.open(logpath, os.O_CREAT | os.O_RDONLY, 0644)
- logfile = os.fdopen(fd)
- logfile.seek(0, os.SEEK_END)
-
- cmd = ['xl', 'create', '-F', test.cfg_path()]
- if not opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
-
- guest = Popen(cmd, stdout = PIPE, stderr = PIPE)
-
- _, stderr = guest.communicate()
-
- if guest.returncode:
- if opts.quiet:
- print "Executing '%s'" % (" ".join(cmd), )
- print stderr
- raise RunnerError("Failed to run test")
-
- line = ""
- for line in logfile.readlines():
-
- line = line.rstrip()
- if not opts.quiet:
- print line
-
- if "Test result:" in line:
- print ""
- break
-
- logfile.close()
-
- return interpret_result(line)
-
-
def run_tests(opts):
""" Run tests """
tests = opts.selection
- if not len(tests):
+ if not tests:
raise RunnerError("No tests to run")
- run_test = { "console": run_test_console,
- "logfile": run_test_logfile,
- }.get(opts.results_mode, None)
-
- if run_test is None:
- raise RunnerError("Unknown mode '%s'" % (opts.mode, ))
-
- rc = all_results.index('SUCCESS')
+ rc = TestResult()
results = []
for test in tests:
+ res = TestResult()
+ test.set_up(opts, res)
+ if res == TestResult.SUCCESS:
+ test.run(res)
+ test.clean_up(res)
- res = run_test(opts, test)
- res_idx = all_results.index(res)
- if res_idx > rc:
- rc = res_idx
+ if res > rc:
+ rc = res
results.append(res)
@@ -571,7 +295,7 @@ def run_tests(opts):
for test, res in zip(tests, results):
print "%-40s %s" % (test, res)
- return exit_code(all_results[rc])
+ return exit_code(rc)
def main():
@@ -581,7 +305,7 @@ def main():
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
# Normalise $CWD to the directory this script is in
- os.chdir(path.dirname(path.abspath(sys.argv[0])))
+ os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
# Avoid wrapping the epilog text
OptionParser.format_epilog = lambda self, formatter: self.epilog
@@ -715,12 +439,16 @@ def main():
opts, args = parser.parse_args()
opts.args = args
+ Logger().initialize(opts)
+
+ gather_all_test_info()
+
opts.selection = interpret_selection(opts)
if opts.list_tests:
return list_tests(opts)
- else:
- return run_tests(opts)
+
+ return run_tests(opts)
if __name__ == "__main__":
diff --git a/xtf/__init__.py b/xtf/__init__.py
new file mode 100644
index 0000000..889c1d5
--- /dev/null
+++ b/xtf/__init__.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# All test categories
+default_categories = set(("functional", "xsa"))
+non_default_categories = set(("special", "utility", "in-development"))
+all_categories = default_categories | non_default_categories
+
+# All test environments
+pv_environments = set(("pv64", "pv32pae"))
+hvm_environments = set(("hvm64", "hvm32pae", "hvm32pse", "hvm32"))
+all_environments = pv_environments | hvm_environments
diff --git a/xtf/domu_test.py b/xtf/domu_test.py
new file mode 100644
index 0000000..4052167
--- /dev/null
+++ b/xtf/domu_test.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Basic DomU test
+Runs a domain and checks the output for a specific pattern.
+"""
+
+import os
+import StringIO
+
+from xtf import all_environments
+from xtf.exceptions import RunnerError
+from xtf.logger import Logger
+from xtf.test import TestInstance, TestInfo, TestResult
+from xtf.xl_domu import XLDomU
+
+class DomuTestInstance(TestInstance):
+ """ Object representing a single DOMU test. """
+
+ def __init__(self, env, name, variation):
+ super(DomuTestInstance, self).__init__(name)
+
+ self.env, self.variation = env, variation
+
+ if self.env is None:
+ raise RunnerError("No environment for '%s'" % (self.name, ))
+
+ self.domu = XLDomU(self.cfg_path())
+ self.results_mode = 'console'
+ self.logpath = None
+ if not Logger().quiet:
+ self.output = StringIO.StringIO()
+ else:
+ self.output = None
+
+ def vm_name(self):
+ """ Return the VM name as `xl` expects it. """
+ return repr(self)
+
+ def cfg_path(self):
+ """ Return the path to the `xl` config file for this test. """
+ return os.path.join("tests", self.name, repr(self) + ".cfg")
+
+ def __repr__(self):
+ if self.variation:
+ return "test-%s-%s~%s" % (self.env, self.name, self.variation)
+ return "test-%s-%s" % (self.env, self.name)
+
+ def set_up(self, opts, result):
+ self.results_mode = opts.results_mode
+ if self.results_mode not in ['console', 'logfile']:
+ raise RunnerError("Unknown mode '%s'" % (opts.results_mode, ))
+
+ self.logpath = os.path.join(opts.logfile_dir,
+ opts.logfile_pattern.replace("%s", str(self)))
+ self.domu.create()
+
+ def run(self, result):
+ """Executes the test instance"""
+ run_test = { "console": self._run_test_console,
+ "logfile": self._run_test_logfile,
+ }.get(self.results_mode, None)
+
+ run_test(result)
+
+ def clean_up(self, result):
+ if self.output:
+ self.output.close()
+
+ # wait for completion
+ if not self.domu.cleanup():
+ result.set(TestResult.CRASH)
+
+ def _run_test_console(self, result):
+ """ Run a specific test, obtaining results via xenconsole """
+
+ console = self.domu.console(self.output)
+
+ # start the domain
+ self.domu.unpause()
+ value = console.expect(self.result_pattern())
+
+ if self.output is not None:
+ Logger().log(self.output.getvalue())
+
+ result.set(value)
+
+ def _run_test_logfile(self, result):
+ """ Run a specific test, obtaining results from a logfile """
+
+ Logger().log("Using logfile '%s'" % (self.logpath, ))
+
+ fd = os.open(self.logpath, os.O_CREAT | os.O_RDONLY, 0644)
+ logfile = os.fdopen(fd)
+ logfile.seek(0, os.SEEK_END)
+
+ self.domu.unpause()
+
+ # wait for completion
+ if not self.domu.cleanup():
+ result.set(TestResult.CRASH)
+
+ line = ""
+ for line in logfile.readlines():
+ line = line.rstrip()
+ Logger().log(line)
+
+ if "Test result:" in line:
+ print ""
+ break
+
+ logfile.close()
+
+ result.set(TestInstance.parse_result(line))
+
+
+class DomuTestInfo(TestInfo):
+ """ Object representing a tests info.json, in a more convenient form. """
+
+ def __init__(self, test_json):
+ """Parse and verify 'test_json'.
+
+ May raise KeyError, TypeError or ValueError.
+ """
+
+ super(DomuTestInfo, self).__init__(test_json)
+ self.instance_class = DomuTestInstance
+
+ envs = test_json["environments"]
+ if not isinstance(envs, list):
+ raise TypeError("Expected list for 'environments', got '%s'"
+ % (type(envs), ))
+ if not envs:
+ raise ValueError("Expected at least one environment")
+ for env in envs:
+ if env not in all_environments:
+ raise ValueError("Unknown environments '%s'" % (env, ))
+ self.envs = envs
+
+ variations = test_json["variations"]
+ if not isinstance(variations, list):
+ raise TypeError("Expected list for 'variations', got '%s'"
+ % (type(variations), ))
+ self.variations = variations
+
+ extra = test_json["extra"]
+ if not isinstance(extra, dict):
+ raise TypeError("Expected dict for 'extra', got '%s'"
+ % (type(extra), ))
+ self.extra = extra
+
+ def all_instances(self, env_filter = None, vary_filter = None):
+ """Return a list of TestInstances, for each supported environment.
+ Optionally filtered by env_filter. May return an empty list if
+ the filter doesn't match any supported environment.
+ """
+
+ if env_filter:
+ envs = set(env_filter).intersection(self.envs)
+ else:
+ envs = self.envs
+
+ if vary_filter:
+ variations = set(vary_filter).intersection(self.variations)
+ else:
+ variations = self.variations
+
+ res = []
+ if variations:
+ for env in envs:
+ for vary in variations:
+ res.append(self.instance_class(env, self.name, vary))
+ else:
+ res = [ self.instance_class(env, self.name, None)
+ for env in envs ]
+ return res
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.name, )
diff --git a/xtf/exceptions.py b/xtf/exceptions.py
new file mode 100644
index 0000000..26801a2
--- /dev/null
+++ b/xtf/exceptions.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+class RunnerError(Exception):
+ """ Errors relating to xtf-runner itself """
+
diff --git a/xtf/logger.py b/xtf/logger.py
new file mode 100644
index 0000000..ec279e5
--- /dev/null
+++ b/xtf/logger.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+class Singleton(type):
+ """Singleton meta class"""
+ _instances = {}
+ def __call__(cls, *args, **kwargs):
+ if cls not in cls._instances:
+ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+ return cls._instances[cls]
+
+class Logger(object):
+ """Logger class for XTF."""
+ __metaclass__ = Singleton
+
+ def initialize(self, opts):
+ """Initialize logger"""
+ self.quiet = opts.quiet
+
+ def log(self, message):
+ """Display the message"""
+ if not self.quiet:
+ print message
diff --git a/xtf/suite.py b/xtf/suite.py
new file mode 100644
index 0000000..ad7d30f
--- /dev/null
+++ b/xtf/suite.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os, os.path as path
+import sys
+import imp
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from xtf.exceptions import RunnerError
+
+# Cached test json from disk
+_all_test_info = {}
+
+def _load_module(name):
+ """Loads module dynamically"""
+ components = name.split(".")
+ module_path = sys.path
+
+ for index in xrange(len(components)):
+ module_name = components[index]
+ module = sys.modules.get(module_name)
+ if module:
+ if hasattr(module, '__path__'):
+ module_path = module.__path__
+ continue
+
+ try:
+ mod_file, filename, description = imp.find_module(module_name,
+ module_path)
+ module = imp.load_module(module_name, mod_file, filename,
+ description)
+ if hasattr(module, '__path__'):
+ module_path = module.__path__
+ finally:
+ if mod_file:
+ mod_file.close()
+
+ return module
+
+def _load_class(name):
+ """Loads python class dynamically"""
+ components = name.split(".")
+ class_name = components[-1]
+ module = _load_module(".".join(components[:-1]))
+
+ try:
+ cls = module.__dict__[class_name]
+ return cls
+ except KeyError:
+ return None
+
+
+def get_all_test_info():
+ """ Returns all available test info instances """
+
+ if not _all_test_info:
+ raise RunnerError("No available test info")
+
+ return _all_test_info
+
+
+def gather_all_test_info():
+ """ Open and collate each info.json """
+
+ for test in os.listdir("tests"):
+
+ info_file = None
+ try:
+
+ # Ignore directories which don't have a info.json inside them
+ try:
+ info_file = open(path.join("tests", test, "info.json"))
+ except IOError:
+ continue
+
+ # Ignore tests which have bad JSON
+ try:
+ json_info = json.load(info_file)
+ test_class = _load_class(json_info["class_name"])
+ test_info = test_class(json_info)
+
+ if test_info.name != test:
+ continue
+
+ except (ValueError, KeyError, TypeError):
+ continue
+
+ _all_test_info[test] = test_info
+
+ finally:
+ if info_file:
+ info_file.close()
+
diff --git a/xtf/test.py b/xtf/test.py
new file mode 100644
index 0000000..4440b47
--- /dev/null
+++ b/xtf/test.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Base XTF Test Classess
+"""
+import pexpect
+from xtf import all_categories
+
+class TestResult(object):
+ """
+ Test result wrapper class
+ All results of a test, keep in sync with C code report.h.
+ Notes:
+ - WARNING is not a result on its own.
+ - CRASH isn't known to the C code, but covers all cases where a valid
+ result was not found.
+ """
+
+ SUCCESS = 'SUCCESS'
+ SKIP = 'SKIP'
+ ERROR = 'ERROR'
+ FAILURE = 'FAILURE'
+ CRASH = 'CRASH'
+
+ all_results = [SUCCESS, SKIP, ERROR, FAILURE, CRASH]
+
+ def __init__(self, value=SUCCESS):
+ self.set(value)
+
+ def __cmp__(self, other):
+ if isinstance(other, TestResult):
+ return cmp(TestResult.all_results.index(self._value),
+ TestResult.all_results.index(repr(other)))
+ elif isinstance(other, (str, unicode)):
+ if other in TestResult.all_results:
+ return cmp(TestResult.all_results.index(self._value),
+ TestResult.all_results.index(other))
+
+ raise ValueError
+
+ def __repr__(self):
+ return self._value
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def set(self, value):
+ """
+ The result can be set using both a string value or an index
+ if the index used is out-of-bounds the result will be initialized
+ to CRASH
+ """
+ if isinstance(value, (int, long)):
+ try:
+ self._value = TestResult.all_results[value]
+ except IndexError:
+ self._value = TestResult.CRASH
+ else:
+ if value in TestResult.all_results:
+ self._value = value
+ else:
+ self._value = TestResult.CRASH
+
+
+class TestInstance(object):
+ """Base class for a XTF Test Instance object"""
+
+ @staticmethod
+ def parse_result(logline):
+ """ Interpret the final log line of a guest for a result """
+
+ if "Test result:" not in logline:
+ return TestResult.CRASH
+
+ for res in TestResult.all_results:
+ if res in logline:
+ return res
+
+ return TestResult.CRASH
+
+ @staticmethod
+ def result_pattern():
+ """the test result pattern."""
+ return ['Test result: ' + x for x in TestResult.all_results] + \
+ [pexpect.TIMEOUT, pexpect.EOF]
+
+ def __init__(self, name):
+ self.name = name
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def __cmp__(self, other):
+ return cmp(repr(self), repr(other))
+
+ def set_up(self, opts, result):
+ """Sets up the necessary resources needed to run the test."""
+ raise NotImplementedError
+
+ def run(self, result):
+ """Runs the Test Instance."""
+ raise NotImplementedError
+
+ def clean_up(self, result):
+ """Cleans up the test data."""
+ raise NotImplementedError
+
+
+class TestInfo(object):
+ """Base class for a XTF Test Info object.
+ It represents a tests info.json, in a more convenient form.
+ """
+
+ def __init__(self, test_json):
+ """Parse and verify 'test_json'.
+
+ May raise KeyError, TypeError or ValueError.
+ """
+ name = test_json["name"]
+ if not isinstance(name, basestring):
+ raise TypeError("Expected string for 'name', got '%s'"
+ % (type(name), ))
+ self.name = name
+
+ cat = test_json["category"]
+ if not isinstance(cat, basestring):
+ raise TypeError("Expected string for 'category', got '%s'"
+ % (type(cat), ))
+ if cat not in all_categories:
+ raise ValueError("Unknown category '%s'" % (cat, ))
+ self.cat = cat
+
+ def all_instances(self, env_filter = None, vary_filter = None):
+ """Return a list of TestInstances, for each supported environment.
+ Optionally filtered by env_filter. May return an empty list if
+ the filter doesn't match any supported environment.
+ """
+ raise NotImplementedError
diff --git a/xtf/xl_domu.py b/xtf/xl_domu.py
new file mode 100644
index 0000000..f76dbfe
--- /dev/null
+++ b/xtf/xl_domu.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""XL DomU class"""
+########################################################################
+# Imports
+########################################################################
+
+import imp
+import os.path
+import time
+
+from subprocess import Popen, PIPE
+
+import pexpect
+
+from xtf.exceptions import RunnerError
+from xtf.logger import Logger
+
+########################################################################
+# Functions
+########################################################################
+
+def _run_cmd(args, quiet=False):
+ """Execute command using Popen"""
+ proc = Popen(args, stdout = PIPE, stderr = PIPE)
+ if not quiet:
+ Logger().log("Executing '%s'" % (" ".join(args), ))
+ _, stderr = proc.communicate()
+ return proc.returncode, _, stderr
+
+def _xl_create(xl_conf_file, paused, fg):
+ """Creates a XEN Domain using the XL toolstack"""
+ args = ['xl', 'create']
+ if paused:
+ args.append('-p')
+ if fg:
+ args.append('-F')
+ args.append(xl_conf_file)
+ ret, _, stderr = _run_cmd(args)
+ if ret:
+ raise RunnerError("_xl_create", ret, _, stderr)
+
+def _xl_dom_id(xl_dom_name):
+ """Returns the ID of a XEN domain specified by name"""
+ args = ['xl', 'domid', xl_dom_name]
+ ret, _, stderr = _run_cmd(args)
+ if ret:
+ raise RunnerError("_xl_dom_id", ret, _, stderr)
+ return long(_)
+
+def _xl_destroy(domid):
+ """Destroy the domain specified by domid"""
+ args = ['xl', 'destroy', str(domid)]
+ ret, _, stderr = _run_cmd(args)
+ if ret:
+ raise RunnerError("_xl_destroy", ret, _, stderr)
+
+def _xl_unpause(domid):
+ """Unpauses the domain specified by domid"""
+ args = ['xl', 'unpause', str(domid)]
+ ret, _, stderr = _run_cmd(args)
+ if ret:
+ raise RunnerError("_xl_unpause", ret, _, stderr)
+
+def _is_alive(domid):
+ """Checks if the domain is alive using xenstore."""
+ args = ['xenstore-exists', os.path.join('/local/domain', str(domid))]
+ ret = _run_cmd(args, True)[0]
+ return ret == 0
+
+
+########################################################################
+# Classes
+########################################################################
+
+class XLDomU(object):
+ """XEN DomU implementation using the XL toolstack"""
+
+ def __init__(self, conf):
+ super(XLDomU, self).__init__()
+ self.__xl_conf_file = conf
+ self.dom_id = 0
+ code = open(conf)
+ self.__config = imp.new_module(conf)
+ exec code in self.__config.__dict__
+ self.__console = None
+
+ def create(self, paused=True, fg=False):
+ """Creates the XEN domain."""
+ _xl_create(self.__xl_conf_file, paused, fg)
+ self.dom_id = _xl_dom_id(self.__config.name)
+
+ def cleanup(self, timeout=10):
+ """Destroys the domain."""
+
+ if self.dom_id == 0:
+ return True
+
+ for _ in xrange(timeout):
+ if not _is_alive(self.dom_id):
+ return True
+ time.sleep(1)
+
+ if _is_alive(self.dom_id):
+ _xl_destroy(self.dom_id)
+ self.dom_id = 0
+ return False
+
+ return True
+
+ def unpause(self):
+ """Unpauses the domain."""
+ _xl_unpause(self.dom_id)
+
+ def console(self, logfile=None):
+ """Creates the domain_console handler."""
+ if self.__console is None:
+ self.__console = pexpect.spawn('xl', ['console', str(self.dom_id)],
+ logfile=logfile)
+ return self.__console
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |