[Xen-devel] [PATCH 7 of 7] xenpaging: start xenpaging via config option
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1301591717 -7200
# Node ID 93889c2c6aad3d8ab9c02da4197e9645a9a1aae2
# Parent  1d040925ea0dc5c01f7cf2c188ab01da48028f92
xenpaging: start xenpaging via config option

Start xenpaging via config option.

TODO: add libxl support
TODO: parse config values like 42K, 42M, 42G, 42%

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
v4:
  add config option for pagefile directory
  add config option to enable debug
  add config option to set policy mru_size
  fail if chdir fails
  force self.xenpaging* variables to be strings, because an xm new may
  turn some of them into type int and os.execve would later fail with a
  TypeError

v3:
  decouple create/destroyXenPaging from _create/_removeDevices
  init xenpaging variable to 0 if xenpaging is not in the config file, to
  avoid the string None coming from the sxp file

v2:
  unlink the logfile instead of truncating it; this allows hardlinking it
  for further inspection
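For illustration, the new options would be used in an HVM guest config
like this (the values below are only examples, not the defaults):

    xenpaging=2048
    xenpaging_workdir="/var/lib/xen/xenpaging"
    xenpaging_debug=1
    xenpaging_policy_mru_size=1024

xend hands the last two settings to the pager process through the
XENPAGING_DEBUG and XENPAGING_POLICY_MRU_SIZE environment variables;
whether the pager itself honours them is handled outside this patch
(presumably by the earlier patches in this series).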
diff -r 1d040925ea0d -r 93889c2c6aad tools/examples/xmexample.hvm
--- a/tools/examples/xmexample.hvm  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/examples/xmexample.hvm  Thu Mar 31 19:15:17 2011 +0200
@@ -127,6 +127,18 @@
 # Device Model to be used
 device_model = 'qemu-dm'

+# number of guest pages to page-out, or -1 for entire guest memory range
+xenpaging=42
+
+# directory to store guest page file
+#xenpaging_workdir="/var/lib/xen/xenpaging"
+
+# enable debug output in pager
+#xenpaging_debug=0
+
+# number of paged-in pages to keep in memory
+#xenpaging_policy_mru_size=1024
+
 #-----------------------------------------------------------------------------
 # boot on floppy (a), hard disk (c), Network (n) or CD-ROM (d)
 # default: hard disk, cd-rom, floppy
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/README.XendConfig
--- a/tools/python/README.XendConfig  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/README.XendConfig  Thu Mar 31 19:15:17 2011 +0200
@@ -120,6 +120,10 @@
   image.vncdisplay
   image.vncunused
   image.hvm.device_model
+  image.hvm.xenpaging
+  image.hvm.xenpaging_workdir
+  image.hvm.xenpaging_debug
+  image.hvm.xenpaging_policy_mru_size
   image.hvm.display
   image.hvm.xauthority
   image.hvm.vncconsole
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/README.sxpcfg
--- a/tools/python/README.sxpcfg  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/README.sxpcfg  Thu Mar 31 19:15:17 2011 +0200
@@ -51,6 +51,10 @@
   - vncunused
 (HVM)
   - device_model
+  - xenpaging
+  - xenpaging_workdir
+  - xenpaging_debug
+  - xenpaging_policy_mru_size
   - display
   - xauthority
   - vncconsole
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/xen/xend/XendConfig.py  Thu Mar 31 19:15:17 2011 +0200
@@ -147,6 +147,10 @@
     'apic': int,
     'boot': str,
     'device_model': str,
+    'xenpaging': str,
+    'xenpaging_workdir': str,
+    'xenpaging_debug': str,
+    'xenpaging_policy_mru_size': str,
     'loader': str,
     'display' : str,
     'fda': str,
@@ -512,6 +516,14 @@
             self['platform']['nomigrate'] = 0

         if self.is_hvm():
+            if 'xenpaging' not in self['platform']:
+                self['platform']['xenpaging'] = "0"
+            if 'xenpaging_workdir' not in self['platform']:
+                self['platform']['xenpaging_workdir'] = "/var/lib/xen/xenpaging"
+            if 'xenpaging_debug' not in self['platform']:
+                self['platform']['xenpaging_debug'] = "0"
+            if 'xenpaging_policy_mru_size' not in self['platform']:
+                self['platform']['xenpaging_policy_mru_size'] = "0"
             if 'timer_mode' not in self['platform']:
                 self['platform']['timer_mode'] = 1
             if 'viridian' not in self['platform']:
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/xen/xend/XendDomainInfo.py  Thu Mar 31 19:15:17 2011 +0200
@@ -2246,6 +2246,8 @@
                   self.info['name_label'], self.domid, self.info['uuid'],
                   new_name, new_uuid)
         self._unwatchVm()
+        if self.image:
+            self.image.destroyXenPaging()
         self._releaseDevices()
         # Remove existing vm node in xenstore
         self._removeVm()
@@ -2913,6 +2915,9 @@

         self._createDevices()

+        if self.image:
+            self.image.createXenPaging()
+
         self.image.cleanupTmpImages()

         self.info['start_time'] = time.time()
@@ -2937,6 +2942,8 @@
         self.refresh_shutdown_lock.acquire()
         try:
             self.unwatchShutdown()
+            if self.image:
+                self.image.destroyXenPaging()
             self._releaseDevices()
             bootloader_tidy(self)

@@ -3016,6 +3023,7 @@
             self.image = image.create(self, self.info)
             if self.image:
                 self.image.createDeviceModel(True)
+                self.image.createXenPaging()
             self._storeDomDetails()
         self._registerWatches()
         self.refreshShutdown()
@@ -3151,6 +3159,8 @@
         # could also fetch a parsed note from xenstore
         fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
         if not fast:
+            if self.image:
+                self.image.destroyXenPaging()
             self._releaseDevices()
             self.testDeviceComplete()
             self.testvifsComplete()
@@ -3166,6 +3176,8 @@
             self._storeDomDetails()

         self._createDevices()
+        if self.image:
+            self.image.createXenPaging()
         log.debug("XendDomainInfo.resumeDomain: devices created")

         xc.domain_resume(self.domid, fast)
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/xen/xend/image.py  Thu Mar 31 19:15:17 2011 +0200
@@ -122,6 +122,11 @@
         self.vm.permissionsVm("image/cmdline", { 'dom': self.vm.getDomid(), 'read': True } )

         self.device_model = vmConfig['platform'].get('device_model')
+        self.xenpaging = str(vmConfig['platform'].get('xenpaging'))
+        self.xenpaging_workdir = str(vmConfig['platform'].get('xenpaging_workdir'))
+        self.xenpaging_debug = str(vmConfig['platform'].get('xenpaging_debug'))
+        self.xenpaging_policy_mru_size = str(vmConfig['platform'].get('xenpaging_policy_mru_size'))
+        self.xenpaging_pid = None

         self.display = vmConfig['platform'].get('display')
         self.xauthority = vmConfig['platform'].get('xauthority')
@@ -392,6 +397,88 @@
             sentinel_fifos_inuse[sentinel_path_fifo] = 1
         self.sentinel_path_fifo = sentinel_path_fifo

+    def createXenPaging(self):
+        if not self.vm.info.is_hvm():
+            return
+        if self.xenpaging == "0":
+            return
+        if self.xenpaging_pid:
+            return
+        xenpaging_bin = auxbin.pathTo("xenpaging")
+        args = [xenpaging_bin]
+        args = args + ([ "%d" % self.vm.getDomid()])
+        args = args + ([ "%s" % self.xenpaging])
+        env = dict(os.environ)
+        if not self.xenpaging_debug == "0":
+            env['XENPAGING_DEBUG'] = self.xenpaging_debug
+        if not self.xenpaging_policy_mru_size == "0":
+            env['XENPAGING_POLICY_MRU_SIZE'] = self.xenpaging_policy_mru_size
+        self.xenpaging_logfile = "/var/log/xen/xenpaging-%s.log" % str(self.vm.info['name_label'])
+        logfile_mode = os.O_WRONLY|os.O_CREAT|os.O_APPEND|os.O_TRUNC
+        null = os.open("/dev/null", os.O_RDONLY)
+        try:
+            os.unlink(self.xenpaging_logfile)
+        except:
+            pass
+        logfd = os.open(self.xenpaging_logfile, logfile_mode, 0644)
+        sys.stderr.flush()
+        contract = osdep.prefork("%s:%d" % (self.vm.getName(), self.vm.getDomid()))
+        xenpaging_pid = os.fork()
+        if xenpaging_pid == 0: #child
+            try:
+                osdep.postfork(contract)
+                os.dup2(null, 0)
+                os.dup2(logfd, 1)
+                os.dup2(logfd, 2)
+                os.chdir(self.xenpaging_workdir)
+                try:
+                    log.info("starting %s" % args)
+                    os.execve(xenpaging_bin, args, env)
+                except Exception, e:
+                    log.warn('failed to execute xenpaging: %s' % utils.exception_string(e))
+                    os._exit(126)
+            except:
+                log.warn("starting xenpaging in %s failed" % self.xenpaging_workdir)
+                os._exit(127)
+        else:
+            osdep.postfork(contract, abandon=True)
+            self.xenpaging_pid = xenpaging_pid
+            os.close(null)
+            os.close(logfd)
+
+    def destroyXenPaging(self):
+        if self.xenpaging == "0":
+            return
+        if self.xenpaging_pid:
+            try:
+                os.kill(self.xenpaging_pid, signal.SIGHUP)
+            except OSError, exn:
+                log.exception(exn)
+            for i in xrange(100):
+                try:
+                    (p, rv) = os.waitpid(self.xenpaging_pid, os.WNOHANG)
+                    if p == self.xenpaging_pid:
+                        break
+                except OSError:
+                    # This is expected if Xend has been restarted within
+                    # the life of this domain.  In this case, we can kill
+                    # the process, but we can't wait for it because it's
+                    # not our child. We continue this loop, and after it is
+                    # terminated make really sure the process is going away
+                    # (SIGKILL).
+                    pass
+                time.sleep(0.1)
+            else:
+                log.warning("xenpaging %d took more than 10s "
+                            "to terminate: sending SIGKILL" % self.xenpaging_pid)
+                try:
+                    os.kill(self.xenpaging_pid, signal.SIGKILL)
+                    os.waitpid(self.xenpaging_pid, 0)
+                except OSError:
+                    # This happens if the process doesn't exist.
+                    pass
+        self.xenpaging_pid = None
+
     def createDeviceModel(self, restore = False):
         if self.device_model is None:
             return
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/xen/xm/create.py  Thu Mar 31 19:15:17 2011 +0200
@@ -491,6 +491,22 @@
           fn=set_value, default=None,
           use="Set the path of the root NFS directory.")

+gopts.var('xenpaging', val='NUM',
+          fn=set_value, default='0',
+          use="Number of pages to swap.")
+
+gopts.var('xenpaging_workdir', val='PATH',
+          fn=set_value, default='/var/lib/xen/xenpaging',
+          use="Path to xenpaging working directory.")
+
+gopts.var('xenpaging_debug', val='NUM',
+          fn=set_value, default='0',
+          use="Enable debug output in the pager.")
+
+gopts.var('xenpaging_policy_mru_size', val='NUM',
+          fn=set_value, default='0',
+          use="Number of paged-in pages to keep in memory.")
+
 gopts.var('device_model', val='FILE',
           fn=set_value, default=None,
           use="Path to device model program.")
@@ -1076,6 +1092,10 @@
     args = [ 'acpi', 'apic',
              'boot',
              'cpuid', 'cpuid_check',
+             'xenpaging',
+             'xenpaging_workdir',
+             'xenpaging_debug',
+             'xenpaging_policy_mru_size',
              'device_model', 'display',
              'fda', 'fdb',
              'gfx_passthru', 'guest_os_type',
diff -r 1d040925ea0d -r 93889c2c6aad tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py  Thu Mar 31 19:14:01 2011 +0200
+++ b/tools/python/xen/xm/xenapi_create.py  Thu Mar 31 19:15:17 2011 +0200
@@ -1085,6 +1085,10 @@
             'acpi',
             'apic',
             'boot',
+            'xenpaging',
+            'xenpaging_workdir',
+            'xenpaging_debug',
+            'xenpaging_policy_mru_size',
             'device_model',
             'loader',
             'fda',

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel