[Xen-devel] [PATCH 2/2] Add new data structure to track ranges.
This patch introduces a new data structure, struct rb_rangeset, to represent a group of contiguous ranges, e.g. the start and end addresses of PIO/MMIO regions. For now, this structure is intended to help the ioreq server forward I/O requests to backend device models more efficiently.

The behavior of the new structure is quite similar to that of struct rangeset; the major difference is the time complexity. struct rangeset is based on a doubly linked list, so a search takes O(n) time. struct rb_rangeset is based on a red-black tree, so binary search brings the lookup down to O(log(n)), which is better suited to tracking a large number of discrete ranges.

The ioreq server code is changed to use the new type, and a new routine, hvm_ioreq_server_dump_range_info, is added to dump all the ranges tracked by an ioreq server.

Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c            |   3 +
 xen/arch/x86/hvm/hvm.c           |  56 ++++++--
 xen/common/Makefile              |   1 +
 xen/common/rb_rangeset.c         | 281 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/domain.h |   2 +-
 xen/include/asm-x86/hvm/hvm.h    |   1 +
 xen/include/xen/rb_rangeset.h    |  49 +++++++
 7 files changed, 378 insertions(+), 15 deletions(-)
 create mode 100644 xen/common/rb_rangeset.c
 create mode 100644 xen/include/xen/rb_rangeset.h
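
For reviewers, a minimal usage sketch of the new interface follows. It is illustrative only and not part of the patch: it relies solely on the declarations added in xen/include/xen/rb_rangeset.h below, and the "example" name, the addresses and the limit of 256 are made-up values.

/* Illustrative sketch -- not part of the patch.  Assumes a Xen build
 * environment (printk, xmalloc, bool_t) and the rb_rangeset interface
 * introduced by this patch. */
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/rb_rangeset.h>

static int example_track_mmio(void)
{
    struct rb_rangeset *r = rb_rangeset_new("example");

    if ( !r )
        return -ENOMEM;

    /* Cap the number of discrete ranges this set may hold. */
    rb_rangeset_limit(r, 256);

    /*
     * As the comment above rb_rangeset_add_range() notes, callers are
     * expected to reject overlapping insertions up front.
     */
    if ( !rb_rangeset_overlaps_range(r, 0xa0000, 0xbffff) )
        rb_rangeset_add_range(r, 0xa0000, 0xbffff);

    /* O(log n) lookup: is [0xa8000, 0xa8fff] fully tracked? */
    if ( rb_rangeset_contains_range(r, 0xa8000, 0xa8fff) )
        printk("range is tracked\n");

    /* Punch a hole; the containing range is split into two nodes. */
    rb_rangeset_remove_range(r, 0xb0000, 0xb7fff);

    rb_rangeset_printk(r);      /* dump the remaining ranges */
    rb_rangeset_destroy(r);

    return 0;
}

Note that the add path only merges a new range with exactly adjacent neighbours; ranges that partially overlap an existing node are rejected with -EEXIST, which is why the overlap check comes first.
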
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a8fe046..f8a8b80 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2086,6 +2086,9 @@ int domain_relinquish_resources(struct domain *d)
 void arch_dump_domain_info(struct domain *d)
 {
     paging_dump_domain_info(d);
+
+    if ( is_hvm_domain(d) )
+        hvm_ioreq_server_dump_range_info(d);
 }
 
 void arch_dump_vcpu_info(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 535d622..c79676e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -37,6 +37,7 @@
 #include <xen/wait.h>
 #include <xen/mem_access.h>
 #include <xen/rangeset.h>
+#include <xen/rb_rangeset.h>
 #include <asm/shadow.h>
 #include <asm/hap.h>
 #include <asm/current.h>
@@ -818,7 +819,7 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
         return;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
-        rangeset_destroy(s->range[i]);
+        rb_rangeset_destroy(s->range[i]);
 }
 
 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
@@ -842,8 +843,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         if ( rc )
             goto fail;
 
-        s->range[i] = rangeset_new(s->domain, name,
-                                   RANGESETF_prettyprint_hex);
+        s->range[i] = rb_rangeset_new(name);
 
         xfree(name);
 
@@ -851,7 +851,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         if ( !s->range[i] )
             goto fail;
 
-        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
+        rb_rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
     }
 
  done:
@@ -1149,7 +1149,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
 
         if ( s->id == id )
         {
-            struct rangeset *r;
+            struct rb_rangeset *r;
 
             switch ( type )
             {
@@ -1169,10 +1169,10 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                 break;
 
             rc = -EEXIST;
-            if ( rangeset_overlaps_range(r, start, end) )
+            if ( rb_rangeset_overlaps_range(r, start, end) )
                 break;
 
-            rc = rangeset_add_range(r, start, end);
+            rc = rb_rangeset_add_range(r, start, end);
             break;
         }
     }
@@ -1200,7 +1200,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
 
         if ( s->id == id )
        {
-            struct rangeset *r;
+            struct rb_rangeset *r;
 
             switch ( type )
             {
@@ -1220,10 +1220,10 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
                 break;
 
             rc = -ENOENT;
-            if ( !rangeset_contains_range(r, start, end) )
+            if ( !rb_rangeset_contains_range(r, start, end) )
                 break;
 
-            rc = rangeset_remove_range(r, start, end);
+            rc = rb_rangeset_remove_range(r, start, end);
             break;
         }
     }
@@ -1349,6 +1349,34 @@ static void hvm_destroy_all_ioreq_servers(struct domain *d)
     spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
+void hvm_ioreq_server_dump_range_info(struct domain *d)
+{
+    unsigned int i;
+    struct hvm_ioreq_server *s;
+
+    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        if ( s == d->arch.hvm_domain.default_ioreq_server )
+            continue;
+
+        printk("Domain %u, ranges tracked in ioreq server %u:\n", d->domain_id, s->id);
+
+        for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
+        {
+            if ( !s->range[i] )
+                continue;
+
+            rb_rangeset_printk(s->range[i]);
+        }
+    }
+
+    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+}
+
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
                                      evtchn_port_t *p_port)
 {
@@ -2465,7 +2493,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
-        struct rangeset *r;
+        struct rb_rangeset *r;
 
         if ( s == d->arch.hvm_domain.default_ioreq_server )
            continue;
@@ -2484,18 +2512,18 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         case IOREQ_TYPE_PIO:
             end = addr + p->size - 1;
-            if ( rangeset_contains_range(r, addr, end) )
+            if ( rb_rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
         case IOREQ_TYPE_COPY:
             end = addr + (p->size * p->count) - 1;
-            if ( rangeset_contains_range(r, addr, end) )
+            if ( rb_rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
         case IOREQ_TYPE_PCI_CONFIG:
-            if ( rangeset_contains_singleton(r, addr >> 32) )
+            if ( rb_rangeset_contains_range(r, addr>>32, addr>>32) )
             {
                 p->type = type;
                 p->addr = addr;
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 1cddebc..c49040f 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -28,6 +28,7 @@ obj-y += random.o
 obj-y += rangeset.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
+obj-y += rb_rangeset.o
 obj-y += rcupdate.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
diff --git a/xen/common/rb_rangeset.c b/xen/common/rb_rangeset.c
new file mode 100644
index 0000000..a19df6b
--- /dev/null
+++ b/xen/common/rb_rangeset.c
@@ -0,0 +1,281 @@
+/*
+    Red-black tree based rangeset
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <xen/lib.h>
+#include <xen/kernel.h>
+#include <xen/errno.h>
+#include <xen/rb_rangeset.h>
+
+static struct rb_range *alloc_and_init_rb_range(
+    struct rb_rangeset *r, unsigned long s, unsigned long e)
+{
+    struct rb_range *range = NULL;
+
+    if ( r->nr_ranges == 0 )
+        return NULL;
+
+    range = xmalloc(struct rb_range);
+    if ( range )
+    {
+        --r->nr_ranges;
+        range->s = s;
+        range->e = e;
+    }
+    return range;
+}
+
+static void free_rb_range(struct rb_rangeset *r, struct rb_range *range)
+{
+    r->nr_ranges++;
+    rb_erase(&range->node, &r->rbroot);
+    xfree(range);
+}
+
+static struct rb_range *rb_rangeset_find_range(
+    struct rb_rangeset *r, unsigned long s)
+{
+    struct rb_node *node;
+
+    node = r->rbroot.rb_node;
+
+    while ( node )
+    {
+        struct rb_range *range = container_of(node, struct rb_range, node);
+
+        if ( (s >= range->s) && (s <= range->e) )
+            return range;
+        if ( s < range->s )
+            node = node->rb_left;
+        else if ( s > range->s )
+            node = node->rb_right;
+    }
+    return NULL;
+}
+
+bool_t rb_rangeset_overlaps_range(struct rb_rangeset *r,
+                                  unsigned long s, unsigned long e)
+{
+    struct rb_node *node;
+    bool_t rc = 0;
+
+    ASSERT(s <= e);
+
+    node = r->rbroot.rb_node;
+    while ( node )
+    {
+        struct rb_range *range = container_of(node, struct rb_range, node);
+        if ( (s <= range->e) && (e >= range->s) )
+        {
+            rc = 1;
+            break;
+        }
+        else if ( s < range->s )
+            node = node->rb_left;
+        else if ( s > range->s )
+            node = node->rb_right;
+    }
+    return rc;
+}
+
+bool_t rb_rangeset_contains_range(
+    struct rb_rangeset *r, unsigned long s, unsigned long e)
+{
+    struct rb_range *range;
+    bool_t contains;
+
+    ASSERT(s <= e);
+
+    range = rb_rangeset_find_range(r, s);
+    contains = (range && (range->e >= e));
+    return contains;
+}
+
+static void rb_rangeset_insert_range(
+    struct rb_root *root, struct rb_range *range)
+{
+    struct rb_node **new = &(root->rb_node);
+    struct rb_node *parent = NULL;
+
+    /* Figure out where to put new node */
+    while ( *new )
+    {
+        struct rb_range *this = container_of(*new, struct rb_range, node);
+        parent = *new;
+
+        if ( range->s < this->s )
+            new = &((*new)->rb_left);
+        else if ( range->s > this->s )
+            new = &((*new)->rb_right);
+    }
+
+    /* Add new node and rebalance the range tree. */
+    rb_link_node(&range->node, parent, new);
+    rb_insert_color(&range->node, root);
+}
+
+/*
+ * Add a new range into the rb_rangeset, rb_rangeset_overlaps_range()
+ * should be called first, to ensure nodes inside the rb_rangeset will
+ * not interleave.
+ */
+int rb_rangeset_add_range(struct rb_rangeset *r,
+                          unsigned long s, unsigned long e)
+{
+    struct rb_range *range = NULL;
+    struct rb_range *next = NULL;
+
+    ASSERT(s <= e);
+
+    if ( (s) && (range = rb_rangeset_find_range(r, s - 1)) )
+    {
+        /* range tree overlapped */
+        if ( range->e != (s - 1) )
+            return -EEXIST;
+        range->e = e;
+    }
+    else
+    {
+        range = alloc_and_init_rb_range(r, s, e);
+        if ( !range )
+            return -ENOMEM;
+        rb_rangeset_insert_range(&r->rbroot, range);
+    }
+
+    next = container_of(rb_next(&range->node), struct rb_range, node);
+
+    if ( next && (next->s == (e + 1)) )
+    {
+        range->e = next->e;
+        free_rb_range(r, next);
+    }
+
+    return 0;
+}
+
+int rb_rangeset_remove_range(struct rb_rangeset *r,
+                             unsigned long s, unsigned long e)
+{
+    struct rb_range *range = NULL;
+    struct rb_range *next = NULL;
+    unsigned long start, end;
+
+    ASSERT(s <= e);
+
+    range = rb_rangeset_find_range(r, s);
+    if ( !range )
+        return -ENOENT;
+
+    start = range->s;
+    end = range->e;
+
+    /* the range to be removed must be contained in one rb_range */
+    if ( end < e )
+        return -ENOENT;
+
+    /* value of start can only be less than or equal to s */
+    if ( start == s )
+    {
+        if ( end > e )
+            range->s = e + 1;
+        else
+            free_rb_range(r, range);
+    }
+    else
+    {
+        if ( end > e )
+        {
+            next = alloc_and_init_rb_range(r, e + 1, end);
+            if ( next == NULL )
+                return -ENOMEM;
+
+            rb_rangeset_insert_range(&r->rbroot, next);
+        }
+        range->e = s - 1;
+    }
+    return 0;
+}
+
+void rb_rangeset_destroy(struct rb_rangeset *r)
+{
+    struct rb_root *root;
+    struct rb_node *node;
+
+    if ( r == NULL )
+        return;
+
+    root = &r->rbroot;
+    node = rb_first(root);
+    while ( node )
+    {
+        struct rb_range *range = container_of(node, struct rb_range, node);
+        free_rb_range(r, range);
+        node = rb_first(root);
+    }
+
+    xfree(r);
+}
+
+struct rb_rangeset *rb_rangeset_new(char *name)
+{
+    struct rb_rangeset *r;
+
+    r = xmalloc(struct rb_rangeset);
+    if ( r == NULL )
+        return NULL;
+
+    r->nr_ranges = -1;
+    if ( name != NULL )
+    {
+        safe_strcpy(r->name, name);
+    }
+    else
+    {
+        snprintf(r->name, sizeof(r->name), "(no name)");
+    }
+
+    r->rbroot = RB_ROOT;
+    return r;
+}
+
+void rb_rangeset_limit(
+    struct rb_rangeset *r, unsigned int limit)
+{
+    r->nr_ranges = limit;
+}
+
+void rb_rangeset_printk(struct rb_rangeset *r)
+{
+    struct rb_node *node;
+
+    printk("    %-10s: {\n", r->name);
+
+    node = rb_first(&r->rbroot);
+
+    while ( node )
+    {
+        struct rb_range *range = container_of(node, struct rb_range, node);
+
+        printk("        [ 0x%lx", range->s);
+        if ( range->e != range->s )
+            printk(", 0x%lx", range->e);
+        printk(" ]\n");
+
+        node = rb_next(node);
+    }
+    printk("    }\n");
+}
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d62fda9..a2f60a8 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -68,7 +68,7 @@ struct hvm_ioreq_server {
     /* Lock to serialize access to buffered ioreq ring */
     spinlock_t             bufioreq_lock;
     evtchn_port_t          bufioreq_evtchn;
-    struct rangeset        *range[NR_IO_RANGE_TYPES];
+    struct rb_rangeset     *range[NR_IO_RANGE_TYPES];
     bool_t                 enabled;
     bool_t                 bufioreq_atomic;
 };
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 57f9605..2b1ea30 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -230,6 +230,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
 
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
+void hvm_ioreq_server_dump_range_info(struct domain *d);
 int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
 void hvm_complete_assist_req(ioreq_t *p);
diff --git a/xen/include/xen/rb_rangeset.h b/xen/include/xen/rb_rangeset.h
new file mode 100644
index 0000000..4c07e30
--- /dev/null
+++ b/xen/include/xen/rb_rangeset.h
@@ -0,0 +1,49 @@
+/*
+    Red-black tree based rangeset
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RB_RANGESET_H__
+#define __RB_RANGESET_H__
+
+#include <xen/rbtree.h>
+
+struct rb_rangeset {
+    char name[32];
+    long nr_ranges;
+    struct rb_root rbroot;
+};
+
+struct rb_range {
+    struct rb_node node;
+    unsigned long s, e;
+};
+
+struct rb_rangeset *rb_rangeset_new(char *name);
+void rb_rangeset_limit(
+    struct rb_rangeset *r, unsigned int limit);
+void rb_rangeset_destroy(struct rb_rangeset *r);
+bool_t rb_rangeset_overlaps_range(struct rb_rangeset *r,
+                                  unsigned long s, unsigned long e);
+bool_t rb_rangeset_contains_range(
+    struct rb_rangeset *r, unsigned long s, unsigned long e);
+int rb_rangeset_add_range(struct rb_rangeset *r,
+                          unsigned long s, unsigned long e);
+int rb_rangeset_remove_range(struct rb_rangeset *r,
+                             unsigned long s, unsigned long e);
+void rb_rangeset_printk(struct rb_rangeset *r);
+
+#endif /* __RB_RANGESET_H__ */
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel