Re: [Xen-devel] [PATCH v1 2/2] ioreq-server: Support scatter page forwarding
>>> On 28.07.14 at 19:55, <wei.ye@xxxxxxxxx> wrote:
> +static struct pglist_hash_table *hvm_ioreq_server_lookup_pglist_hash_table(
> +    struct pglist_hash_table *pglist_ht, unsigned long gpfn)
> +{
> +    int index = pglist_hash(gpfn);
Here and elsewhere: "unsigned int" for the hash index.
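I.e., here and in the add/rem functions further down:

    unsigned int index = pglist_hash(gpfn);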
> +static void hvm_ioreq_server_free_hash_chain(struct pglist_hash_table *chain)
> +{
> +    struct pglist_hash_table *p = chain;
> +    struct pglist_hash_table *n;
> +
> +    while (p)
Coding style: blanks inside the parentheses, i.e. "while ( p )".
> +static int hvm_ioreq_server_pglist_hash_add(struct pglist_hash_table *pglist_ht,
> +                                            unsigned long gpfn)
> +{
> +    int index = pglist_hash(gpfn);
> +    struct pglist_hash_table *ne;
> +
> +    if ( hvm_ioreq_server_lookup_pglist_hash_table(pglist_ht, gpfn) != NULL )
> +        return -EINVAL;
> +
> +    if ( pglist_ht[index].gpfn == PGLIST_INVALID_GPFN )
> +        pglist_ht[index].gpfn = gpfn;
> +    else
> +    {
> +        ne = xmalloc_bytes(sizeof(pglist_ht[0]));
xmalloc() please.
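I.e. the type-safe form:

    ne = xmalloc(struct pglist_hash_table);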
> +static int hvm_ioreq_server_pglist_hash_rem(struct pglist_hash_table *pglist_ht,
> +                                            unsigned long gpfn)
> +{
> +    int index = pglist_hash(gpfn);
> +    struct pglist_hash_table *next, *prev;
> +
> +    if ( pglist_ht[index].gpfn == gpfn )
> +        pglist_ht[index].gpfn = PGLIST_INVALID_GPFN;
> +    else
> +    {
> +        prev = &pglist_ht[index];
> +        while ( 1 )
> +        {
> +            next = prev->next;
> +            if ( !next )
> +            {
> +                printk("hvm_ioreq_server_pglist_hash_rem hash_table %p remove"
> +                       " %lx not found\n", pglist_ht, gpfn);
The value of this log message is questionable anyway, but in no case is it
acceptable without a suitably low guest log level (XENLOG_G_*) being used.
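If the message is kept at all, it would at least need to be along these
lines (a sketch):

    printk(XENLOG_G_DEBUG
           "%s: hash table %p: gpfn %lx not found\n",
           __func__, pglist_ht, gpfn);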
> @@ -948,7 +1040,14 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
>      spin_lock_init(&s->lock);
>      INIT_LIST_HEAD(&s->ioreq_vcpu_list);
>      spin_lock_init(&s->bufioreq_lock);
> +    spin_lock_init(&s->pglist_hash_lock);
>
> +    rc = -ENOMEM;
> +    s->pglist_hash_base = xmalloc_bytes(PGLIST_HASH_ENTRY_SIZE * PGLIST_HASH_SIZE);
xmalloc_array() please.
> +    if ( s->pglist_hash_base == NULL )
> +        goto fail1;
> +    memset(s->pglist_hash_base, 0, PGLIST_HASH_ENTRY_SIZE * PGLIST_HASH_SIZE);
And together with this, xzalloc_array().
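Taking both remarks together, i.e. (sketch):

    s->pglist_hash_base = xzalloc_array(struct pglist_hash_table,
                                        PGLIST_HASH_SIZE);
    if ( s->pglist_hash_base == NULL )
        goto fail1;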
> +static int hvm_map_pages_to_ioreq_server(struct domain *d, ioservid_t id,
> +                                         uint16_t set, uint16_t nr_pages,
> +                                         unsigned long *gpfn)
> +{
> +    struct hvm_ioreq_server *s;
> +    int rc;
> +    unsigned int i;
> +    p2m_type_t ot, nt;
> +
> +    if ( set )
> +    {
> +        ot = p2m_ram_rw;
> +        nt = p2m_ram_wp;
> +    }
> +    else
> +    {
> +        ot = p2m_ram_wp;
> +        nt = p2m_ram_rw;
> +    }
> +
> +    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
> +
> +    rc = -ENOENT;
> +    list_for_each_entry ( s,
> +                          &d->arch.hvm_domain.ioreq_server.list,
> +                          list_entry )
> +    {
> +        if ( s == d->arch.hvm_domain.default_ioreq_server )
> +            continue;
> +
> +        if ( s->id == id )
> +        {
Consistency please: Either both if()-s use "continue" (and perhaps get
combined into one), or (less desirable imo) both use a {} body.
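E.g., a sketch of the combined-continue variant:

        if ( s == d->arch.hvm_domain.default_ioreq_server ||
             s->id != id )
            continue;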
> +            spin_lock(&s->pglist_hash_lock);
> +
> +            for ( i = 0; i < nr_pages; i++ )
> +            {
> +                p2m_change_type_one(d, gpfn[i], ot, nt);
Error handling?
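Presumably something like (a sketch; undoing the type changes already made
when failing mid-loop also wants considering):

                rc = p2m_change_type_one(d, gpfn[i], ot, nt);
                if ( rc )
                    break;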
> +                if ( set )
> +                    rc = hvm_ioreq_server_pglist_hash_add(s->pglist_hash_base,
> +                                                          gpfn[i]);
> +                else
> +                    rc = hvm_ioreq_server_pglist_hash_rem(s->pglist_hash_base,
> +                                                          gpfn[i]);
> +
> +                if ( rc != 0 )
> +                    break;
> +            }
> +
> +            spin_unlock(&s->pglist_hash_lock);
> +            flush_tlb_mask(d->domain_dirty_cpumask);
Why?
> @@ -2294,6 +2453,8 @@ static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>      uint32_t cf8;
>      uint8_t type;
>      uint64_t addr;
> +    unsigned long gpfn;
> +    struct pglist_hash_table *he;
These ought to be moved down into a narrower scope.
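E.g. (a sketch, assuming the lookup only applies to memory, i.e.
IOREQ_TYPE_COPY, accesses; the exact block depends on the surrounding code):

        if ( type == IOREQ_TYPE_COPY )
        {
            unsigned long gpfn = addr >> PAGE_SHIFT;
            struct pglist_hash_table *he;
            ...
        }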
> @@ -2361,6 +2522,15 @@ static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
>          if ( rangeset_contains_range(r, addr, end) )
>              return s;
>
> +        gpfn = addr >> PAGE_SHIFT;
> +        spin_lock(&s->pglist_hash_lock);
> +        he = hvm_ioreq_server_lookup_pglist_hash_table(s->pglist_hash_base, gpfn);
> +        spin_unlock(&s->pglist_hash_lock);
> +
> +        if ( he ) {
Coding style again: the opening brace belongs on its own line, and a
single-statement body shouldn't be braced at all.
> +            return s;
> +        }
> +
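I.e. simply:

        if ( he )
            return s;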
And now the most fundamental question: Why is the existing (range
set based) mechanism not enough? Is that because we limit the
number of entries permitted on the range set? If so, the obvious
question is - why is there no limit being enforced for your new type
of pages?
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -48,6 +48,16 @@ struct hvm_ioreq_vcpu {
>      evtchn_port_t ioreq_evtchn;
>  };
>
> +struct pglist_hash_table {
> +    struct pglist_hash_table *next;
> +    unsigned long gpfn;
> +};
> +#define PGLIST_HASH_SIZE_SHIFT 8
> +#define PGLIST_HASH_SIZE (1 << PGLIST_HASH_SIZE_SHIFT)
> +#define pglist_hash(x) ((x) % PGLIST_HASH_SIZE)
> +#define PGLIST_INVALID_GPFN 0
> +#define PGLIST_HASH_ENTRY_SIZE sizeof(struct pglist_hash_table)
None of this has any point in being in a header: There's only a
single source file using these, so they should be private to that
one.
> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -367,6 +367,18 @@ struct xen_hvm_set_ioreq_server_state {
>  typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
>
> +#define HVMOP_map_pages_to_ioreq_server 23
> +#define MAX_MAP_BATCH_PAGES 128
Too generic a name.
> +struct xen_hvm_map_pages_to_ioreq_server {
> +    domid_t domid;     /* IN - domain to be serviced */
> +    ioservid_t id;     /* IN - server id */
> +    uint16_t set;      /* IN - 1: map pages, 0: unmap pages */
> +    uint16_t nr_pages; /* IN - page number */
"page count" or "number of pages".
> +    unsigned long page_list[MAX_MAP_BATCH_PAGES]; /* IN - gpfn list */
No "unsigned long" in public interfaces. And no variable width types
in interfaces not being subject to compat mode translation.
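One possible shape (a sketch; the XEN_-prefixed constant name is merely an
illustration of the naming remark above):

    #define XEN_HVM_MAP_BATCH_PAGES 128 /* hypothetical name */
    struct xen_hvm_map_pages_to_ioreq_server {
        domid_t domid;     /* IN - domain to be serviced */
        ioservid_t id;     /* IN - server id */
        uint16_t set;      /* IN - 1: map pages, 0: unmap pages */
        uint16_t nr_pages; /* IN - number of pages */
        uint64_aligned_t page_list[XEN_HVM_MAP_BATCH_PAGES]; /* IN - gpfn list */
    };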
Jan