Re: [Xen-devel] [PATCH] Allow programatic iomem permissions
On Fri, 2007-07-13 at 17:20 +0100, Keir Fraser wrote:
> On 13/7/07 16:54, "Kieran Mansley" <kmansley@xxxxxxxxxxxxxx> wrote:
>
> > Oops, sorry, forgot to refresh the patch before sending. This one
> > should compile better!
>
> The put_page[_and_type] also needs to be on the far side of the TLB flush.
> This is because if a refcount falls to zero then the page can change type
> (e.g., to pagetable page) or be recycled to another domain! This is a safety
> issue for Xen rather than for the granter. :-)

OK, here is another spin of the patch that should address all the
concerns mentioned so far.  I'm pretty sure that (apart from the change
in when the TLB flush happens) the behaviour is the same as the old
version, i.e. the same code will be executed in both cases, even though
the function is now split in two, and even if something goes wrong.  I
would of course welcome more pairs of eyes to check this.

As this modifies a fairly heavily used and vital region of Xen, I'm
guessing there is a unit test somewhere that I could run (or have run
by whoever controls it).  Can anyone point me at it?  If there is no
such test, knowing that would help too.

If this patch is acceptable (and once it is better tested) I'll repost
it signed off, together with the change to redefine the
grant_operation_permitted macro and the iomem permission patch that
prompted this in the first place.

Thanks

Kieran

Fix TLB flush on grant unmap

diff -r 1e208016e32e xen/common/grant_table.c
--- a/xen/common/grant_table.c  Wed Jul 18 10:05:04 2007 +0100
+++ b/xen/common/grant_table.c  Wed Jul 18 12:01:41 2007 +0100
@@ -60,13 +60,25 @@ union grant_combo {
 
 /* Used to share code between unmap_grant_ref and unmap_and_replace. */
 struct gnttab_unmap_common {
+    /* Input */
     uint64_t host_addr;
     uint64_t dev_bus_addr;
     uint64_t new_addr;
     grant_handle_t handle;
+    /* Return */
     int16_t status;
+
+    /* Shared state between *_unmap and *_unmap_complete */
+    u16 flags;
+    unsigned long frame;
+    struct grant_mapping *map;
+    struct domain *rd;
 };
+
+/* Number of unmap operations that are done between each TLB flush */
+#define GNTTAB_UNMAP_BATCH_SIZE 32
+
 
 #define PIN_FAIL(_lbl, _rc, _f, _a...)          \
     do {                                        \
@@ -411,18 +423,14 @@ __gnttab_unmap_common(
     struct gnttab_unmap_common *op)
 {
     domid_t dom;
-    grant_ref_t ref;
     struct domain *ld, *rd;
     struct active_grant_entry *act;
     grant_entry_t *sha;
-    struct grant_mapping *map;
-    u16 flags;
     s16 rc = 0;
-    unsigned long frame;
 
     ld = current->domain;
 
-    frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
+    op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
 
     if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
     {
@@ -431,20 +439,19 @@ __gnttab_unmap_common(
         return;
     }
 
-    map = &maptrack_entry(ld->grant_table, op->handle);
-
-    if ( unlikely(!map->flags) )
+    op->map = &maptrack_entry(ld->grant_table, op->handle);
+
+    if ( unlikely(!op->map->flags) )
     {
         gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
         op->status = GNTST_bad_handle;
         return;
     }
 
-    dom = map->domid;
-    ref = map->ref;
-    flags = map->flags;
-
-    if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
+    dom = op->map->domid;
+    op->flags = op->map->flags;
+
+    if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
     {
         /* This can happen when a grant is implicitly unmapped. */
         gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
@@ -456,71 +463,47 @@ __gnttab_unmap_common(
 
     spin_lock(&rd->grant_table->lock);
 
-    act = &active_entry(rd->grant_table, ref);
-    sha = &shared_entry(rd->grant_table, ref);
-
-    if ( frame == 0 )
-    {
-        frame = act->frame;
+    act = &active_entry(rd->grant_table, op->map->ref);
+    sha = &shared_entry(rd->grant_table, op->map->ref);
+
+    if ( op->frame == 0 )
+    {
+        op->frame = act->frame;
     }
     else
     {
-        if ( unlikely(frame != act->frame) )
+        if ( unlikely(op->frame != act->frame) )
             PIN_FAIL(unmap_out, GNTST_general_error,
                      "Bad frame number doesn't match gntref.\n");
-        if ( flags & GNTMAP_device_map )
+        if ( op->flags & GNTMAP_device_map )
         {
             ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
-            map->flags &= ~GNTMAP_device_map;
-            if ( flags & GNTMAP_readonly )
-            {
+            op->map->flags &= ~GNTMAP_device_map;
+            if ( op->flags & GNTMAP_readonly )
                 act->pin -= GNTPIN_devr_inc;
-                put_page(mfn_to_page(frame));
-            }
             else
-            {
                 act->pin -= GNTPIN_devw_inc;
-                put_page_and_type(mfn_to_page(frame));
-            }
-        }
-    }
-
-    if ( (op->host_addr != 0) && (flags & GNTMAP_host_map) )
+        }
+    }
+
+    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
     {
         if ( (rc = replace_grant_host_mapping(op->host_addr,
-                                              frame, op->new_addr, flags)) < 0 )
+                                              op->frame, op->new_addr,
+                                              op->flags)) < 0 )
            goto unmap_out;
 
        ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
-        map->flags &= ~GNTMAP_host_map;
-        if ( flags & GNTMAP_readonly )
-        {
+        op->map->flags &= ~GNTMAP_host_map;
+        if ( op->flags & GNTMAP_readonly )
             act->pin -= GNTPIN_hstr_inc;
-            put_page(mfn_to_page(frame));
-        }
        else
-        {
            act->pin -= GNTPIN_hstw_inc;
-            put_page_and_type(mfn_to_page(frame));
-        }
-    }
-
-    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
-    {
-        map->flags = 0;
-        put_maptrack_handle(ld->grant_table, op->handle);
     }
 
     /* If just unmapped a writable mapping, mark as dirtied */
-    if ( !(flags & GNTMAP_readonly) )
-        gnttab_mark_dirty(rd, frame);
-
-    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
-         !(flags & GNTMAP_readonly) )
-        gnttab_clear_flag(_GTF_writing, &sha->flags);
-
-    if ( act->pin == 0 )
-        gnttab_clear_flag(_GTF_reading, &sha->flags);
+    if ( !(op->flags & GNTMAP_readonly) )
+        gnttab_mark_dirty(rd, op->frame);
 
  unmap_out:
     op->status = rc;
@@ -529,78 +512,205 @@ __gnttab_unmap_common(
 }
 
 static void
+__gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
+{
+    struct domain *ld, *rd;
+    struct active_grant_entry *act;
+    grant_entry_t *sha;
+
+    rd = op->rd;
+
+    if ( rd == NULL ) {
+        /*
+         * Suggests that __gnttab_unmap_common failed in
+         * rcu_lock_domain_by_id() or earlier, and so we have nothing
+         * to complete
+         */
+        return;
+    }
+
+    ld = current->domain;
+
+    rcu_lock_domain(rd);
+    spin_lock(&rd->grant_table->lock);
+
+    act = &active_entry(rd->grant_table, op->map->ref);
+    sha = &shared_entry(rd->grant_table, op->map->ref);
+
+    if ( unlikely(op->frame != act->frame) )
+    {
+        /*
+         * Suggests that __gnttab_unmap_common failed early and so
+         * nothing further to do
+         */
+        goto unmap_out;
+    }
+
+    if ( op->flags & GNTMAP_device_map )
+    {
+        if ( op->flags & GNTMAP_readonly )
+            put_page(mfn_to_page(op->frame));
+        else
+            put_page_and_type(mfn_to_page(op->frame));
+    }
+
+    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
+    {
+        if ( op->status != 0 )
+        {
+            /*
+             * Suggests that __gnttab_unmap_common failed in
+             * replace_grant_host_mapping() so nothing further to do
+             */
+            goto unmap_out;
+        }
+
+        if ( op->flags & GNTMAP_readonly )
+            put_page(mfn_to_page(op->frame));
+        else
+            put_page_and_type(mfn_to_page(op->frame));
+    }
+
+    if ( (op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
+    {
+        op->map->flags = 0;
+        put_maptrack_handle(ld->grant_table, op->handle);
+    }
+
+    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
+         !(op->flags & GNTMAP_readonly) )
+        gnttab_clear_flag(_GTF_writing, &sha->flags);
+
+    if ( act->pin == 0 )
+        gnttab_clear_flag(_GTF_reading, &sha->flags);
+
+ unmap_out:
+    spin_unlock(&rd->grant_table->lock);
+    rcu_unlock_domain(rd);
+}
+
+static void
 __gnttab_unmap_grant_ref(
-    struct gnttab_unmap_grant_ref *op)
-{
-    struct gnttab_unmap_common common = {
-        .host_addr = op->host_addr,
-        .dev_bus_addr = op->dev_bus_addr,
-        .handle = op->handle,
-    };
-
-    __gnttab_unmap_common(&common);
-    op->status = common.status;
-}
+    struct gnttab_unmap_grant_ref *op,
+    struct gnttab_unmap_common *common)
+{
+    common->host_addr = op->host_addr;
+    common->dev_bus_addr = op->dev_bus_addr;
+    common->handle = op->handle;
+
+    /* Initialise these in case common contains old state */
+    common->new_addr = 0;
+    common->rd = NULL;
+
+    __gnttab_unmap_common(common);
+    op->status = common->status;
+}
+
 
 static long
 gnttab_unmap_grant_ref(
     XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
 {
-    int i;
+    int i, c, partial_done, done = 0;
     struct gnttab_unmap_grant_ref op;
-
-    for ( i = 0; i < count; i++ )
-    {
-        if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
-            goto fault;
-        __gnttab_unmap_grant_ref(&op);
-        if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
-            goto fault;
-    }
-
-    flush_tlb_mask(current->domain->domain_dirty_cpumask);
+    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+
+    while (count != 0) {
+        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
+        partial_done = 0;
+
+        for ( i = 0; i < c; i++ )
+        {
+            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
+                goto fault;
+            __gnttab_unmap_grant_ref(&op, &(common[i]));
+            ++partial_done;
+            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
+                goto fault;
+        }
+
+        flush_tlb_mask(current->domain->domain_dirty_cpumask);
+
+        for ( i = 0; i < partial_done; i++ )
+        {
+            __gnttab_unmap_common_complete(&(common[i]));
+        }
+
+        count -= c;
+        done += c;
+    }
+
     return 0;
 
 fault:
     flush_tlb_mask(current->domain->domain_dirty_cpumask);
-    return -EFAULT;
+
+    for ( i = 0; i < partial_done; i++ )
+    {
+        __gnttab_unmap_common_complete(&(common[i]));
+    }
+    return -EFAULT;
 }
 
 static void
 __gnttab_unmap_and_replace(
-    struct gnttab_unmap_and_replace *op)
-{
-    struct gnttab_unmap_common common = {
-        .host_addr = op->host_addr,
-        .new_addr = op->new_addr,
-        .handle = op->handle,
-    };
-
-    __gnttab_unmap_common(&common);
-    op->status = common.status;
+    struct gnttab_unmap_and_replace *op,
+    struct gnttab_unmap_common *common)
+{
+    common->host_addr = op->host_addr;
+    common->new_addr = op->new_addr;
+    common->handle = op->handle;
+
+    /* Initialise these in case common contains old state */
+    common->dev_bus_addr = 0;
+    common->rd = NULL;
+
+    __gnttab_unmap_common(common);
+    op->status = common->status;
 }
 
 static long
 gnttab_unmap_and_replace(
     XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
 {
-    int i;
+    int i, c, partial_done, done = 0;
    struct gnttab_unmap_and_replace op;
-
-    for ( i = 0; i < count; i++ )
-    {
-        if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
-            goto fault;
-        __gnttab_unmap_and_replace(&op);
-        if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
-            goto fault;
-    }
-
-    flush_tlb_mask(current->domain->domain_dirty_cpumask);
+    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+
+    while (count != 0) {
+        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
+        partial_done = 0;
+
+        for ( i = 0; i < c; i++ )
+        {
+            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
+                goto fault;
+            __gnttab_unmap_and_replace(&op, &(common[i]));
+            ++partial_done;
+            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
+        }
+
+        flush_tlb_mask(current->domain->domain_dirty_cpumask);
+
+        for ( i = 0; i < partial_done; i++ )
+        {
+            __gnttab_unmap_common_complete(&(common[i]));
+        }
+
+        count -= c;
+        done += c;
+    }
+
    return 0;
 
 fault:
     flush_tlb_mask(current->domain->domain_dirty_cpumask);
+
+    for ( i = 0; i < partial_done; i++ )
+    {
+        __gnttab_unmap_common_complete(&(common[i]));
+    }
     return -EFAULT;
 }
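For anyone reviewing the above, the shape of the new control flow is roughly
the following.  This is a simplified, standalone sketch of the batching
pattern only, not the actual Xen code; phase1_unmap and phase2_complete are
just illustrative stand-ins for __gnttab_unmap_common and
__gnttab_unmap_common_complete.

/*
 * Standalone sketch (not Xen code) of the batched, two-phase unmap flow
 * the patch introduces.  Phase 1 does the per-entry bookkeeping, a single
 * barrier per batch stands in for flush_tlb_mask(), and only phase 2
 * drops the page references, once no stale translations can remain.
 */
#include <stdio.h>

#define BATCH_SIZE 32                  /* mirrors GNTTAB_UNMAP_BATCH_SIZE */

struct unmap_state {
    int frame;                         /* carried from phase 1 to phase 2 */
    int status;
};

static void phase1_unmap(struct unmap_state *st, int req)
{
    /* Phase 1: tear down the mapping and record what phase 2 needs. */
    st->frame = req;
    st->status = 0;
}

static void phase2_complete(const struct unmap_state *st)
{
    /* Phase 2: only now is it safe to drop the reference, because the
     * frame can no longer be reached through a stale TLB entry. */
    if (st->status == 0)
        printf("put_page(frame %d)\n", st->frame);
}

int main(void)
{
    struct unmap_state batch[BATCH_SIZE];
    int count = 40, done = 0, i, c;

    while (count != 0) {
        c = count < BATCH_SIZE ? count : BATCH_SIZE;

        for (i = 0; i < c; i++)
            phase1_unmap(&batch[i], done + i);

        printf("-- TLB flush for this batch --\n");  /* flush_tlb_mask() */

        for (i = 0; i < c; i++)
            phase2_complete(&batch[i]);

        count -= c;
        done += c;
    }
    return 0;
}

The point of the split, as Keir noted above, is that the put_page() /
put_page_and_type() calls only run in the second pass, after the flush, so a
frame whose refcount drops to zero cannot still be reachable through a stale
TLB entry on another CPU.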
Attachment: unmap_tlb_fix

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel