
Re: [PATCH v3 1/2] x86/mm: add API for marking only part of a MMIO page read only



On Tue, May 21, 2024 at 05:16:58PM +0200, Jan Beulich wrote:
> On 21.05.2024 04:54, Marek Marczykowski-Górecki wrote:
> > --- a/xen/arch/x86/include/asm/mm.h
> > +++ b/xen/arch/x86/include/asm/mm.h
> > @@ -522,9 +522,27 @@ extern struct rangeset *mmio_ro_ranges;
> >  void memguard_guard_stack(void *p);
> >  void memguard_unguard_stack(void *p);
> >  
> > +/*
> > + * Add more precise r/o marking for a MMIO page. Range specified here
> > + * will still be R/O, but the rest of the page (not marked as R/O via another
> > + * call) will have writes passed through.
> > + * The start address and the size must be aligned to SUBPAGE_MMIO_RO_ALIGN.
> > + *
> > + * This API cannot be used for overlapping ranges, nor for pages already added
> > + * to mmio_ro_ranges separately.
> > + *
> > + * Return values:
> > + *  - negative: error
> > + *  - 0: success
> > + */
> > +#define SUBPAGE_MMIO_RO_ALIGN 8
> 
> This isn't just alignment, but also (and perhaps more importantly) 
> granularity.
> I think the name wants to express this.

SUBPAGE_MMIO_RO_GRANULARITY? Sounds a bit long...

> 
> > @@ -4910,6 +4921,260 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> >      return rc;
> >  }
> >  
> > +/*
> > + * Mark part of the page as R/O.
> > + * Returns:
> > + * - 0 on success - first range in the page
> > + * - 1 on success - subsequent range in the page
> > + * - <0 on error
> > + *
> > + * This needs subpage_ro_lock already taken */
> 
> Nit: Comment style (full stop and */ on its own line).
> 
> > +static int __init subpage_mmio_ro_add_page(
> > +    mfn_t mfn, unsigned int offset_s, unsigned int offset_e)
> > +{
> > +    struct subpage_ro_range *entry = NULL, *iter;
> > +    unsigned int i;
> > +
> > +    list_for_each_entry(iter, &subpage_ro_ranges, list)
> > +    {
> > +        if ( mfn_eq(iter->mfn, mfn) )
> > +        {
> > +            entry = iter;
> > +            break;
> > +        }
> > +    }
> > +    if ( !entry )
> > +    {
> > +        /* iter == NULL marks it was a newly allocated entry */
> > +        iter = NULL;
> > +        entry = xzalloc(struct subpage_ro_range);
> > +        if ( !entry )
> > +            return -ENOMEM;
> > +        entry->mfn = mfn;
> > +    }
> > +
> > +    for ( i = offset_s; i <= offset_e; i += SUBPAGE_MMIO_RO_ALIGN )
> > +    {
> > +        int oldbit = __test_and_set_bit(i / SUBPAGE_MMIO_RO_ALIGN,
> > +                                        entry->ro_qwords);
> 
> Why int, not bool?

Because __test_and_set_bit() returns int, but I can change it to bool if
you prefer.
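
Something along these lines (untested sketch):

    bool oldbit = __test_and_set_bit(i / SUBPAGE_MMIO_RO_ALIGN,
                                     entry->ro_qwords);
    ASSERT(!oldbit);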

> > +        ASSERT(!oldbit);
> > +    }
> > +
> > +    if ( !iter )
> > +        list_add(&entry->list, &subpage_ro_ranges);
> > +
> > +    return iter ? 1 : 0;
> > +}
> > +
> > +/* This needs subpage_ro_lock already taken */
> > +static void __init subpage_mmio_ro_remove_page(
> > +    mfn_t mfn,
> > +    int offset_s,
> > +    int offset_e)
> 
> Can either of these be negative? The more that ...

Right, I can change them to unsigned. They are already unsigned in
subpage_mmio_ro_add_page().
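
So the prototype would become (sketch):

    static void __init subpage_mmio_ro_remove_page(
        mfn_t mfn,
        unsigned int offset_s,
        unsigned int offset_e)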

> > +{
> > +    struct subpage_ro_range *entry = NULL, *iter;
> > +    unsigned int i;
> 
> ... this is used ...
> 
> > +    list_for_each_entry(iter, &subpage_ro_ranges, list)
> > +    {
> > +        if ( mfn_eq(iter->mfn, mfn) )
> > +        {
> > +            entry = iter;
> > +            break;
> > +        }
> > +    }
> > +    if ( !entry )
> > +        return;
> > +
> > +    for ( i = offset_s; i <= offset_e; i += SUBPAGE_MMIO_RO_ALIGN )
> 
> ... with both of them?
> 
> > +        __clear_bit(i / SUBPAGE_MMIO_RO_ALIGN, entry->ro_qwords);
> > +
> > +    if ( !bitmap_empty(entry->ro_qwords, PAGE_SIZE / SUBPAGE_MMIO_RO_ALIGN) )
> > +        return;
> > +
> > +    list_del(&entry->list);
> > +    if ( entry->mapped )
> > +        iounmap(entry->mapped);
> > +    xfree(entry);
> > +}
> > +
> > +int __init subpage_mmio_ro_add(
> > +    paddr_t start,
> > +    size_t size)
> > +{
> > +    mfn_t mfn_start = maddr_to_mfn(start);
> > +    paddr_t end = start + size - 1;
> > +    mfn_t mfn_end = maddr_to_mfn(end);
> > +    unsigned int offset_end = 0;
> > +    int rc;
> > +    bool subpage_start, subpage_end;
> > +
> > +    ASSERT(IS_ALIGNED(start, SUBPAGE_MMIO_RO_ALIGN));
> > +    ASSERT(IS_ALIGNED(size, SUBPAGE_MMIO_RO_ALIGN));
> > +    if ( !IS_ALIGNED(size, SUBPAGE_MMIO_RO_ALIGN) )
> > +        size = ROUNDUP(size, SUBPAGE_MMIO_RO_ALIGN);
> > +
> > +    if ( !size )
> > +        return 0;
> > +
> > +    if ( mfn_eq(mfn_start, mfn_end) )
> > +    {
> > +        /* Both starting and ending parts handled at once */
> > +        subpage_start = PAGE_OFFSET(start) || PAGE_OFFSET(end) != PAGE_SIZE - 1;
> > +        subpage_end = false;
> > +    }
> > +    else
> > +    {
> > +        subpage_start = PAGE_OFFSET(start);
> > +        subpage_end = PAGE_OFFSET(end) != PAGE_SIZE - 1;
> > +    }
> > +
> > +    spin_lock(&subpage_ro_lock);
> > +
> > +    if ( subpage_start )
> > +    {
> > +        offset_end = mfn_eq(mfn_start, mfn_end) ?
> > +                     PAGE_OFFSET(end) :
> > +                     (PAGE_SIZE - 1);
> > +        rc = subpage_mmio_ro_add_page(mfn_start,
> > +                                      PAGE_OFFSET(start),
> > +                                      offset_end);
> > +        if ( rc < 0 )
> > +            goto err_unlock;
> > +        /* Check if not marking R/W part of a page intended to be fully R/O */
> > +        ASSERT(rc || !rangeset_contains_singleton(mmio_ro_ranges,
> > +                                                  mfn_x(mfn_start)));
> > +    }
> > +
> > +    if ( subpage_end )
> > +    {
> > +        rc = subpage_mmio_ro_add_page(mfn_end, 0, PAGE_OFFSET(end));
> > +        if ( rc < 0 )
> > +            goto err_unlock_remove;
> > +        /* Check if not marking R/W part of a page intended to be fully R/O */
> > +        ASSERT(rc || !rangeset_contains_singleton(mmio_ro_ranges,
> > +                                                  mfn_x(mfn_end)));
> > +    }
> > +
> > +    spin_unlock(&subpage_ro_lock);
> > +
> > +    rc = rangeset_add_range(mmio_ro_ranges, mfn_x(mfn_start), mfn_x(mfn_end));
> > +    if ( rc )
> > +        goto err_remove;
> > +
> > +    return 0;
> > +
> > + err_remove:
> > +    spin_lock(&subpage_ro_lock);
> > +    if ( subpage_end )
> > +        subpage_mmio_ro_remove_page(mfn_end, 0, PAGE_OFFSET(end));
> > + err_unlock_remove:
> > +    if ( subpage_start )
> > +        subpage_mmio_ro_remove_page(mfn_start, PAGE_OFFSET(start), offset_end);
> > + err_unlock:
> > +    spin_unlock(&subpage_ro_lock);
> > +    return rc;
> > +}
> > +
> > +static void __iomem *subpage_mmio_get_page(struct subpage_ro_range *entry)
> > +{
> > +    void __iomem *mapped_page;
> > +
> > +    if ( entry->mapped )
> > +        return entry->mapped;
> > +
> > +    mapped_page = ioremap(mfn_x(entry->mfn) << PAGE_SHIFT, PAGE_SIZE);
> 
> mfn_to_maddr() or some such?

Makes sense.
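
I.e. roughly (sketch):

    mapped_page = ioremap(mfn_to_maddr(entry->mfn), PAGE_SIZE);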

> > +    spin_lock(&subpage_ro_lock);
> > +    /* Re-check under the lock */
> > +    if ( entry->mapped )
> > +    {
> > +        spin_unlock(&subpage_ro_lock);
> > +        iounmap(mapped_page);
> 
> What if ioremap() failed?

Good point.
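
I'll add a check for ioremap() failing, something like (sketch):

    mapped_page = ioremap(mfn_to_maddr(entry->mfn), PAGE_SIZE);
    if ( !mapped_page )
        return NULL;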

-- 
Best Regards,
Marek Marczykowski-Górecki
Invisible Things Lab
