[Xen-devel] [PATCH v7 6/9] spinlock: Introduce spin_lock_cb()
While waiting for a lock we may want to periodically run some code. This
code may, for example, allow the caller to release resources held by it
that are no longer needed in the critical section protected by the lock.

Specifically, this feature will be needed by the scrubbing code, where the
scrubber, while waiting for the heap lock so it can merge back clean pages,
may be asked by the page allocator (which currently holds the lock) to
abort the merge and release the buddy page head that the allocator wants.

We could use spin_trylock(), but since it doesn't take a lock ticket it may
be a long time before the lock is acquired. Instead we add spin_lock_cb(),
which allows us to grab a ticket and execute a callback while waiting. This
callback is executed on every iteration of the spinlock waiting loop.

Since we may be sleeping in the lock until it is released, we need a
mechanism that makes sure the callback has a chance to run. We add
spin_lock_kick(), which will wake up the waiter.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/spinlock.c      | 9 ++++++++-
 xen/include/xen/spinlock.h | 8 ++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 2a06406..3c1caae 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -129,7 +129,7 @@ static always_inline u16 observe_head(spinlock_tickets_t *t)
     return read_atomic(&t->head);
 }
 
-void _spin_lock(spinlock_t *lock)
+void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
 {
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR;
@@ -140,6 +140,8 @@ void _spin_lock(spinlock_t *lock)
     while ( tickets.tail != observe_head(&lock->tickets) )
     {
         LOCK_PROFILE_BLOCK;
+        if ( unlikely(cb) )
+            cb(data);
         arch_lock_relax();
     }
     LOCK_PROFILE_GOT;
@@ -147,6 +149,11 @@ void _spin_lock(spinlock_t *lock)
     arch_lock_acquire_barrier();
 }
 
+void _spin_lock(spinlock_t *lock)
+{
+    _spin_lock_cb(lock, NULL, NULL);
+}
+
 void _spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index c1883bd..91bfb95 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -153,6 +153,7 @@ typedef struct spinlock {
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
 void _spin_lock(spinlock_t *lock);
+void _spin_lock_cb(spinlock_t *lock, void (*cond)(void *), void *data);
 void _spin_lock_irq(spinlock_t *lock);
 unsigned long _spin_lock_irqsave(spinlock_t *lock);
 
@@ -169,6 +170,7 @@ void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);
 
 #define spin_lock(l)                  _spin_lock(l)
+#define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
 #define spin_lock_irqsave(l, f)                                 \
     ({                                                          \
@@ -190,6 +192,12 @@ void _spin_unlock_recursive(spinlock_t *lock);
     1 : ({ local_irq_restore(flags); 0; });                     \
     })
 
+#define spin_lock_kick(l)                                       \
+({                                                              \
+    smp_mb();                                                   \
+    arch_lock_signal();                                         \
+})
+
 /* Ensure a lock is quiescent between two critical operations. */
 #define spin_barrier(l) _spin_barrier(l)
 
-- 
1.8.3.1
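For illustration, here is a minimal sketch of how a caller on each side of
the lock might use the new interface. This is not code from the series:
only spin_lock_cb(), spin_lock_kick() and the heap_lock come from these
patches; the abort flag, the scrub_wait_state structure and all function
names below are hypothetical stand-ins for what the later scrubbing
patches actually do.

/*
 * Hypothetical sketch, not part of this patch: how scrubbing code might
 * use spin_lock_cb()/spin_lock_kick().  Only spin_lock_cb(),
 * spin_lock_kick() and heap_lock come from this series; the flag, struct
 * and function names are invented for illustration.
 */
#include <xen/spinlock.h>
#include <xen/mm.h>

static bool scrub_abort_req;      /* set by the allocator (assumption) */

struct scrub_wait_state {
    struct page_info *buddy;      /* buddy head being merged back */
    bool released;                /* buddy already handed back? */
};

/* Runs on every iteration of the ticket-wait loop in _spin_lock_cb(). */
static void check_and_release_buddy(void *data)
{
    struct scrub_wait_state *st = data;

    if ( st->released || !ACCESS_ONCE(scrub_abort_req) )
        return;

    /* Hand the buddy head back to the allocator while still queued. */
    st->released = true;
}

/* Scrubber side: wants the heap lock to merge clean pages back. */
static void merge_clean_pages(struct page_info *buddy)
{
    struct scrub_wait_state st = { .buddy = buddy, .released = false };

    /* Take a ticket; poll for an abort request while waiting in line. */
    spin_lock_cb(&heap_lock, check_and_release_buddy, &st);

    if ( !st.released )
    {
        /* Merge the now-clean buddy back into the heap. */
    }

    spin_unlock(&heap_lock);
}

/* Allocator side, currently holding heap_lock: */
static void request_merge_abort(void)
{
    ACCESS_ONCE(scrub_abort_req) = true;
    spin_lock_kick(&heap_lock);   /* wake a waiter sleeping in the lock */
}

Two notes on the design, as far as this series defines it: the smp_mb() in
spin_lock_kick() ensures the waiter observes whatever state the kicker
wrote (here, the abort flag) before it is woken. The wake-up itself
matters on Arm, where arch_lock_relax() is a wfe() and a waiter may
literally be asleep until the next event; on x86 arch_lock_signal() is a
no-op and the waiter is simply spinning, so the callback runs on every
loop iteration anyway.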