[xen master] xen/spinlock: split recursive spinlocks from normal ones
commit bfbec024b83e2705b40853468e7baa4aac0e7f1f
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Mon Apr 8 09:19:34 2024 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Apr 8 09:19:34 2024 +0200

    xen/spinlock: split recursive spinlocks from normal ones

    Recursive and normal spinlocks share the same data structure for
    representation of the lock. This has two major disadvantages:

    - it is not clear from the definition of a lock whether it is intended
      to be used recursively or not, while a mixture of both usage
      variants needs to be supported

    - in production builds (builds without CONFIG_DEBUG_LOCKS) the needed
      data size of an ordinary spinlock is 8 bytes instead of 4, due to
      the additional recursion data (and, associated with that, the rwlock
      data uses 12 bytes instead of only 8)

    Fix that by introducing a struct spinlock_recursive for recursive
    spinlocks only, and switch recursive spinlock functions to require
    pointers to this new struct. This allows the correct usage to be
    checked at build time.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/spinlock.c      | 50 ++++++++++++++++++++++++++++++++
 xen/include/xen/spinlock.h | 72 +++++++++++++++++++++++++++++++++++----------
 2 files changed, 105 insertions(+), 17 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 6572c76114..5aaca49a61 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -545,6 +545,56 @@ void _rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
     local_irq_restore(flags);
 }
 
+bool _nrspin_trylock(rspinlock_t *lock)
+{
+    check_lock(&lock->debug, true);
+
+    if ( unlikely(lock->recurse_cpu != SPINLOCK_NO_CPU) )
+        return false;
+
+    return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+void _nrspin_lock(rspinlock_t *lock)
+{
+    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+                     NULL);
+}
+
+void _nrspin_unlock(rspinlock_t *lock)
+{
+    spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+void _nrspin_lock_irq(rspinlock_t *lock)
+{
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    _nrspin_lock(lock);
+}
+
+void _nrspin_unlock_irq(rspinlock_t *lock)
+{
+    _nrspin_unlock(lock);
+    local_irq_enable();
+}
+
+unsigned long _nrspin_lock_irqsave(rspinlock_t *lock)
+{
+    unsigned long flags;
+
+    local_irq_save(flags);
+    _nrspin_lock(lock);
+
+    return flags;
+}
+
+void _nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
+{
+    _nrspin_unlock(lock);
+    local_irq_restore(flags);
+}
+
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 struct lock_profile_anc {
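An aside for readers of the archive: the functions added above implement non-recursive acquisition of a lock that also supports recursion. The following minimal userspace model (all model_* names are invented here, with C11 atomics standing in for Xen's primitives; this is a sketch, not the hypervisor code) illustrates why _nrspin_trylock() bails out as soon as recurse_cpu is set: a recursive owner holds the ticket part of the lock for the whole recursion, so attempting the tickets alone could not succeed anyway.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MODEL_NO_CPU 0xfffu

struct model_rspinlock {
    atomic_uint head_tail;  /* ticket lock: tail in high, head in low 16 bits */
    uint16_t recurse_cpu;   /* CPU currently holding the lock recursively */
    uint8_t recurse_cnt;    /* recursion depth of that CPU */
};

/* Non-recursive trylock: refuse early if someone holds the lock recursively. */
static bool model_nrspin_trylock(struct model_rspinlock *l)
{
    unsigned int old = atomic_load(&l->head_tail);

    if ( l->recurse_cpu != MODEL_NO_CPU )
        return false;                      /* recursive owner present */

    if ( (old >> 16) != (old & 0xffffu) )
        return false;                      /* tail != head: lock is taken */

    /* Grab the next ticket (bump the tail) only if the lock is still free. */
    return atomic_compare_exchange_strong(&l->head_tail, &old, old + 0x10000u);
}

static void model_nrspin_unlock(struct model_rspinlock *l)
{
    atomic_fetch_add(&l->head_tail, 1u);   /* advance the head: release */
}

int main(void)
{
    struct model_rspinlock l = { .recurse_cpu = MODEL_NO_CPU };

    /* Succeeds on a free lock, fails while held, succeeds after unlock. */
    if ( !model_nrspin_trylock(&l) || model_nrspin_trylock(&l) )
        return 1;
    model_nrspin_unlock(&l);
    return model_nrspin_trylock(&l) ? 0 : 1;
}

The header changes that expose these functions follow.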
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 148be1e116..f49ba928f0 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -77,8 +77,6 @@ union lock_debug { };
  */
 
 struct spinlock;
-/* Temporary hack until a dedicated struct rspinlock is existing. */
-#define rspinlock spinlock
 
 struct lock_profile {
     struct lock_profile *next;       /* forward link */
@@ -110,6 +108,10 @@ struct lock_profile_qhead {
         __used_section(".lockprofile.data") =                                \
         &lock_profile_data__##name
 #define SPIN_LOCK_UNLOCKED_(x) {                                             \
+    .debug = LOCK_DEBUG_,                                                    \
+    .profile = x,                                                            \
+}
+#define RSPIN_LOCK_UNLOCKED_(x) {                                            \
     .recurse_cpu = SPINLOCK_NO_CPU,                                          \
     .debug = LOCK_DEBUG_,                                                    \
     .profile = x,                                                            \
@@ -119,8 +121,9 @@ struct lock_profile_qhead {
     spinlock_t l = SPIN_LOCK_UNLOCKED_(NULL);                                \
     static struct lock_profile lock_profile_data__##l = LOCK_PROFILE_(l);    \
     LOCK_PROFILE_PTR_(l)
 
+#define RSPIN_LOCK_UNLOCKED RSPIN_LOCK_UNLOCKED_(NULL)
 #define DEFINE_RSPINLOCK(l)                                                  \
-    rspinlock_t l = SPIN_LOCK_UNLOCKED_(NULL);                               \
+    rspinlock_t l = RSPIN_LOCK_UNLOCKED_(NULL);                              \
     static struct lock_profile lock_profile_data__##l = RLOCK_PROFILE_(l);   \
     LOCK_PROFILE_PTR_(l)
@@ -145,8 +148,11 @@ struct lock_profile_qhead {
 
 #define spin_lock_init_prof(s, l)                                            \
     spin_lock_init_prof__(s, l, lock, spinlock_t, false)
-#define rspin_lock_init_prof(s, l)                                           \
-    spin_lock_init_prof__(s, l, rlock, rspinlock_t, true)
+#define rspin_lock_init_prof(s, l) do {                                      \
+        spin_lock_init_prof__(s, l, rlock, rspinlock_t, true);               \
+        (s)->l.recurse_cpu = SPINLOCK_NO_CPU;                                \
+        (s)->l.recurse_cnt = 0;                                              \
+    } while (0)
 
 void _lock_profile_register_struct(
     int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
@@ -168,11 +174,14 @@ struct lock_profile_qhead { };
 struct lock_profile { };
 
 #define SPIN_LOCK_UNLOCKED {                                                 \
+    .debug = LOCK_DEBUG_,                                                    \
+}
+#define RSPIN_LOCK_UNLOCKED {                                                \
     .recurse_cpu = SPINLOCK_NO_CPU,                                          \
     .debug = LOCK_DEBUG_,                                                    \
 }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
-#define DEFINE_RSPINLOCK(l) rspinlock_t l = SPIN_LOCK_UNLOCKED
+#define DEFINE_RSPINLOCK(l) rspinlock_t l = RSPIN_LOCK_UNLOCKED
 
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
 #define rspin_lock_init_prof(s, l) rspin_lock_init(&((s)->l))
@@ -193,6 +202,14 @@ typedef union {
 #define SPINLOCK_TICKET_INC { .head_tail = 0x10000, }
 
 typedef struct spinlock {
+    spinlock_tickets_t tickets;
+    union lock_debug debug;
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
+    struct lock_profile *profile;
+#endif
+} spinlock_t;
+
+typedef struct rspinlock {
     spinlock_tickets_t tickets;
     uint16_t recurse_cpu:SPINLOCK_CPU_BITS;
 #define SPINLOCK_NO_CPU ((1u << SPINLOCK_CPU_BITS) - 1)
@@ -203,12 +220,10 @@ typedef struct spinlock {
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
     struct lock_profile *profile;
 #endif
-} spinlock_t;
-
-typedef spinlock_t rspinlock_t;
+} rspinlock_t;
 
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
-#define rspin_lock_init(l) (*(l) = (rspinlock_t)SPIN_LOCK_UNLOCKED)
+#define rspin_lock_init(l) (*(l) = (rspinlock_t)RSPIN_LOCK_UNLOCKED)
 
 void _spin_lock(spinlock_t *lock);
 void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data);
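The typedef split above is where the build-time checking promised in the commit message comes from: spinlock_t and rspinlock_t are now distinct struct types, so passing one where the other is expected is a compile error instead of silent misuse. A quick standalone illustration (stub types with empty lock bodies, nothing taken from the real header):

/* Stub model of the split; real fields and locking omitted on purpose. */
typedef struct spinlock { unsigned int tickets; } spinlock_t;
typedef struct rspinlock {
    unsigned int tickets;
    unsigned int recurse_cpu;
} rspinlock_t;

static void spin_lock(spinlock_t *lock) { (void)lock; }
static void rspin_lock(rspinlock_t *lock) { (void)lock; }

static rspinlock_t domain_lock;

void ok(void)
{
    rspin_lock(&domain_lock);   /* matches the lock's declared type */
}

#if 0
void rejected(void)
{
    spin_lock(&domain_lock);    /* with the old "#define rspinlock spinlock"
                                   alias this compiled; now the compiler
                                   rejects the incompatible pointer type */
}
#endif

The final hunk replaces the old nrspin_* aliases onto plain spin_*() with typed entry points: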
@@ -312,12 +327,35 @@ static always_inline void rspin_lock(rspinlock_t *lock)
 #define rspin_barrier(l) _rspin_barrier(l)
 #define rspin_is_locked(l) _rspin_is_locked(l)
 
-#define nrspin_trylock(l) spin_trylock(l)
-#define nrspin_lock(l) spin_lock(l)
-#define nrspin_unlock(l) spin_unlock(l)
-#define nrspin_lock_irq(l) spin_lock_irq(l)
-#define nrspin_unlock_irq(l) spin_unlock_irq(l)
-#define nrspin_lock_irqsave(l, f) spin_lock_irqsave(l, f)
-#define nrspin_unlock_irqrestore(l, f) spin_unlock_irqrestore(l, f)
+bool _nrspin_trylock(rspinlock_t *lock);
+void _nrspin_lock(rspinlock_t *lock);
+#define nrspin_lock_irqsave(l, f)                                            \
+    ({                                                                       \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));                    \
+        (f) = _nrspin_lock_irqsave(l);                                       \
+        block_lock_speculation();                                            \
+    })
+unsigned long _nrspin_lock_irqsave(rspinlock_t *lock);
+void _nrspin_unlock(rspinlock_t *lock);
+void _nrspin_lock_irq(rspinlock_t *lock);
+void _nrspin_unlock_irq(rspinlock_t *lock);
+void _nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+
+static always_inline void nrspin_lock(rspinlock_t *lock)
+{
+    _nrspin_lock(lock);
+    block_lock_speculation();
+}
+
+static always_inline void nrspin_lock_irq(rspinlock_t *l)
+{
+    _nrspin_lock_irq(l);
+    block_lock_speculation();
+}
+
+#define nrspin_trylock(l) lock_evaluate_nospec(_nrspin_trylock(l))
+#define nrspin_unlock(l) _nrspin_unlock(l)
+#define nrspin_unlock_irqrestore(l, f) _nrspin_unlock_irqrestore(l, f)
+#define nrspin_unlock_irq(l) _nrspin_unlock_irq(l)
 
 #endif /* __SPINLOCK_H__ */
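Taken together, a hypothetical caller of the new interface looks like this (payload_lock and the two functions are invented for the example; nrspin_lock_irqsave(), nrspin_unlock_irqrestore() and rspin_lock() are declared above, and rspin_unlock() is assumed from the same rework):

static DEFINE_RSPINLOCK(payload_lock);

/* Non-recursive take of the recursive lock, IRQ-save variant. */
void payload_update(void)
{
    unsigned long flags;

    nrspin_lock_irqsave(&payload_lock, flags);
    /* ... critical section, no re-entry allowed while held this way ... */
    nrspin_unlock_irqrestore(&payload_lock, flags);
}

/* Recursive take: the same CPU may acquire the lock again while holding it. */
void payload_update_nested(void)
{
    rspin_lock(&payload_lock);
    rspin_lock(&payload_lock);   /* recursion, tracked via recurse_cnt */
    /* ... */
    rspin_unlock(&payload_lock);
    rspin_unlock(&payload_lock);
}

Mixing the two styles on the same lock remains fine by design (that is the "mixture of both usage variants" the commit message says must stay supported), but every acquisition now has to go through an interface whose type matches rspinlock_t.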
-- 
generated by git-patchbot for /home/xen/git/xen.git#master