Displaying 2 results from an estimated 2 matches for "ticket_batch_mask".
2014 Jun 28
2
[RFC PATCH v2] Implement Batched (group) ticket lock
...a single CPU anywhere
@@ -49,7 +50,42 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}
+static int __ticket_lock_get_batch_mask(void)
+{
+	if (static_key_false(&paravirt_ticketlocks_enabled))
+ return TICKET_BATCH_MASK;
+ else
+ return TICKET_BATCH_MASK_NATIVE;
+}
+
+static void __ticket_lock_batch_spin(arch_spinlock_t *lock, __ticket_t ticket)
+{
+	if (static_key_false(&paravirt_ticketlocks_enabled)) {
+ register struct __raw_tickets inc, new;
+
+ inc.head = ACCESS_ONCE(lock->tickets.head);
+ barrier(...
2014 Jun 28
2
[RFC PATCH v2] Implement Batched (group) ticket lock
...a single CPU anywhere
@@ -49,7 +50,42 @@ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}
+static int __ticket_lock_get_batch_mask(void)
+{
+	if (static_key_false(&paravirt_ticketlocks_enabled))
+ return TICKET_BATCH_MASK;
+ else
+ return TICKET_BATCH_MASK_NATIVE;
+}
+
+static void __ticket_lock_batch_spin(arch_spinlock_t *lock, __ticket_t ticket)
+{
+	if (static_key_false(&paravirt_ticketlocks_enabled)) {
+ register struct __raw_tickets inc, new;
+
+ inc.head = ACCESS_ONCE(lock->tickets.head);
+ barrier(...