Displaying 14 results from an estimated 208 matches for "config_nr_cpus".
2009 May 27
3
Xen 3.2 with Ubuntu 8.04 (64-Bit) on Intel Nehalem (i7)
...with Hyper-Threading, 16 cores in total. As mentioned in the
subject, I am using Ubuntu 8.04 64-bit and Xen 3.2 from the Ubuntu package.
During these tests I ran into some issues that are not clear to me. I
would be thankful for any comments/hints/thoughts on the following topics:
1. I noticed that CONFIG_NR_CPUS is set to 8 in the Ubuntu Xen
kernel, whereas the Ubuntu server kernel has a limit of 64. Is
there a specific reason for the limit of 8 in the Xen kernel?
2. After recompiling the kernel with CONFIG_NR_CPUS=16, /proc/cpuinfo
still shows only 8 cores, but xm vcpu-list shows the 16 co...
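[The symptom described above, /proc/cpuinfo capped at 8 while xm vcpu-list reports 16, is what a compiled-in CPU limit looks like from inside dom0. Below is a minimal userspace sketch for checking what the running kernel actually brings up, using only the standard sysconf() queries; note that on many Xen versions a dom0_max_vcpus= hypervisor boot option can impose a similar cap, so CONFIG_NR_CPUS is not the only suspect.]

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long conf   = sysconf(_SC_NPROCESSORS_CONF); /* CPUs the kernel registered */
	long online = sysconf(_SC_NPROCESSORS_ONLN); /* CPUs currently online */

	printf("configured: %ld, online: %ld\n", conf, online);
	/* If both top out at 8 on 16-thread hardware, the dom0 kernel's
	 * compiled-in CONFIG_NR_CPUS (or a hypervisor-imposed vCPU limit)
	 * is the likely cap, not the hardware. */
	return 0;
}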
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...lock: Pointer to queued spinlock structure
@@ -314,12 +318,6 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
*/
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
- struct mcs_spinlock *prev, *next, *node;
- u32 old, tail;
- int idx;
-
- BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
-
if (pv_enabled())
goto pv_queue;
@@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
queue:
lockevent_inc(lock_slowpath);
pv_queue:
+ __queued_spin_lock_slowpath_queue(lock);
+}
+EXPORT_SYMBOL(queued_spin_lock_slow...
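[The BUILD_BUG_ON being moved in this hunk guards the qspinlock tail encoding: each queued waiter is identified by (cpu + 1) plus a per-CPU nesting index, packed into the upper bits of the 32-bit lock word, so CONFIG_NR_CPUS must fit in _Q_TAIL_CPU_BITS. A standalone sketch of that encoding follows; the bit layout is restated here from the mainline header and should be treated as an assumption.]

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS            8192 /* illustrative; the real value is CONFIG_NR_CPUS */
#define _Q_TAIL_IDX_OFFSET 16
#define _Q_TAIL_IDX_BITS   2                         /* 4 lock-nesting levels per CPU */
#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS   (32 - _Q_TAIL_CPU_OFFSET) /* 14 bits left for the CPU id */

static uint32_t encode_tail(int cpu, int idx)
{
	uint32_t tail;

	tail  = (uint32_t)(cpu + 1) << _Q_TAIL_CPU_OFFSET; /* +1 so tail == 0 means "no queue" */
	tail |= (uint32_t)idx << _Q_TAIL_IDX_OFFSET;       /* which of the 4 per-CPU nodes */
	return tail;
}

int main(void)
{
	/* compile-time analogue of the BUILD_BUG_ON in the patch */
	_Static_assert(NR_CPUS < (1U << _Q_TAIL_CPU_BITS), "NR_CPUS must fit in the tail");
	printf("tail for cpu 5, idx 1: %#x\n", (unsigned)encode_tail(5, 1));
	return 0;
}

[The +1 is also why the assertion uses >= rather than >: the largest CPU id, NR_CPUS - 1, encodes to NR_CPUS, which must itself still fit in _Q_TAIL_CPU_BITS.]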
2014 May 28
0
[RFC] Implement Batched (group) ticket lock
...OWPATH_FLAG ((__ticket_t)1)
> #else
> -#define __TICKET_LOCK_INC 1
> #define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
> #endif
For the !CONFIG_PARAVIRT case, TICKET_LOCK_INC_SHIFT used to be 0,
now you are making it one. Probably not an issue, since even people
who compile with 128 < CONFIG_NR_CPUS <= 256 will likely have their
spinlocks padded out to 32 or 64 bits anyway in most data structures.
> -#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
> +#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_TAIL_INC))
> typedef u8 __ticket_t;
> typedef u16 __ticketpair_t;
> #els...
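[The 256 / __TICKET_LOCK_INC bound quoted in this review comes from the width of the ticket counters: head and tail must be able to hold one distinct ticket per CPU, and each waiter bumps the tail by the increment, so doubling the increment halves how many CPUs an 8-bit ticket can cover. A standalone C11 restatement of the selection logic, with illustrative values and the pre-patch macro name:]

#include <stdint.h>

#define CONFIG_NR_CPUS    256
#define __TICKET_LOCK_INC 2   /* paravirt: the low bit doubles as the slowpath flag */

#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
typedef uint8_t  __ticket_t;      /* 128 CPUs or fewer: a byte per counter */
typedef uint16_t __ticketpair_t;
#else
typedef uint16_t __ticket_t;      /* otherwise widen both counters */
typedef uint32_t __ticketpair_t;
#endif

typedef struct {
	union {
		__ticketpair_t head_tail;              /* both counters, for one cmpxchg */
		struct { __ticket_t head, tail; } tickets;
	};
} arch_spinlock_t;

[With CONFIG_NR_CPUS = 256 and increment 2 this picks the u16 pair, which is the padding effect the reply alludes to.]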
2014 May 28
7
[RFC] Implement Batched (group) ticket lock
...e TICKET_LOCK_INC_SHIFT 1
+#define __TICKET_LOCK_TAIL_INC (1<<TICKET_LOCK_INC_SHIFT)
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define __TICKET_LOCK_INC 2
#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
#else
-#define __TICKET_LOCK_INC 1
#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
#endif
-#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_TAIL_INC))
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
#else
@@ -19,7 +20,12 @@ typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
#endif
-#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+#define...
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...@@ -314,12 +318,6 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
> */
> void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> {
> - struct mcs_spinlock *prev, *next, *node;
> - u32 old, tail;
> - int idx;
> -
> - BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
> -
> if (pv_enabled())
> goto pv_queue;
>
> @@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> queue:
> lockevent_inc(lock_slowpath);
> pv_queue:
> + __queued_spin_lock_slowpath_q...
2014 May 29
0
[RFC] Implement Batched (group) ticket lock
..._INC (1<<TICKET_LOCK_INC_SHIFT)
> +
> #ifdef CONFIG_PARAVIRT_SPINLOCKS
> -#define __TICKET_LOCK_INC 2
> #define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
> #else
> -#define __TICKET_LOCK_INC 1
> #define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
> #endif
>
> -#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
> +#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_TAIL_INC))
> typedef u8 __ticket_t;
> typedef u16 __ticketpair_t;
> #else
> @@ -19,7 +20,12 @@ typedef u16 __ticket_t;
> typedef u32 __ticketpair_t;
> #endif
>
> -#define TICKET_LO...
2014 May 08
1
[PATCH v10 03/19] qspinlock: Add pending bit
...Still don't like that you put it in a separate function, but you don't need
the pointer thing. Note how after you fail the trylock_pending() you
touch the second (node) cacheline.
> @@ -110,6 +184,9 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> + if (trylock_pending(lock, &val))
> + return; /* Lock acquired */
> +
> node = this_cpu_ptr(&mcs_nodes[0]);
> idx = node->count++;
> tail = encode_tail(smp_processor_id(), idx);
> @@ -119,15 +196,18 @@ void queu...
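[For context on what trylock_pending() does here: the lock word holds (queue tail, pending, locked) fields, written as triples like 0,1,0 in the kernel's comments, and a single contender can set the pending bit and spin on the lock word alone instead of queueing, avoiding exactly the second (node) cacheline the review mentions. A simplified standalone sketch of that fast path in C11 atomics; the kernel version takes val by pointer and uses its own primitives, so treat this as an illustration, not the patch's code:]

#include <stdatomic.h>
#include <stdbool.h>

#define _Q_LOCKED_VAL  1U         /* low byte: lock held */
#define _Q_PENDING_VAL (1U << 8)  /* next byte: one spinning waiter */

static bool trylock_pending(atomic_uint *lock, unsigned int val)
{
	/* Someone is already queued or pending: fall back to the MCS queue. */
	if (val & ~_Q_LOCKED_VAL)
		return false;

	/* 0,0,* -> 0,1,*: claim the pending bit */
	if (!atomic_compare_exchange_strong(lock, &val, val | _Q_PENDING_VAL))
		return false; /* raced with another CPU; caller queues instead */

	/* 0,1,1 -> 0,1,0: wait for the owner to release */
	while (atomic_load(lock) & _Q_LOCKED_VAL)
		; /* cpu_relax() in the kernel */

	/* 0,1,0 -> 0,0,1: clear pending, set locked. Since pending is set
	 * and locked is clear, the wrapping unsigned add only moves value
	 * between the two low bytes and leaves the tail bits untouched. */
	atomic_fetch_add(lock, _Q_LOCKED_VAL - _Q_PENDING_VAL);
	return true;
}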
2004 Jul 18
3
zaptel issues
...n of the
previous definition
In file included from /usr/src/linux/include/linux/prefetch.h:13,
from /usr/src/linux/include/linux/list.h:6,
from /usr/src/linux/include/linux/module.h:12,
from zaptel.c:44:
/usr/include/asm/processor.h:75: error: `CONFIG_NR_CPUS' undeclared
here (not in a function)
In file included from zaptel.c:44:
/usr/src/linux/include/linux/module.h:21:34: linux/modversions.h: No
such file or directory
In file included from /usr/include/asm/smp.h:17,
from /usr/src/linux/include/linux/smp.h:14,...
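[Both errors here usually share one cause: the module is being built against a kernel tree that was never configured, so neither the generated CONFIG_* macros nor linux/modversions.h exist. On 2.4-era kernels the CONFIG_ macros reach asm/processor.h via <linux/config.h>, which pulls in the generated <linux/autoconf.h>; configuring and prepping the tree (typically make oldconfig && make dep in /usr/src/linux) creates both files. Hand-defining the macro, sketched below as a fragment of module source, is a last-resort hack rather than a fix, and the value shown is hypothetical:]

#include <linux/config.h>   /* 2.4: pulls in the generated autoconf.h */

#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 32   /* hypothetical fallback; must match the kernel's config */
#endif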
2020 Jul 23
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...ways_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
>> */
>> void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>> {
>> - struct mcs_spinlock *prev, *next, *node;
>> - u32 old, tail;
>> - int idx;
>> -
>> - BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>> -
>> if (pv_enabled())
>> goto pv_queue;
>>
>> @@ -397,6 +395,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>> queue:
>> lockevent_inc(lock_slowpath);
>> pv_queue:
>...
2014 Jun 11
3
[PATCH v11 09/16] qspinlock, x86: Allow unfair spinlock in a virtual guest
...+#endif
> barrier();
> ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
> barrier();
Why? If we have a simple test-and-set lock like below, we'll never get
here at all.
> @@ -252,6 +260,18 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>
> BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>
> +#ifdef CONFIG_VIRT_UNFAIR_LOCKS
> + /*
> + * A simple test and set unfair lock
> + */
> + if (static_key_false(&virt_unfairlocks_enabled)) {
> + cpu_relax(); /* Relax after a failed lock attempt */
Meh, I don't think any...
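[The "simple test-and-set lock like below" the reviewer refers to is the degenerate unfair lock for virtual guests: strict FIFO queueing hurts when the vCPU next in line is preempted, so any waiter is allowed to grab the lock the moment it is free. A standalone sketch in C11 atomics, with cpu_relax() elided:]

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } tas_lock_t;

static void tas_lock(tas_lock_t *l)
{
	for (;;) {
		/* xchg-style test-and-set: try to grab the lock */
		if (!atomic_exchange_explicit(&l->locked, true, memory_order_acquire))
			return;
		/* spin read-only until it looks free, then retry the xchg */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed))
			; /* cpu_relax() in the kernel */
	}
}

static void tas_unlock(tas_lock_t *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}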
2014 Jun 15
28
[PATCH 00/11] qspinlock with paravirt support
Since Waiman seems incapable of doing simple things, here's my take on the
paravirt crap.
The first few patches are taken from Waiman's latest series, but the virt
support is completely new. Its primary aim is to not mess up the native code.
I've not stress-tested it, but the virt and paravirt (KVM) cases boot on simple
SMP guests. I've not done Xen, but the patch should be
2014 Jun 15
0
[PATCH 08/11] qspinlock: Revert to test-and-set on hypervisors
...ef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
/*
* Initializer
*/
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+ if (virt_queue_spin_lock(lock))
+ return;
+
/*
* wait for in-progress pending->locked hand-overs
*
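[The virt_queue_spin_lock() stub above follows the usual kernel pattern for optional per-arch hooks: the generic header supplies an inline that does nothing, guarded by #ifndef, and an architecture wanting the hypervisor bypass defines the macro to claim the name before this header is seen. A minimal standalone rendering of that pattern, with illustrative names:]

#include <stdbool.h>

/* An architecture header would provide its own version first, e.g.:
 *   #define virt_queue_spin_lock virt_queue_spin_lock
 *   static inline bool virt_queue_spin_lock(void *lock) { ...test-and-set... }
 */
#ifndef virt_queue_spin_lock
static inline bool virt_queue_spin_lock(void *lock)
{
	(void)lock;
	return false; /* native case: continue into the queued slowpath */
}
#endif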
2014 Jun 15
0
[PATCH 06/11] qspinlock: Optimize pending bit
...nged, 10 insertions(+)
Index: linux-2.6/kernel/locking/qspinlock.c
===================================================================
--- linux-2.6.orig/kernel/locking/qspinlock.c
+++ linux-2.6/kernel/locking/qspinlock.c
@@ -226,6 +226,16 @@ void queue_spin_lock_slowpath(struct qsp
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
/*
+ * wait for in-progress pending->locked hand-overs
+ *
+ * 0,1,0 -> 0,0,1
+ */
+ if (val == _Q_PENDING_VAL) {
+ while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
+ cpu_relax();
+ }
+
+ /*
* trylock || pending
*
* 0,...
2014 Jun 28
2
[RFC PATCH v2] Implement Batched (group) ticket lock
...)
+#define TICKET_LOCK_INC_SHIFT 1
+#define __TICKET_LOCK_TAIL_INC (1<<TICKET_LOCK_INC_SHIFT)
#else
-#define __TICKET_LOCK_INC 1
-#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
+#define TICKET_LOCK_INC_SHIFT 0
+#define __TICKET_LOCK_TAIL_INC (1<<TICKET_LOCK_INC_SHIFT)
#endif
-#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_TAIL_INC))
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
#else
@@ -19,7 +20,40 @@ typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
#endif
-#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+#ifdef C...
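[Reading across this diff, the batching idea is: tickets are still handed out in FIFO order, but the lock is granted to a small group of front waiters at a time, and those contend test-and-set style among themselves, trading strict fairness for robustness to vCPU preemption. Below is a standalone sliding-window sketch of that idea; this is my reading rather than the patch's exact scheme, the batch size is illustrative, and fairness within the window is only best-effort:]

#include <stdatomic.h>
#include <stdbool.h>

#define BATCH 4U /* illustrative group size */

typedef struct {
	atomic_uint head;    /* completed lock hand-offs */
	atomic_uint tail;    /* next ticket to hand out */
	atomic_bool locked;  /* contended within the front group */
} batched_lock_t;        /* zero-initialize: batched_lock_t l = {0}; */

static void batched_lock(batched_lock_t *l)
{
	unsigned int me = atomic_fetch_add(&l->tail, 1U);

	/* FIFO across groups: wait until my ticket is among the front BATCH
	 * (unsigned subtraction keeps this correct across counter wraparound) */
	while (me - atomic_load(&l->head) >= BATCH)
		; /* cpu_relax() */

	/* unfair within the group: plain test-and-set */
	while (atomic_exchange(&l->locked, true))
		;
}

static void batched_unlock(batched_lock_t *l)
{
	atomic_fetch_add(&l->head, 1U);         /* slide the window forward */
	atomic_store(&l->locked, false);        /* let the group contend again */
}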