2015 Apr 13
1
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...man Long wrote:
> >>+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
> >>+{
> >>+        struct __qspinlock *l = (void *)lock;
> >>+        struct qspinlock **lp = NULL;
> >>+        struct pv_node *pn = (struct pv_node *)node;
> >>+        int slow_set = false;
> >>+        int loop;
> >>+
> >>+        for (;;) {
> >>+                for (loop = SPIN_THRESHOLD; loop; loop--) {
> >>+                        if (!READ_ONCE(l->locked))
> >>+                                return;
> >>+
> >>+                        cpu_relax();
> >>+                }
> >>+
> >>...
2015 Apr 09
6
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...spin.
> + * __pv_queue_spin_unlock() will wake us.
> + */
> +static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
> +{
> +        struct __qspinlock *l = (void *)lock;
> +        struct qspinlock **lp = NULL;
> +        struct pv_node *pn = (struct pv_node *)node;
> +        int slow_set = false;
> +        int loop;
> +
> +        for (;;) {
> +                for (loop = SPIN_THRESHOLD; loop; loop--) {
> +                        if (!READ_ONCE(l->locked))
> +                                return;
> +
> +                        cpu_relax();
> +                }
> +
> +                WRITE_ONCE(pn->state, vcpu_halted);
> +                if (!lp)
> +                        lp = pv_hash(lock, p...
2015 Apr 09
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...pin_unlock() will wake us.
>> + */
>> +static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
>> +{
>> +        struct __qspinlock *l = (void *)lock;
>> +        struct qspinlock **lp = NULL;
>> +        struct pv_node *pn = (struct pv_node *)node;
>> +        int slow_set = false;
>> +        int loop;
>> +
>> +        for (;;) {
>> +                for (loop = SPIN_THRESHOLD; loop; loop--) {
>> +                        if (!READ_ONCE(l->locked))
>> +                                return;
>> +
>> +                        cpu_relax();
>> +                }
>> +
>> +                WRITE_ONCE(pn->state, vcpu_halted);
>>...
2015 Apr 07
0
[PATCH v15 09/15] pvqspinlock: Implement simple paravirt support for the qspinlock
...ecome clear; halt the vcpu after a short spin.
+ * __pv_queue_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+        struct __qspinlock *l = (void *)lock;
+        struct qspinlock **lp = NULL;
+        struct pv_node *pn = (struct pv_node *)node;
+        int slow_set = false;
+        int loop;
+
+        for (;;) {
+                for (loop = SPIN_THRESHOLD; loop; loop--) {
+                        if (!READ_ONCE(l->locked))
+                                return;
+
+                        cpu_relax();
+                }
+
+                WRITE_ONCE(pn->state, vcpu_halted);
+                if (!lp)
+                        lp = pv_hash(lock, pn);
+                /*
+                 * lp must be set before setting _Q_SLOW_VAL
+                 *
+...
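The archive truncates pv_wait_head() above, so here is a minimal, hedged userspace sketch of the spin-then-block idea it implements: spin on the lock word for a bounded number of iterations, then publish a "halted" state and block until the holder releases the lock. All names here (toy_*, TOY_SPIN_THRESHOLD, the sleep-based toy_pv_wait()) are illustrative stand-ins, not the kernel's pv_wait_head()/pv_wait().

/*
 * Hedged userspace sketch of the spin-then-block pattern: bounded
 * spinning first, then record a "halted" state and block.  Illustration
 * only; the kernel code additionally hashes the lock and sets
 * _Q_SLOW_VAL so __pv_queue_spin_unlock() knows to kick the waiter.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define TOY_SPIN_THRESHOLD (1 << 10)    /* stand-in for SPIN_THRESHOLD */

struct toy_lock {
        atomic_int locked;              /* 0 = free, 1 = held */
};

struct toy_waiter {
        atomic_int halted;              /* stand-in for pn->state */
};

/* Stand-in for pv_wait(): a real PV guest would halt the vCPU instead. */
static void toy_pv_wait(struct toy_lock *lock)
{
        while (atomic_load_explicit(&lock->locked, memory_order_acquire))
                usleep(100);
}

static void toy_wait_head(struct toy_lock *lock, struct toy_waiter *w)
{
        for (;;) {
                /* Phase 1: bounded spinning, cheap while the holder runs. */
                for (int loop = TOY_SPIN_THRESHOLD; loop; loop--) {
                        if (!atomic_load_explicit(&lock->locked,
                                                  memory_order_acquire))
                                return;
                        /* cpu_relax() equivalent omitted in this sketch */
                }

                /* Phase 2: publish that we are halted, then block. */
                atomic_store_explicit(&w->halted, 1, memory_order_release);
                toy_pv_wait(lock);
                atomic_store_explicit(&w->halted, 0, memory_order_relaxed);
                /* Woken up: loop around and re-check the lock word. */
        }
}

int main(void)
{
        struct toy_lock lock = { .locked = 0 };
        struct toy_waiter me = { .halted = 0 };

        toy_wait_head(&lock, &me);      /* returns at once: lock is free */
        puts("lock observed free");
        return 0;
}

The real function also records the lock in a hash (pv_hash()) and flips l->locked to _Q_SLOW_VAL so that __pv_queue_spin_unlock() can find and wake the waiter; that bookkeeping is deliberately left out of the sketch.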
2015 Apr 07
0
[PATCH v15 13/15] pvqspinlock: Only kick CPU at unlock time
...clearing the hash table entry.
+                 */
+                if (cmpxchg(&pn->state, vcpu_running, vcpu_halted)
+                                == vcpu_hashed)
+                        goto wait_now;
+
                 if (!lp)
                         lp = pv_hash(lock, pn);
                 /*
@@ -283,7 +315,7 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                 } else if (slow_set && !READ_ONCE(l->locked))
                         return;
                 slow_set = true;
-
+wait_now:
                 pv_wait(&l->locked, _Q_SLOW_VAL);
         }
         /*
@@ -315,7 +347,7 @@ __visible void __pv_queue_spin_unlock(struct qspinlock *lock)
          * At this point the memory pointed at by lock can be freed/reused,
          * however...
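The hunk above adds a cmpxchg on pn->state: if the unlocker has already moved the node to vcpu_hashed, the waiter takes the new wait_now shortcut instead of hashing the lock itself. A minimal C11 sketch of that transition, using illustrative toy_* names rather than the kernel's, might look like this:

/*
 * Hedged sketch of the waiter-side state transition: try to move from
 * RUNNING to HALTED; if the unlocker already set HASHED, skip hashing
 * and wait immediately (the patch's "goto wait_now" path).
 */
#include <stdatomic.h>
#include <stdio.h>

enum toy_vcpu_state { TOY_RUNNING, TOY_HALTED, TOY_HASHED };

struct toy_node {
        _Atomic enum toy_vcpu_state state;
};

/* Returns 1 if the waiter should skip hashing and wait immediately. */
static int toy_try_halt(struct toy_node *pn)
{
        enum toy_vcpu_state expected = TOY_RUNNING;

        /* Analogous to cmpxchg(&pn->state, vcpu_running, vcpu_halted). */
        if (atomic_compare_exchange_strong(&pn->state, &expected, TOY_HALTED))
                return 0;               /* halted ourselves; hash as usual */

        /* CAS failed: 'expected' now holds the current state. */
        return expected == TOY_HASHED;  /* unlocker already hashed the lock */
}

int main(void)
{
        struct toy_node a = { TOY_RUNNING };
        struct toy_node b = { TOY_HASHED };

        printf("a: skip hashing? %d\n", toy_try_halt(&a));      /* 0 */
        printf("b: skip hashing? %d\n", toy_try_halt(&b));      /* 1 */
        return 0;
}

In the patch, the equivalent of a non-zero return here jumps to wait_now and proceeds straight to pv_wait(&l->locked, _Q_SLOW_VAL).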
2015 Apr 07
18
[PATCH v15 00/15] qspinlock: a 4-byte queue spinlock with PV support
v14->v15:
- Incorporate PeterZ's v15 qspinlock patch and improve upon the PV
  qspinlock code by dynamically allocating the hash table as well
  as making some other performance optimizations.
- Simplified the Xen PV qspinlock code as suggested by David Vrabel
<david.vrabel at citrix.com>.
- Add benchmarking data for 3.19 kernel to compare the performance
of a spinlock heavy test