Displaying 15 results from an estimated 33 matches for "write_seqcount_end".
2013 Jun 27
1
[PATCH 2/5] time: pass flags instead of multiple bools to timekeeping_update()
...or)
+ if (action & TK_MIRROR)
memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
}
@@ -508,7 +511,7 @@ int do_settimeofday(const struct timespec *tv)
tk_set_xtime(tk, tv);
- timekeeping_update(tk, true, true);
+ timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR);
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -552,7 +555,7 @@ int timekeeping_inject_offset(struct timespec *ts)
tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
error: /* even if we error out, we forwarded the time, so call update */...
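The conversion follows the usual kernel bool-to-flags pattern. A minimal sketch of what the reworked helper looks like (bit values are assumptions; only the names TK_CLEAR_NTP and TK_MIRROR appear in the diff):

	/* Sketch only; bit positions are illustrative, not from the patch. */
	#define TK_CLEAR_NTP	(1 << 0)
	#define TK_MIRROR	(1 << 1)

	static void timekeeping_update(struct timekeeper *tk, unsigned int action)
	{
		if (action & TK_CLEAR_NTP) {
			tk->ntp_error = 0;	/* reset NTP error state */
			ntp_clear();
		}
		update_vsyscall(tk);
		update_pvclock_gtod(tk);
		if (action & TK_MIRROR)
			memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
	}

Callers then combine flags at the call site, as in the hunks above, instead of passing a row of opaque bools.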
2013 Jun 19
14
[PATCH 2/4] time: add a notifier chain for when the system time is stepped
..._set_task, 0);
+
+static void timekeeping_clock_was_set_delayed(void)
+{
+ tasklet_schedule(&clock_was_set_tasklet);
+}
+
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk)
@@ -513,8 +532,7 @@ int do_settimeofday(const struct timespec *tv)
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
- /* signal hrtimers about time change */
- clock_was_set();
+ timekeeping_clock_was_set();
return 0;
}
@@ -557,8 +575,7 @@ error: /* even if we error out, we forwarded the time, so call update */
write_seqcou...
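The mechanism behind this hunk: a notifier chain fires whenever the system time is stepped, and contexts that cannot run the chain directly defer it through the tasklet shown above. A minimal sketch of the chain plumbing, with hypothetical registration names (only clock_was_set_tasklet and timekeeping_clock_was_set appear in the diff):

	/* Sketch; the chain and register-helper names are assumptions. */
	static RAW_NOTIFIER_HEAD(clock_was_set_chain);

	int register_clock_was_set_notifier(struct notifier_block *nb)
	{
		return raw_notifier_chain_register(&clock_was_set_chain, nb);
	}

	static void timekeeping_clock_was_set(void)
	{
		raw_notifier_call_chain(&clock_was_set_chain, 0, NULL);
		clock_was_set();	/* still signal hrtimers about the change */
	}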
2019 Mar 29
0
[PATCH net v4] failover: allow name change on IFF_UP slave interfaces
...ctly.
 */
if (dev->flags & IFF_UP &&
    likely(!(dev->priv_flags & IFF_FAILOVER_SLAVE)))
	return -EBUSY;

write_seqcount_begin(&devnet_rename_seq);

if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
	write_seqcount_end(&devnet_rename_seq);
	return 0;
}

memcpy(oldname, dev->name, IFNAMSIZ);

err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
	write_seqcount_end(&devnet_rename_seq);
	return err;
}

i...
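For context, the reader side that devnet_rename_seq protects uses the standard retry loop, which is why writers like dev_change_name() must close the seqcount on every exit path, as the excerpt above does. A minimal sketch of that read side (illustrative, not the verbatim net/core/dev.c code):

	unsigned int seq;
	char name[IFNAMSIZ];

	do {
		seq = read_seqcount_begin(&devnet_rename_seq);
		memcpy(name, dev->name, IFNAMSIZ);	/* snapshot the name */
	} while (read_seqcount_retry(&devnet_rename_seq, seq));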
2019 Mar 28
0
[PATCH net v3] failover: allow name change on IFF_UP slave interfaces
...>flags & IFF_UP) {
	if (likely(!(dev->priv_flags & IFF_FAILOVER_SLAVE)))
		return -EBUSY;
	reopen_needed = true;
}

write_seqcount_begin(&devnet_rename_seq);

if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
	write_seqcount_end(&devnet_rename_seq);
	return 0;
}

memcpy(oldname, dev->name, IFNAMSIZ);

err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
	write_seqcount_end(&devnet_rename_seq);
	return err;
}

i...
2019 Mar 27
0
[PATCH net v3] failover: allow name change on IFF_UP slave interfaces
...!strchr(oldname, '%'))
> netdev_info(dev, "renamed from %s\n", oldname);
>
>@@ -1210,7 +1230,9 @@ int dev_change_name(struct net_device *dev, const char *newname)
> memcpy(dev->name, oldname, IFNAMSIZ);
> dev->name_assign_type = old_assign_type;
> write_seqcount_end(&devnet_rename_seq);
>- return ret;
>+ if (err >= 0)
>+ err = ret;
>+ goto reopen;
> }
>
> write_seqcount_end(&devnet_rename_seq);
>@@ -1246,6 +1268,15 @@ int dev_change_name(struct net_device *dev, const char *newname)
> }
> }
>
>+reopen:...
2019 Aug 07
2
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...> uaddr->uaddr - 1 + uaddr->size);
> }
>
> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
> +{
> + write_seqcount_begin(&vq->seq);
> +}
> +
> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> +{
> + write_seqcount_end(&vq->seq);
> +}
The write side of a seqlock only provides write barriers. Access to
map = vq->maps[VHOST_ADDR_USED];
still needs a read-side barrier, and then I think this will be no
better than a normal spinlock.
It also doesn't seem like this algorithm even needs a seqlock,...
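Spelled out, the objection is that a seqcount's ordering guarantees come from both halves of the pairing: write_seqcount_begin()/end() only issue smp_wmb() around the writer's stores, and the matching read barriers live in the read-side helpers. The canonical pairing, as a sketch (the patch under review wraps the *accessors* in begin/end instead, which is the inversion being criticized):

	/* Writer: orders the counter bump against the data stores. */
	write_seqcount_begin(&vq->seq);
	vq->maps[VHOST_ADDR_USED] = map;
	write_seqcount_end(&vq->seq);

	/* Reader: the read barriers come from the read-side helpers. */
	unsigned int seq;
	do {
		seq = read_seqcount_begin(&vq->seq);
		map = vq->maps[VHOST_ADDR_USED];
	} while (read_seqcount_retry(&vq->seq, seq));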
2019 Aug 01
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ford that for vhost fast path,
the atomics eliminate almost all the performance improvement brought by
this patch on a machine without SMAP.
>
>> write_seqcount_begin()
>>
>> map = vq->map[X]
>>
>> write or read through map->addr directly
>>
>> write_seqcount_end()
>>
>>
>> There's no rmb() in write_seqcount_begin(), so map could be read before
>> write_seqcount_begin(), but it looks to me now that this doesn't harm at
>> all, maybe we can try this way.
> That is because it is a write side lock, not a read lock. IIRC...
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...gister cycle.
You are supposed to use something simple like a spinlock or mutex
inside the invalidate_range_start to serialize teardown of the SPTEs
with their accessors.
> write_seqcount_begin()
>
> map = vq->map[X]
>
> write or read through map->addr directly
>
> write_seqcount_end()
>
>
> There's no rmb() in write_seqcount_begin(), so map could be read before
> write_seqcount_begin(), but it looks to me now that this doesn't harm at
> all, maybe we can try this way.
That is because it is a write side lock, not a read lock. IIRC
seqlocks have weaker...
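A sketch of the simpler serialization being recommended, using a hypothetical vq->map_lock (no such field exists in the patch):

	/* Worker/accessor side */
	spin_lock(&vq->map_lock);
	map = vq->maps[VHOST_ADDR_USED];
	if (map) {
		/* ... read or write through map->addr ... */
	}
	spin_unlock(&vq->map_lock);

	/* invalidate_range_start() side: tear down under the same lock */
	spin_lock(&vq->map_lock);
	vq->maps[VHOST_ADDR_USED] = NULL;
	spin_unlock(&vq->map_lock);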
2019 Aug 07
0
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...addr,
return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
}
+static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
+{
+ write_seqcount_begin(&vq->seq);
+}
+
+static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
+{
+ write_seqcount_end(&vq->seq);
+}
+
+static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
+{
+ unsigned int seq;
+
+ /* Make sure any changes to map were done before checking seq
+ * counter. Paired with smp_wmb() in write_seqcount_begin().
+ */
+ smp_mb();
+ seq = raw_read_seqcount(&vq->...
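The helper is cut off above; a hedged guess at its continuation, assuming it spins until the counter is even, meaning no worker sits between map_begin() and map_end() (the actual patch may differ):

	/* Assumed continuation, not the verbatim patch: */
	while (seq & 1) {		/* odd: a writer is active */
		cpu_relax();
		seq = raw_read_seqcount(&vq->seq);
	}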
2019 Aug 08
3
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...id inline vhost_vq_access_map_begin(struct
>>> vhost_virtqueue *vq)
>>> +{
>>> +    write_seqcount_begin(&vq->seq);
>>> +}
>>> +
>>> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
>>> +{
>>> +    write_seqcount_end(&vq->seq);
>>> +}
>> The write side of a seqlock only provides write barriers. Access to
>>
>>     map = vq->maps[VHOST_ADDR_USED];
>>
>> Still needs a read side barrier, and then I think this will be no
>> better than a normal spinlock.
>...
2019 Aug 07
0
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...);
>> }
>>
>> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
>> +{
>> + write_seqcount_begin(&vq->seq);
>> +}
>> +
>> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
>> +{
>> + write_seqcount_end(&vq->seq);
>> +}
> The write side of a seqlock only provides write barriers. Access to
>
> map = vq->maps[VHOST_ADDR_USED];
>
> Still needs a read side barrier, and then I think this will be no
> better than a normal spinlock.
>
> It also doesn't seem li...
2019 Aug 07
12
[PATCH V4 0/9] Fixes for metadata acceleration
Hi all:
This series tries to fix several issues introduced by the metadata
acceleration series. Please review.
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlock helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not to use RCU to synchronize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the
2014 Jan 16
0
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...t sysfs_seq;
+
/* Page frag for packet buffer allocation. */
struct page_frag alloc_frag;
@@ -416,7 +423,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
}
+ write_seqcount_begin(&rq->sysfs_seq);
ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+ write_seqcount_end(&rq->sysfs_seq);
return head_skb;
err_skb:
@@ -604,18 +613,29 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
return err;
}
-static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
+static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)...
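The point of the new sysfs_seq is that the sysfs reader can take a consistent snapshot of the moving average with the usual retry loop; a minimal sketch (illustrative, reusing get_mergeable_buf_len() from the diff):

	unsigned int seq, len;

	do {
		seq = read_seqcount_begin(&rq->sysfs_seq);
		len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	} while (read_seqcount_retry(&rq->sysfs_seq, seq));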
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
On Wed, Jul 31, 2019 at 04:46:53AM -0400, Jason Wang wrote:
> We used to use RCU to synchronize MMU notifier with worker. This leads
> to calling synchronize_rcu() in invalidate_range_start(). But on a busy
> system, there would be many factors that may slow down the
> synchronize_rcu() which makes it unsuitable to be called in MMU
> notifier.
>
> A solution is SRCU but its
2014 Jan 16
2
[PATCH net-next v3 5/5] virtio-net: initial rx sysfs support, export mergeable rx buffer size
...er allocation. */
> struct page_frag alloc_frag;
>
> @@ -416,7 +423,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> }
> }
>
> + write_seqcount_begin(&rq->sysfs_seq);
> ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
> + write_seqcount_end(&rq->sysfs_seq);
> return head_skb;
>
> err_skb:
Hmm, this adds overhead just to prevent sysfs from reading a wrong value.
Can't sysfs simply disable softirq while it's reading the value?
> @@ -604,18 +613,29 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp...
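A sketch of the alternative being floated: since ewma_add() runs from NAPI (softirq) context, the sysfs reader could exclude the updater instead of adding a seqcount to the fast path. Note that local_bh_disable() only masks softirqs on the local CPU, so this is only sufficient if the read runs on the CPU that polls the queue:

	local_bh_disable();	/* keep the local softirq from updating mid-read */
	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	local_bh_enable();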