Displaying 20 results from an estimated 303 matches for "rcu_dereference".
2011 Nov 17
12
[PATCH] Avoid panic when adjusting sedf parameters
When using sedf scheduler in a cpupool the system might panic when setting
sedf scheduling parameters for a domain.
Signed-off-by: juergen.gross@ts.fujitsu.com
1 file changed, 4 insertions(+)
xen/common/sched_sedf.c | 4 ++++
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
2011 Nov 24
2
[PATCH] macvtap: Fix macvtap_get_queue to use rxhash first
...c 2011-10-22 08:38:01.000000000 +0530
+++ new/drivers/net/macvtap.c 2011-11-16 18:34:51.000000000 +0530
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(s...
2011 Nov 24
2
[PATCH] macvtap: Fix macvtap_get_queue to use rxhash first
...c 2011-10-22 08:38:01.000000000 +0530
+++ new/drivers/net/macvtap.c 2011-11-16 18:34:51.000000000 +0530
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(s...
2019 Aug 07
0
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
....c b/drivers/vhost/vhost.c
index cfc11f9ed9c9..57bfbb60d960 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
spin_lock(&vq->mmu_lock);
for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- map[i] = rcu_dereference_protected(vq->maps[i],
- lockdep_is_held(&vq->mmu_lock));
+ map[i] = vq->maps[i];
if (map[i]) {
vhost_set_map_dirty(vq, map[i], i);
- rcu_assign_pointer(vq->maps[i], NULL);
+ vq->maps[i] = NULL;
}
}
spin_unlock(&vq->mmu_lock);
- /* No need for syn...
2019 Jul 31
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
....c b/drivers/vhost/vhost.c
index cfc11f9ed9c9..db2c81cb1e90 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
spin_lock(&vq->mmu_lock);
for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- map[i] = rcu_dereference_protected(vq->maps[i],
- lockdep_is_held(&vq->mmu_lock));
+ map[i] = vq->maps[i];
if (map[i]) {
vhost_set_map_dirty(vq, map[i], i);
- rcu_assign_pointer(vq->maps[i], NULL);
+ vq->maps[i] = NULL;
}
}
spin_unlock(&vq->mmu_lock);
- /* No need for syn...
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...c11f9ed9c9..db2c81cb1e90 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
>
> spin_lock(&vq->mmu_lock);
> for (i = 0; i < VHOST_NUM_ADDRS; i++) {
> - map[i] = rcu_dereference_protected(vq->maps[i],
> - lockdep_is_held(&vq->mmu_lock));
> + map[i] = vq->maps[i];
> if (map[i]) {
> vhost_set_map_dirty(vq, map[i], i);
> - rcu_assign_pointer(vq->maps[i], NULL);
> + vq->maps[i] = NULL;
> }
> }
> spin_unlock(...
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...c11f9ed9c9..db2c81cb1e90 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
>
> spin_lock(&vq->mmu_lock);
> for (i = 0; i < VHOST_NUM_ADDRS; i++) {
> - map[i] = rcu_dereference_protected(vq->maps[i],
> - lockdep_is_held(&vq->mmu_lock));
> + map[i] = vq->maps[i];
> if (map[i]) {
> vhost_set_map_dirty(vq, map[i], i);
> - rcu_assign_pointer(vq->maps[i], NULL);
> + vq->maps[i] = NULL;
> }
> }
> spin_unlock(...
2019 Sep 06
1
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...s = 0;
> - map->addr = NULL;
> -}
> -
> -static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
> -{
> - struct vhost_map *map[VHOST_NUM_ADDRS];
> - int i;
> -
> - spin_lock(&vq->mmu_lock);
> - for (i = 0; i < VHOST_NUM_ADDRS; i++) {
> - map[i] = rcu_dereference_protected(vq->maps[i],
> - lockdep_is_held(&vq->mmu_lock));
> - if (map[i])
> - rcu_assign_pointer(vq->maps[i], NULL);
> - }
> - spin_unlock(&vq->mmu_lock);
> -
> - synchronize_rcu();
> -
> - for (i = 0; i < VHOST_NUM_ADDRS; i++)
> - if...
2018 May 22
0
[PATCH net-next v11 3/5] net: Introduce net_failover driver
...kb);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_failover_info *nfo_info = netdev_priv(dev);
+ struct net_device *xmit_dev;
+
+ /* Try xmit via primary netdev followed by standby netdev */
+ xmit_dev = rcu_dereference_bh(nfo_info->primary_dev);
+ if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) {
+ xmit_dev = rcu_dereference_bh(nfo_info->standby_dev);
+ if (!xmit_dev || !net_failover_xmit_ready(xmit_dev))
+ return net_failover_drop_xmit(skb, dev);
+ }
+
+ skb->dev = xmit_dev;
+ skb->queue_map...
2019 Jul 31
14
[PATCH V2 0/9] Fixes for metadata acceleration
Hi all:
This series tries to fix several issues introduced by the metadata
acceleration series. Please review.
Changes from V1:
- Try not to use RCU to synchronize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the range is overlapped with
metadata
Jason Wang (9):
vhost: don't set uaddr for invalid address
vhost: validate MMU notifier
2019 Aug 03
1
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...st/vhost.c
> >> +++ b/drivers/vhost/vhost.c
> >> @@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
> >>
> >> spin_lock(&vq->mmu_lock);
> >> for (i = 0; i < VHOST_NUM_ADDRS; i++) {
> >> - map[i] = rcu_dereference_protected(vq->maps[i],
> >> - lockdep_is_held(&vq->mmu_lock));
> >> + map[i] = vq->maps[i];
> >> if (map[i]) {
> >> vhost_set_map_dirty(vq, map[i], i);
> >> - rcu_assign_pointer(vq->maps[i], NULL);
> >> + vq->...
2018 May 07
0
[PATCH net-next v10 2/4] net: Introduce generic failover module
...kb);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_failover_info *nfo_info = netdev_priv(dev);
+ struct net_device *xmit_dev;
+
+ /* Try xmit via primary netdev followed by standby netdev */
+ xmit_dev = rcu_dereference_bh(nfo_info->primary_dev);
+ if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) {
+ xmit_dev = rcu_dereference_bh(nfo_info->standby_dev);
+ if (!xmit_dev || !net_failover_xmit_ready(xmit_dev))
+ return net_failover_drop_xmit(skb, dev);
+ }
+
+ skb->dev = xmit_dev;
+ skb->queue_map...
2019 Sep 05
0
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...map->pages);
- map->pages = NULL;
- map->npages = 0;
- map->addr = NULL;
-}
-
-static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
-{
- struct vhost_map *map[VHOST_NUM_ADDRS];
- int i;
-
- spin_lock(&vq->mmu_lock);
- for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- map[i] = rcu_dereference_protected(vq->maps[i],
- lockdep_is_held(&vq->mmu_lock));
- if (map[i])
- rcu_assign_pointer(vq->maps[i], NULL);
- }
- spin_unlock(&vq->mmu_lock);
-
- synchronize_rcu();
-
- for (i = 0; i < VHOST_NUM_ADDRS; i++)
- if (map[i])
- vhost_map_unprefetch(map[i]);
-
-}
-...
2016 Jan 26
4
[v3,11/41] mips: reuse asm-generic/barrier.h
...p_read_barrier_depends()" does NOTHING wrt the second write.
>
> Just to clarify: on alpha it adds a memory barrier, but that memory
> barrier is useless.
No trailing data-dependent read, so agreed, no smp_read_barrier_depends()
needed. That said, I believe that we should encourage rcu_dereference*()
or lockless_dereference() instead of READ_ONCE() for documentation
reasons, though.
> On non-alpha, it is a no-op, and obviously does nothing simply because
> it generates no code.
>
> So if anybody believes that the "smp_read_barrier_depends()" does
> something, they...
2016 Jan 26
4
[v3,11/41] mips: reuse asm-generic/barrier.h
...p_read_barrier_depends()" does NOTHING wrt the second write.
>
> Just to clarify: on alpha it adds a memory barrier, but that memory
> barrier is useless.
No trailing data-dependent read, so agreed, no smp_read_barrier_depends()
needed. That said, I believe that we should encourage rcu_dereference*()
or lockless_dereference() instead of READ_ONCE() for documentation
reasons, though.
> On non-alpha, it is a no-op, and obviously does nothing simply because
> it generates no code.
>
> So if anybody believes that the "smp_read_barrier_depends()" does
> something, they...
2019 Aug 01
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...644
>> --- a/drivers/vhost/vhost.c
>> +++ b/drivers/vhost/vhost.c
>> @@ -324,17 +324,16 @@ static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
>>
>> spin_lock(&vq->mmu_lock);
>> for (i = 0; i < VHOST_NUM_ADDRS; i++) {
>> - map[i] = rcu_dereference_protected(vq->maps[i],
>> - lockdep_is_held(&vq->mmu_lock));
>> + map[i] = vq->maps[i];
>> if (map[i]) {
>> vhost_set_map_dirty(vq, map[i], i);
>> - rcu_assign_pointer(vq->maps[i], NULL);
>> + vq->maps[i] = NULL;
>> }...
2018 Apr 11
2
[RFC PATCH net-next v6 2/4] net: Introduce generic bypass module
...ta into network stack.
>+ * Change the associated network device from lower dev to virtio.
>+ * note: already called with rcu_read_lock
>+ */
>+static rx_handler_result_t bypass_handle_frame(struct sk_buff **pskb)
>+{
>+ struct sk_buff *skb = *pskb;
>+ struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
>+
>+ skb->dev = ndev;
>+
>+ return RX_HANDLER_ANOTHER;
>+}
>+
>+static struct net_device *bypass_master_get_bymac(u8 *mac,
>+ struct bypass_ops **ops)
>+{
>+ struct bypass_master *bypass_master;
>+ struct net_device *bypa...
2018 Apr 11
2
[RFC PATCH net-next v6 2/4] net: Introduce generic bypass module
...ta into network stack.
>+ * Change the associated network device from lower dev to virtio.
>+ * note: already called with rcu_read_lock
>+ */
>+static rx_handler_result_t bypass_handle_frame(struct sk_buff **pskb)
>+{
>+ struct sk_buff *skb = *pskb;
>+ struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
>+
>+ skb->dev = ndev;
>+
>+ return RX_HANDLER_ANOTHER;
>+}
>+
>+static struct net_device *bypass_master_get_bymac(u8 *mac,
>+ struct bypass_ops **ops)
>+{
>+ struct bypass_master *bypass_master;
>+ struct net_device *bypa...
2019 Jul 03
3
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
...he_virtio_vsock;
> -}
> -
> static u32 virtio_transport_get_local_cid(void)
> {
> - struct virtio_vsock *vsock = virtio_vsock_get();
> + struct virtio_vsock *vsock;
> + u32 ret;
>
> - if (!vsock)
> - return VMADDR_CID_ANY;
> + rcu_read_lock();
> + vsock = rcu_dereference(the_virtio_vsock);
> + if (!vsock) {
> + ret = VMADDR_CID_ANY;
> + goto out_rcu;
> + }
>
> - return vsock->guest_cid;
> + ret = vsock->guest_cid;
> +out_rcu:
> + rcu_read_unlock();
> + return ret;
> }
>
> static void virtio_transport_loopback...
2019 Jul 03
3
[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
...he_virtio_vsock;
> -}
> -
> static u32 virtio_transport_get_local_cid(void)
> {
> - struct virtio_vsock *vsock = virtio_vsock_get();
> + struct virtio_vsock *vsock;
> + u32 ret;
>
> - if (!vsock)
> - return VMADDR_CID_ANY;
> + rcu_read_lock();
> + vsock = rcu_dereference(the_virtio_vsock);
> + if (!vsock) {
> + ret = VMADDR_CID_ANY;
> + goto out_rcu;
> + }
>
> - return vsock->guest_cid;
> + ret = vsock->guest_cid;
> +out_rcu:
> + rcu_read_unlock();
> + return ret;
> }
>
> static void virtio_transport_loopback...