search for: __gfp_retry_mayfail

Displaying 20 results from an estimated 55 matches for "__gfp_retry_mayfail".

2018 Apr 18
5
[PATCH] net: don't use kvzalloc for DMA memory
...inux-2.6/net/core/dev.c 2018-04-18 16:24:43.000000000 +0200 > > @@ -8366,7 +8366,8 @@ struct net_device *alloc_netdev_mqs(int > > /* ensure 32-byte alignment of whole construct */ > > alloc_size += NETDEV_ALIGN - 1; > > > > - p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); > > + WARN_ON(alloc_size > PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); > > + p = kzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); > > if (!p) > > return NULL; > > > > > > Since when a net_device needs to be in DMA zone ??? > > I...
2018 Apr 18
0
[PATCH] net: don't use kvzalloc for DMA memory
...re/dev.c 2018-04-18 16:24:43.000000000 +0200 >>> @@ -8366,7 +8366,8 @@ struct net_device *alloc_netdev_mqs(int >>> /* ensure 32-byte alignment of whole construct */ >>> alloc_size += NETDEV_ALIGN - 1; >>> >>> - p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); >>> + WARN_ON(alloc_size > PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); >>> + p = kzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); >>> if (!p) >>> return NULL; >>> >>> >> >> Since when a net_device needs to be in DM...
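For context on the exchange above: kvzalloc() tries kmalloc() first and falls back to vmalloc for large requests, and that fallback is only virtually contiguous, which is why it is problematic if the buffer is later handed to DMA mapping helpers that expect physically contiguous memory. A minimal sketch of the two alternatives being debated, using placeholder alloc_ctrl_* names rather than the real alloc_netdev_mqs() code:

    #include <linux/bug.h>      /* WARN_ON() */
    #include <linux/mm.h>       /* PAGE_SIZE, kvzalloc(), kvfree() */
    #include <linux/mmzone.h>   /* PAGE_ALLOC_COSTLY_ORDER */
    #include <linux/slab.h>     /* kzalloc(), kfree() */

    /* Variant A (current code): kmalloc first, vmalloc fallback.  The
     * __GFP_RETRY_MAYFAIL part lets the kmalloc attempt retry reclaim but
     * return NULL instead of invoking the OOM killer. */
    static void *alloc_ctrl_kv(size_t size)
    {
        return kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); /* kvfree() later */
    }

    /* Variant B (quoted patch): insist on physically contiguous memory and
     * warn if the request exceeds the "costly" order
     * (2^PAGE_ALLOC_COSTLY_ORDER pages) that the page allocator is expected
     * to satisfy reliably. */
    static void *alloc_ctrl_contig(size_t size)
    {
        WARN_ON(size > PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
        return kzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); /* kfree() later */
    }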
2019 Sep 27
5
[PATCH] vhost: introduce mdev based hardware backend
...ev_probe(struct device *dev) > > +{ > > + struct mdev_device *mdev = mdev_from_dev(dev); > > + const struct virtio_mdev_device_ops *ops = mdev_get_dev_ops(mdev); > > + struct vhost_mdev *m; > > + int nvqs, r; > > + > > + m = kzalloc(sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL); > > + if (!m) > > + return -ENOMEM; > > + > > + mutex_init(&m->mutex); > > + > > + nvqs = ops->get_queue_max(mdev); > > + m->nvqs = nvqs; > > > The name could be confusing, get_queue_max() is to get the maximum number of > entr...
2019 Sep 27
1
[PATCH] vhost: introduce mdev based hardware backend
...; + struct mdev_device *mdev = mdev_from_dev(dev); > > > > + const struct virtio_mdev_device_ops *ops = mdev_get_dev_ops(mdev); > > > > + struct vhost_mdev *m; > > > > + int nvqs, r; > > > > + > > > > + m = kzalloc(sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL); > > > > + if (!m) > > > > + return -ENOMEM; > > > > + > > > > + mutex_init(&m->mutex); > > > > + > > > > + nvqs = ops->get_queue_max(mdev); > > > > + m->nvqs = nvqs; > > > The name could be...
2020 Feb 18
2
[PATCH] vhost: introduce vDPA based backend
..._vdpa *v; > + struct device *d; > + int minor, nvqs; > + int r; > + > + /* Currently, we only accept the network devices. */ > + if (ops->get_device_id(vdpa) != VIRTIO_ID_NET) { > + r = -ENOTSUPP; > + goto err; > + } > + > + v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL); > + if (!v) { > + r = -ENOMEM; > + goto err; > + } > + > + nvqs = VHOST_VDPA_VQ_MAX; > + > + v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), > + GFP_KERNEL); > + if (!v->vqs) { > + r = -ENOMEM; > + goto err_alloc_vqs; > + } &g...
2017 Nov 09
0
[PATCH] vhost/vsock: fix uninitialized vhost_vsock->guest_cid
The vhost_vsock->guest_cid field is uninitialized when /dev/vhost-vsock is opened until the VHOST_VSOCK_SET_GUEST_CID ioctl is called. kvmalloc(..., GFP_KERNEL | __GFP_RETRY_MAYFAIL) does not zero memory. All other vhost_vsock fields are initialized explicitly so just initialize this field too. Signed-off-by: Stefan Hajnoczi <stefanha at redhat.com> --- drivers/vhost/vsock.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vso...
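The commit message above is the whole story: kvmalloc() with GFP_KERNEL | __GFP_RETRY_MAYFAIL, unlike kvzalloc(), returns uninitialized memory, so any field that is not explicitly set keeps stale contents. A minimal sketch of the pattern it describes (the demo_* names and trimmed struct are placeholders, not the actual vhost_vsock layout):

    #include <linux/mm.h>       /* kvmalloc(), kvfree() */
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_vsock {
        u32 guest_cid;
        /* ... every other field is initialized explicitly in open() ... */
    };

    static struct demo_vsock *demo_vsock_open(void)
    {
        struct demo_vsock *vsock;

        vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!vsock)
            return NULL;

        vsock->guest_cid = 0;   /* the explicit initialization the fix adds */
        return vsock;           /* release with kvfree() */
    }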
2019 Jun 06
1
memory leak in vhost_net_ioctl
...progress. Protected by tx vq lock. */ bool tx_flush; + bool ld; /* Last dinner */ /* Private page frag */ struct page_frag page_frag; /* Refcount bias of page frag */ @@ -1283,6 +1284,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!n) return -ENOMEM; + n->ld = false; vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL); if (!vqs) { kvfree(n); @@ -1376,7 +1378,10 @@ static void vhost_net_flush(struct vhost_net *n) n->tx_flush = true; mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mute...
2019 Sep 27
0
[PATCH] vhost: introduce mdev based hardware backend
...e *dev) >>> +{ >>> + struct mdev_device *mdev = mdev_from_dev(dev); >>> + const struct virtio_mdev_device_ops *ops = mdev_get_dev_ops(mdev); >>> + struct vhost_mdev *m; >>> + int nvqs, r; >>> + >>> + m = kzalloc(sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL); >>> + if (!m) >>> + return -ENOMEM; >>> + >>> + mutex_init(&m->mutex); >>> + >>> + nvqs = ops->get_queue_max(mdev); >>> + m->nvqs = nvqs; >> The name could be confusing, get_queue_max() is to get the maximum number...
2020 Nov 03
0
[PATCH 04/17] vhost: prep vhost_dev_init users to handle failures
...* This struct is large and allocation could fail, fall back to vmalloc 433fc58e6bf2c8b Asias He 2016-07-28 612 * if there is no other way. 433fc58e6bf2c8b Asias He 2016-07-28 613 */ dcda9b04713c3f6 Michal Hocko 2017-07-12 614 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); 433fc58e6bf2c8b Asias He 2016-07-28 615 if (!vsock) 433fc58e6bf2c8b Asias He 2016-07-28 616 return -ENOMEM; 433fc58e6bf2c8b Asias He 2016-07-28 617 433fc58e6bf2c8b Asias He 2016-07-28 618 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KER...
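The comment quoted above ("This struct is large and allocation could fail, fall back to vmalloc") explains why the flag is passed here at all: for larger requests, kvmalloc() normally tells the kmalloc() attempt not to retry hard so that it can fall back to vmalloc quickly, whereas __GFP_RETRY_MAYFAIL lets the physically contiguous attempt retry reclaim, still without invoking the OOM killer, before the vmalloc fallback is used. A sketch under that assumption, with a placeholder struct standing in for vhost_vsock:

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_vsock_big {
        char payload[1 << 16];  /* stands in for the embedded virtqueues etc. */
    };

    static struct demo_vsock_big *demo_vsock_alloc(void)
    {
        /* Result may come from kmalloc or the vmalloc area: free with kvfree(). */
        return kvmalloc(sizeof(struct demo_vsock_big),
                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    }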
2020 Feb 19
0
[PATCH] vhost: introduce vDPA based backend
...minor, nvqs; > > + int r; > > + > > + /* Currently, we only accept the network devices. */ > > + if (ops->get_device_id(vdpa) != VIRTIO_ID_NET) { > > + r = -ENOTSUPP; > > + goto err; > > + } > > + > > + v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL); > > + if (!v) { > > + r = -ENOMEM; > > + goto err; > > + } > > + > > + nvqs = VHOST_VDPA_VQ_MAX; > > + > > + v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), > > + GFP_KERNEL); > > + if (!v->vqs) { > >...
2019 Sep 26
6
[PATCH] vhost: introduce mdev based hardware backend
...ase, + .ioctl = vhost_mdev_unlocked_ioctl, +}; + +static int vhost_mdev_probe(struct device *dev) +{ + struct mdev_device *mdev = mdev_from_dev(dev); + const struct virtio_mdev_device_ops *ops = mdev_get_dev_ops(mdev); + struct vhost_mdev *m; + int nvqs, r; + + m = kzalloc(sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!m) + return -ENOMEM; + + mutex_init(&m->mutex); + + nvqs = ops->get_queue_max(mdev); + m->nvqs = nvqs; + + m->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + GFP_KERNEL); + if (!m->vqs) { + r = -ENOMEM; + goto err; + } + + r = vfio_add_group_dev(d...
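The probe quoted above follows a common two-step shape: a small control structure allocated with GFP_KERNEL | __GFP_RETRY_MAYFAIL, then a kmalloc_array() of virtqueues, with a goto-based unwind when the second allocation fails. A self-contained sketch of that shape (the demo_* names are placeholders, not the driver's symbols):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct demo_dev {
        int nvqs;
        void **vqs;             /* stands in for struct vhost_virtqueue * */
    };

    static int demo_probe(int nvqs)
    {
        struct demo_dev *m;
        int r;

        m = kzalloc(sizeof(*m), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!m)
            return -ENOMEM;

        m->nvqs = nvqs;
        m->vqs = kmalloc_array(nvqs, sizeof(*m->vqs), GFP_KERNEL);
        if (!m->vqs) {
            r = -ENOMEM;
            goto err_free_dev;  /* undo everything allocated so far */
        }

        return 0;

    err_free_dev:
        kfree(m);
        return r;
    }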
2019 Jun 13
0
memory leak in vhost_net_ioctl
...lush; > + bool ld; /* Last dinner */ > /* Private page frag */ > struct page_frag page_frag; > /* Refcount bias of page frag */ > @@ -1283,6 +1284,7 @@ static int vhost_net_open(struct inode *inode, > struct file *f) > n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); > if (!n) > return -ENOMEM; > + n->ld = false; > vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL); > if (!vqs) { > kvfree(n); > @@ -1376,7 +1378,10 @@ static void vhost_net_flush(struct vhost_net *n) > n->tx_flush...
2020 Mar 12
0
[RFC for Linux] virtio_balloon: Add VIRTIO_BALLOON_F_THP_ORDER to handle THP spilt issue
...for (num_pfns = 0; num_pfns < num; > num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { > - struct page *page = balloon_page_alloc(); > + struct page *page; > + > + if (page_order) > + page = alloc_pages(__GFP_HIGHMEM | > + __GFP_KSWAPD_RECLAIM | > + __GFP_RETRY_MAYFAIL | > + __GFP_NOWARN | __GFP_NOMEMALLOC, The set of flags is inconsistent with balloon_page_alloc. Pls extend that do not bypass it. > + page_order); > + else > + page = balloon_page_alloc(); > > if (!page) { > dev_info_ratelimited(&vb->vdev-&g...
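The review comment above objects to open-coding a different GFP mask for the higher-order case instead of extending balloon_page_alloc(). One hypothetical way to follow that advice is to reuse the order-0 flag set and only swap __GFP_NORETRY for __GFP_RETRY_MAYFAIL on higher orders; balloon_page_alloc_order() is an illustrative name, not an existing kernel helper:

    #include <linux/balloon_compaction.h>   /* balloon_mapping_gfp_mask() */
    #include <linux/gfp.h>

    static inline struct page *balloon_page_alloc_order(unsigned int order)
    {
        gfp_t gfp = balloon_mapping_gfp_mask() | __GFP_NOMEMALLOC | __GFP_NOWARN;

        /* Only one of __GFP_NORETRY / __GFP_RETRY_MAYFAIL may be set: keep
         * the existing fail-fast behaviour for order 0 and let higher-order
         * attempts retry reclaim, as the quoted patch intends. */
        gfp |= order ? __GFP_RETRY_MAYFAIL : __GFP_NORETRY;

        return alloc_pages(gfp, order);
    }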
2018 Sep 12
0
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) @@ -1081,6 +1218,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct vhost_dev *dev; struct vhost_virtqueue **vqs; void **queue; + struct xdp_buff *xdp; int i; n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); @@ -1101,6 +1239,14 @@ static int vhost_net_open(struct inode *inode, struct file *f) } n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; + xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL); + if (!xdp) { + kfree(vqs); + kvfree(n); + kfree(queue); + } + n->vqs[VHOST_NET_VQ_TX...
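As quoted (and truncated) above, the error branch after the xdp kmalloc_array() frees vqs, n and queue, but no return statement is visible before execution continues; whether one exists past the cut is not shown. A self-contained sketch of the usual shape of such a branch, with placeholder names rather than the real vhost_net_open() context:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    #define DEMO_BATCH 64       /* stands in for VHOST_NET_BATCH */

    struct demo_xdp { void *data; };

    static int demo_open_tail(void *n, void *vqs, void *queue)
    {
        struct demo_xdp *xdp;

        xdp = kmalloc_array(DEMO_BATCH, sizeof(*xdp), GFP_KERNEL);
        if (!xdp) {
            kfree(vqs);
            kvfree(n);          /* n was kvmalloc()ed earlier in the open path */
            kfree(queue);
            return -ENOMEM;     /* bail out instead of continuing with xdp == NULL */
        }

        kfree(xdp);             /* placeholder: the real code stores it in n */
        return 0;
    }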