search for: 2375,7

Displaying 20 results from an estimated 48 matches for "2375,7".

2019 Oct 12
2
[PATCH RFC v1 2/2] vhost: batching fetches
...*vq) > --vq->ndescs; > } > > +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ > + VRING_DESC_F_NEXT) > static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, u16 id) > { > struct vhost_desc *h; > @@ -2375,7 +2379,7 @@ static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, > h = &vq->descs[vq->ndescs++]; > h->addr = vhost64_to_cpu(vq, desc->addr); > h->len = vhost32_to_cpu(vq, desc->len); > - h->flags = vhost16_to_cpu(vq, desc->...
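The snippet above adds a VHOST_DESC_FLAGS mask covering the three descriptor flags vhost handles (INDIRECT, WRITE, NEXT), but the search excerpt cuts off the new h->flags assignment. Below is a minimal stand-alone sketch of how such a mask is typically applied when caching a fetched descriptor; the struct layout, the helper name, and the choice to mask rather than reject unknown bits are assumptions, not the patch itself.

#include <stdint.h>

/* Flag values as defined by the virtio ring ABI. */
#define VRING_DESC_F_NEXT      1
#define VRING_DESC_F_WRITE     2
#define VRING_DESC_F_INDIRECT  4

/* Only the flags vhost knows how to handle when caching descriptors. */
#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \
                          VRING_DESC_F_NEXT)

/* Compact cached form of a fetched descriptor (layout assumed). */
struct cached_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t id;
};

/* Cache one descriptor, keeping only the flag bits vhost understands. */
static void cache_desc(struct cached_desc *h, uint64_t addr, uint32_t len,
		       uint16_t flags, uint16_t id)
{
	h->addr  = addr;
	h->len   = len;
	h->flags = flags & VHOST_DESC_FLAGS;   /* drop unsupported bits */
	h->id    = id;
}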
2020 Jun 03
2
[PATCH RFC 07/13] vhost: format-independent API for used buffers
...& VRING_DESC_F_NEXT)) > break; > @@ -2365,7 +2369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > vq->first_desc = i + 1; > > - return ret; > + return 1; > > err: > for (i = vq->first_desc; i < vq->ndescs; ++i) > @@ -2375,7 +2379,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > return ret; > } > -EXPORT_SYMBOL_GPL(vhost_get_vq_desc); > +EXPORT_SYMBOL_GPL(vhost_get_avail_buf); > + > +/* Reverse the effect of vhost_get_avail_buf. Useful for error handling. */ > +void vhost_di...
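The diff above renames vhost_get_vq_desc() to vhost_get_avail_buf() (returning 1 once a buffer has been fetched) and adds a discard helper whose full name is truncated in the excerpt. The sketch below shows the resulting fetch/discard error-handling pattern, with stand-in types and stub bodies so it compiles on its own; the real structures and signatures in the series may differ.

/* Stand-ins so the sketch is self-contained; not the kernel definitions. */
struct vhost_virtqueue { unsigned int pending; };
struct vhost_buf       { unsigned int descs; };

/* Returns 1 when a buffer was fetched, 0 when none is available, <0 on error. */
static int vhost_get_avail_buf(struct vhost_virtqueue *vq, struct vhost_buf *buf)
{
	if (!vq->pending)
		return 0;
	vq->pending--;
	buf->descs = 1;
	return 1;
}

/* Reverse the effect of vhost_get_avail_buf(); useful on error paths. */
static void vhost_discard_avail_bufs(struct vhost_virtqueue *vq,
				     struct vhost_buf *buf, unsigned int count)
{
	(void)buf;
	vq->pending += count;
}

/* Typical caller: fetch a buffer, hand it back if processing fails. */
static int handle_one_buf(struct vhost_virtqueue *vq,
			  int (*process)(struct vhost_buf *))
{
	struct vhost_buf buf;
	int r = vhost_get_avail_buf(vq, &buf);

	if (r <= 0)
		return r;
	if (process(&buf) < 0) {
		vhost_discard_avail_bufs(vq, &buf, 1);
		return -1;
	}
	return 1;
}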
2020 Jun 02
0
[PATCH RFC 07/13] vhost: format-independent API for used buffers
...d = desc->id; + ++buf->descs; if (!(desc->flags & VRING_DESC_F_NEXT)) break; @@ -2365,7 +2369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, vq->first_desc = i + 1; - return ret; + return 1; err: for (i = vq->first_desc; i < vq->ndescs; ++i) @@ -2375,7 +2379,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, return ret; } -EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +EXPORT_SYMBOL_GPL(vhost_get_avail_buf); + +/* Reverse the effect of vhost_get_avail_buf. Useful for error handling. */ +void vhost_discard_avail_bufs(struct vhost_virtqueue *vq...
2019 Oct 11
0
[PATCH RFC v1 2/2] vhost: batching fetches
...@ static void pop_split_desc(struct vhost_virtqueue *vq) --vq->ndescs; } +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ + VRING_DESC_F_NEXT) static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, u16 id) { struct vhost_desc *h; @@ -2375,7 +2379,7 @@ static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, h = &vq->descs[vq->ndescs++]; h->addr = vhost64_to_cpu(vq, desc->addr); h->len = vhost32_to_cpu(vq, desc->len); - h->flags = vhost16_to_cpu(vq, desc->flags); + h->flags...
2020 Jun 04
0
[PATCH RFC 07/13] vhost: format-independent API for used buffers
...C_F_NEXT)) > > break; > > @@ -2365,7 +2369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > vq->first_desc = i + 1; > > - return ret; > > + return 1; > > err: > > for (i = vq->first_desc; i < vq->ndescs; ++i) > > @@ -2375,7 +2379,15 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > return ret; > > } > > -EXPORT_SYMBOL_GPL(vhost_get_vq_desc); > > +EXPORT_SYMBOL_GPL(vhost_get_avail_buf); > > + > > +/* Reverse the effect of vhost_get_avail_buf. Useful for error handling....
2019 Oct 12
0
[PATCH RFC v1 2/2] vhost: batching fetches
...cs; > > } > > +#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \ > > + VRING_DESC_F_NEXT) > > static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, u16 id) > > { > > struct vhost_desc *h; > > @@ -2375,7 +2379,7 @@ static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, > > h = &vq->descs[vq->ndescs++]; > > h->addr = vhost64_to_cpu(vq, desc->addr); > > h->len = vhost32_to_cpu(vq, desc->len); > > - h->flags = vhost16...
2019 Oct 11
8
[PATCH RFC v1 0/2] vhost: ring format independence
So the idea is as follows: we convert descriptors to an independent format first, and process that later, converting it to an iov. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while we are fetching multiple descriptors. And perhaps more importantly,
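A stand-alone sketch of the two-stage scheme described here follows: a tight first loop copies ring descriptors into a format-independent cache, and a second pass turns that cache into an iovec. Structure names are illustrative, and guest addresses are treated as plain offsets into one mapped region purely to keep the example short; the real vhost code translates guest addresses through its memory map.

#include <stdint.h>
#include <stddef.h>
#include <sys/uio.h>

/* Format-independent cached descriptor (illustrative layout). */
struct cached_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t id;
};

/* Stage 1: tight fetch loop, touch only the ring and fill the cache. */
static size_t fetch_descs(const struct cached_desc *ring, size_t avail,
			  struct cached_desc *cache, size_t cache_size)
{
	size_t n = avail < cache_size ? avail : cache_size;

	for (size_t i = 0; i < n; i++)
		cache[i] = ring[i];	/* sequential reads, cache friendly */
	return n;
}

/* Stage 2: convert cached descriptors into an iovec for the actual I/O. */
static size_t descs_to_iov(const struct cached_desc *cache, size_t n,
			   struct iovec *iov, char *base)
{
	for (size_t i = 0; i < n; i++) {
		iov[i].iov_base = base + cache[i].addr;
		iov[i].iov_len  = cache[i].len;
	}
	return n;
}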
2019 Nov 02
13
[PATCH v2 0/9] drm/nouveau: Various fixes for GP10B
From: Thierry Reding <treding at nvidia.com> Hi Ben, here's a revised subset of the patches I had sent out a couple of weeks ago. I've reworked the BAR2 accesses in the way that you had suggested, which at least for GP10B turned out to be fairly trivial to do. I have not looked in detail at this for GV11B yet, but a cursory look showed that BAR2 is accessed in more places, so the
2019 Dec 09
11
[PATCH v3 0/9] drm/nouveau: Various fixes for GP10B
From: Thierry Reding <treding at nvidia.com> Hi Ben, here's a revised subset of the patches I had sent out a couple of weeks ago. I've reworked the BAR2 accesses in the way that you had suggested, which at least for GP10B turned out to be fairly trivial to do. I have not looked in detail at this for GV11B yet, but a cursory look showed that BAR2 is accessed in more places, so the
2006 Aug 15
0
[git patches] ocfs2 updates
...s2: limit cluster bitmap information saved at mount ocfs2: better group descriptor consistency checks ocfs2: allocation hints diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 1b8346d..9503240 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2375,7 +2375,6 @@ leave: mlog(0, "returning %d\n", ret); return ret; } -EXPORT_SYMBOL_GPL(dlm_migrate_lockres); int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index b0c3134..37be4b2 100644 ---...
2019 Oct 12
2
[PATCH RFC v2 0/2] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that later, converting it to an iov. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while
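The "keep SMAP disabled" remark refers to opening the user-access window once for a whole batch instead of toggling it per descriptor. A kernel-flavoured sketch of that pattern is below; user_access_begin()/unsafe_get_user() are real uaccess primitives, but their exact signatures vary across kernel versions, and this loop is an illustration of the idea rather than the vhost patch itself.

#include <linux/uaccess.h>
#include <linux/virtio_ring.h>

/* Fetch n descriptors inside a single user-access (SMAP) window. */
static int fetch_descs_batched(const struct vring_desc __user *udescs,
			       struct vring_desc *cache, unsigned int n)
{
	unsigned int i;

	if (!user_access_begin(udescs, n * sizeof(*udescs)))
		return -EFAULT;
	for (i = 0; i < n; i++) {
		/* stac/clac happens once around the loop, not per access */
		unsafe_get_user(cache[i].addr,  &udescs[i].addr,  efault);
		unsafe_get_user(cache[i].len,   &udescs[i].len,   efault);
		unsafe_get_user(cache[i].flags, &udescs[i].flags, efault);
		unsafe_get_user(cache[i].next,  &udescs[i].next,  efault);
	}
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}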
2019 Oct 13
4
[PATCH RFC v3 0/4] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that later, converting it to an iov. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while
2019 Oct 13
6
[PATCH RFC v4 0/5] vhost: ring format independence
This adds infrastructure required for supporting multiple ring formats. The idea is as follows: we convert descriptors to an independent format first, and process that later, converting it to an iov. The point is that we have a tight loop that fetches descriptors, which is good for cache utilization. This will also allow all kinds of batching tricks - e.g. it seems possible to keep SMAP disabled while
2011 Jan 20
1
[PATCH] change acquire/release_console_sem() to console_lock/unlock()
...ic int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co if (!vc_cons_allocated(currcons)) { /* could this happen? */ printk_once("con_write: tty %d not allocated\n", currcons+1); - release_console_sem(); + console_unlock(); return 0; } @@ -2375,7 +2375,7 @@ rescan_last_byte: } FLUSH console_conditional_schedule(); - release_console_sem(); + console_unlock(); notify_update(vc); return n; #undef FLUSH @@ -2388,11 +2388,11 @@ rescan_last_byte: * us to do the switches asynchronously (needed when we want * to switch due to a key...
2014 Jan 17
7
[PATCH net-next v6 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive packet buffers. Network throughput for workloads with large average packet size can be improved by posting larger receive packet buffers. However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE) buffers reduces the throughput of workloads that do not benefit from GRO and have no large inbound packets. This
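A tiny stand-alone sketch of the auto-tuning idea follows: keep a running average of observed packet lengths and size the next receive buffer from it, so small-packet workloads keep a small truesize while large-packet workloads get big buffers. The weight, bounds, and names here are illustrative assumptions; the real driver logic differs in detail.

#define MRG_BUF_MIN 1536u	/* roughly one MTU-sized packet */
#define MRG_BUF_MAX 4096u	/* cap at one page */

static unsigned int avg_pkt_len = MRG_BUF_MIN;

/* Update the running average with each received packet's length. */
static void record_pkt_len(unsigned int len)
{
	/* weight 1/64 on the new sample, 63/64 on history */
	avg_pkt_len = (avg_pkt_len * 63 + len) / 64;
}

/* Pick the next receive buffer size from the current average. */
static unsigned int mergeable_buf_len(void)
{
	unsigned int len = avg_pkt_len;

	if (len < MRG_BUF_MIN)
		len = MRG_BUF_MIN;
	if (len > MRG_BUF_MAX)
		len = MRG_BUF_MAX;
	return len;
}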