Displaying 20 results from an estimated 124 matches for "offset_in_page".
2016 Apr 17
1
[PATCH v3 08/16] zsmalloc: squeeze freelist into page->mapping
Hello,
On (03/30/16 16:12), Minchan Kim wrote:
[..]
> +static void objidx_to_page_and_offset(struct size_class *class,
> + struct page *first_page,
> + unsigned long obj_idx,
> + struct page **obj_page,
> + unsigned long *offset_in_page)
> {
> - unsigned long obj;
> + int i;
> + unsigned long offset;
> + struct page *cursor;
> + int nr_page;
>
> - if (!page) {
> - VM_BUG_ON(obj_idx);
> - return NULL;
> - }
> + offset = obj_idx * class->size;
so we already know the `offset' before we...
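For context, the conversion this hunk builds up can be sketched as follows. It is reconstructed from the quoted lines only (get_next_page() and struct size_class are zsmalloc internals), so it is not necessarily the version that was merged:

static void objidx_to_page_and_offset(struct size_class *class,
				      struct page *first_page,
				      unsigned long obj_idx,
				      struct page **obj_page,
				      unsigned long *offset_in_page)
{
	/* Linear offset of the object inside the zspage. */
	unsigned long offset = obj_idx * class->size;
	struct page *cursor = first_page;
	int nr_page = offset >> PAGE_SHIFT;	/* whole pages to skip */
	int i;

	for (i = 0; i < nr_page; i++)
		cursor = get_next_page(cursor);

	*obj_page = cursor;
	*offset_in_page = offset & ~PAGE_MASK;	/* remainder inside that page */
}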
2019 Sep 05
2
[PATCH v2] drm/virtio: Use vmalloc for command buffer allocations.
...a_buf);
kmem_cache_free(vgdev->vbufs, vbuf);
}
@@ -251,13 +251,70 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
+/* How many bytes left in this page. */
+static unsigned int rest_of_page(void *data)
+{
+ return PAGE_SIZE - offset_in_page(data);
+}
+
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+ int nents, ret, s, i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct page *pg;
+
+ *sg_ents = 0;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERN...
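The excerpt cuts off mid-allocation; the core of such a helper walks the vmalloc area one page at a time, using offset_in_page() to clamp the first (and possibly last) chunk. A rough sketch of the idea, not the patch as posted (the posted version also reports the entry count via *sg_ents):

#include <linux/kernel.h>	/* DIV_ROUND_UP(), min_t() */
#include <linux/mm.h>		/* offset_in_page(), PAGE_SIZE */
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>	/* vmalloc_to_page() */

/* Sketch: build an sg_table covering a vmalloc'd buffer, one entry per page. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size)
{
	unsigned int nents = DIV_ROUND_UP(offset_in_page(data) + size, PAGE_SIZE);
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, nents, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, nents, i) {
		/* Bytes remaining in the current page of the vmalloc area. */
		unsigned int len = min_t(unsigned int, size,
					 PAGE_SIZE - offset_in_page(data));

		sg_set_page(sg, vmalloc_to_page(data), len, offset_in_page(data));
		data += len;
		size -= len;
	}

	return sgt;
}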
2016 Mar 17
1
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
...nd maybe we could have a better function name
static unsigned long *map_handle(struct size_class *class,
struct page *first_page, unsigned long obj_idx)
{
struct page *cursor = first_page;
unsigned long offset = obj_idx * class->size;
int nr_page = offset >> PAGE_SHIFT;
unsigned long offset_in_page = offset & ~PAGE_MASK;
void *addr;
int i;
if (class->huge) {
VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
return &page_private(first_page);
}
for (i = 0; i < nr_page; i++)
cursor = get_next_page(cursor);
addr = kmap_atomic(cursor);
return addr + offset_in_page;
}
static vo...
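A small aside on the open-coded masking above: `offset & ~PAGE_MASK' is exactly what the offset_in_page() helper expands to, and since the macro casts its argument to unsigned long it works on a plain byte offset as well as on a pointer:

#include <linux/mm.h>	/* PAGE_SHIFT, PAGE_MASK, offset_in_page() */

/* Equivalent to the two locals computed in map_handle() above. */
static inline void split_linear_offset(unsigned long offset,
				       int *nr_page, unsigned long *in_page)
{
	*nr_page = offset >> PAGE_SHIFT;	/* whole pages to advance */
	*in_page = offset_in_page(offset);	/* same as offset & ~PAGE_MASK */
}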
2018 Apr 20
2
[PATCH] kvmalloc: always use vmalloc if CONFIG_DEBUG_VM
...alloc because we can't
DMA onto the stack any more?). We already have a few places which do
handle sgs of vmalloced addresses, such as the nx crypto driver:
if (is_vmalloc_addr(start_addr))
sg_addr = page_to_phys(vmalloc_to_page(start_addr))
+ offset_in_page(sg_addr);
else
sg_addr = __pa(sg_addr);
and videobuf:
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(page_to_pfn(pg) >= (1 << (32 - PAGE_SHIFT)));
sg_set_page(&...
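Both quoted call sites rely on the same rule: __pa()/virt_to_page() are only valid for linear-mapped memory, so a vmalloc'd pointer has to be resolved through vmalloc_to_page() and the in-page offset added back. A hedged sketch of that resolution (the helper name is illustrative):

#include <linux/io.h>		/* page_to_phys() */
#include <linux/mm.h>		/* offset_in_page(), __pa(), is_vmalloc_addr() */
#include <linux/vmalloc.h>	/* vmalloc_to_page() */

/* Physical address of a kernel virtual address, vmalloc-aware. */
static phys_addr_t kvirt_to_phys(void *addr)
{
	if (is_vmalloc_addr(addr))
		return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);

	return __pa(addr);	/* kmalloc/lowmem: the linear map applies */
}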
2007 Jun 07
4
[PATCH RFC 0/3] Virtio draft II
Hi again all,
It turns out that networking really wants ordered requests, which the
previous patches didn't allow. This patch changes it to a callback
mechanism; kudos to Avi.
The downside is that locking is more complicated, and after a few dead
ends I implemented the simplest solution: the struct virtio_device
contains the spinlock to use, and it's held when your callbacks get
2019 Sep 06
0
[PATCH v2] drm/virtio: Use vmalloc for command buffer allocations.
> +/* How many bytes left in this page. */
> +static unsigned int rest_of_page(void *data)
> +{
> + return PAGE_SIZE - offset_in_page(data);
> +}
Not needed.
> +/* Create sg_table from a vmalloc'd buffer. */
> +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
> +{
> + int nents, ret, s, i;
> + struct sg_table *sgt;
> + struct scatterlist *sg;
> + struct page *pg;
> +...
2016 Mar 15
2
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
On (03/11/16 16:30), Minchan Kim wrote:
> -static void *location_to_obj(struct page *page, unsigned long obj_idx)
> +static void objidx_to_page_and_ofs(struct size_class *class,
> + struct page *first_page,
> + unsigned long obj_idx,
> + struct page **obj_page,
> + unsigned long *ofs_in_page)
this looks big; 5 params, function "returning" both page and
2023 Feb 16
0
[RFC PATCH v1 07/12] vsock/virtio: MGS_ZEROCOPY flag support
...>+ if (free_space < iov_iter->count)
>+ return -1;
>+
>+ for (pages = 0, i = 0; i < iov_iter->nr_segs; i++) {
>+ const struct iovec *iovec;
>+ int pages_in_elem;
>+
>+ iovec = &iov_iter->iov[i];
>+
>+ /* Base must be page aligned. */
>+ if (offset_in_page(iovec->iov_base))
>+ return -1;
>+
>+ /* Only last element could have not page aligned size. */
>+ if (i != (iov_iter->nr_segs - 1)) {
>+ if (offset_in_page(iovec->iov_len))
>+ return -1;
>+
>+ pages_in_elem = iovec->iov_len >> PAGE_SHIFT;
>+...
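Condensed, the check in this hunk is: every iovec base must be page aligned, every element except the last must also have a page-aligned length, and the page total is accumulated so it can be compared with the available space. A sketch of just that walk (the -1 error convention follows the quoted code; this is not the series as merged):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/mm.h>		/* offset_in_page(), PAGE_SIZE */
#include <linux/uio.h>		/* struct iovec */

/* Count the pages covered by an iovec array that is usable for zerocopy. */
static int iov_count_aligned_pages(const struct iovec *iov, unsigned long nr_segs)
{
	int pages = 0;
	unsigned long i;

	for (i = 0; i < nr_segs; i++) {
		/* Base must be page aligned. */
		if (offset_in_page(iov[i].iov_base))
			return -1;

		/* Only the last element may have a non-page-aligned size. */
		if (i != nr_segs - 1 && offset_in_page(iov[i].iov_len))
			return -1;

		pages += DIV_ROUND_UP(iov[i].iov_len, PAGE_SIZE);
	}

	return pages;
}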
2012 Jan 12
9
Re: [PATCH] add netconsole support for xen-netfront
...-netfront.c
> index fa67905..db638b4 100644
> --- a/drivers/net/xen-netfront.c
> +++ b/drivers/net/xen-netfront.c
> @@ -489,6 +489,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
> int frags = skb_shinfo(skb)->nr_frags;
> unsigned int offset = offset_in_page(data);
> unsigned int len = skb_headlen(skb);
> + unsigned long flags;
>
> frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
> if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
> @@ -498,12 +499,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
>...
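For reference, the frags arithmetic this hunk touches: the skb's linear area starts at offset_in_page(skb->data) inside its first page, so the number of page-sized slots it needs is the page count of [offset, offset + len). Just that computation, as a standalone sketch (illustrative, not part of the patch):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/mm.h>		/* offset_in_page(), PAGE_SIZE */
#include <linux/skbuff.h>	/* skb_headlen() */

/* Slots needed for the linear area of an skb, as computed in xennet_start_xmit(). */
static unsigned int linear_area_slots(const struct sk_buff *skb)
{
	unsigned int offset = offset_in_page(skb->data);
	unsigned int len = skb_headlen(skb);

	return DIV_ROUND_UP(offset + len, PAGE_SIZE);
}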
2023 Feb 21
2
[PATCH vhost 07/10] virtio_ring: add api virtio_dma_map() for advance dma
...void *addr, unsigned int length,
> > > + enum dma_data_direction dir)
> > > +{
> > > + struct page *page;
> > > + size_t offset;
> > > +
> > > + page = virt_to_page(addr);
> > > + offset = offset_in_page(addr);
> > > +
> > > + return virtio_dma_map_page(dev, page, offset, length, dir);
> > > +}
> > > +EXPORT_SYMBOL_GPL(virtio_dma_map);
> > > +
> > > +/**
> > > + * virtio_dma_mapping_error - check dma address
> > > + * @de...
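One constraint worth spelling out for the conversion quoted above: virt_to_page() plus offset_in_page() only decompose a linear-mapped address correctly, so a caller of the proposed helper must not pass vmalloc'd memory (including vmap'd stacks). A generic pre-check, independent of the patch itself:

#include <linux/mm.h>		/* virt_addr_valid(), is_vmalloc_addr() */
#include <linux/vmalloc.h>

/* True if virt_to_page(addr) + offset_in_page(addr) is a valid decomposition. */
static bool addr_is_linear_mapped(const void *addr)
{
	/* vmalloc/vmap space is not covered by the linear map. */
	if (is_vmalloc_addr(addr))
		return false;

	return virt_addr_valid(addr);
}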
2017 Jul 05
3
[PATCH v2] virtio-blk: add DISCARD support to virtio-blk driver
...ck_size;
+
+ range[n].reserved = cpu_to_le32(0);
+ range[n].nlba = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ n++;
+ }
+
+ if (WARN_ON_ONCE(n != segments)) {
+ kfree(range);
+ return -1;
+ }
+
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range) * segments;
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+ return 0;
+}
+
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+...
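The excerpt stops just as virtblk_request_done() starts handling the special payload; the natural counterpart of the setup above is to recover the kmalloc'd range array from the bio_vec and free it on completion. Roughly (a sketch only, since the rest of the hunk is cut off here):

#include <linux/blkdev.h>	/* struct request, RQF_SPECIAL_PAYLOAD */
#include <linux/mm.h>		/* page_address() */
#include <linux/slab.h>

/* Free the discard range array that was attached as the special payload. */
static void virtblk_free_special_payload(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
}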
+...