Paul Durrant
2010-Jan-12 17:44 UTC
[Xen-devel] Add multi-page shared ring support to xen-blkfront/back
These patches add support for multi-page shared rings to xen-blkfront/back.

Shared rings must always be a power of 2 in size, so the size is controlled
by the new 'max_ring_page_order' module parameter of xen-blkback. This
defaults to 0 (a single page), so operationally there should be no difference
with these patches in place. The maximum value of 'max_ring_page_order' is
currently limited to 2 (i.e. 4 pages).

xen-blkfront will always use the largest shared ring size supported by the
back end. To override this and continue to use a single-page ring, as before,
set the 'allow_multi_page_ring' module parameter to 0.
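For a sense of scale (the figures below are illustrative, assuming 4KB pages
and the classic 112-byte blkif ring entry, not something stated by the
patches themselves), the entry count of the shared ring grows with page order
as follows; BLK_RING_SIZE() is the macro the blkfront patch introduces:

    /* __RING_SIZE rounds the entry count down to a power of 2. */
    #define BLK_RING_SIZE(_order) \
            __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE << (_order))

    /*
     * order 0: 1 page,  32 entries
     * order 1: 2 pages, 64 entries
     * order 2: 4 pages, 128 entries  (the current maximum)
     */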
Paul Durrant
2010-Jan-12 17:44 UTC
[Xen-devel] [PATCH 1/2] Add multi-page ring support to xen-blkback.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
 drivers/xen/blkback/common.h    |   10 +++-
 drivers/xen/blkback/interface.c |  113 +++++++++++++++++++++++++++-----------
 drivers/xen/blkback/xenbus.c    |  100 ++++++++++++++++++++++++++++++-----
 3 files changed, 174 insertions(+), 49 deletions(-)

diff --git a/drivers/xen/blkback/common.h b/drivers/xen/blkback/common.h
index d12d4d5..6404a4a 100644
--- a/drivers/xen/blkback/common.h
+++ b/drivers/xen/blkback/common.h
@@ -58,6 +58,9 @@ struct vbd {

 struct backend_info;

+#define BLKIF_MAX_RING_PAGE_ORDER 2
+#define BLKIF_MAX_RING_PAGES (1<<BLKIF_MAX_RING_PAGE_ORDER)
+
 typedef struct blkif_st {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -92,14 +95,15 @@ typedef struct blkif_st {

 	wait_queue_head_t waiting_to_free;

-	grant_handle_t shmem_handle;
-	grant_ref_t    shmem_ref;
+	unsigned int   nr_shared_pages;
+	grant_handle_t shmem_handle[BLKIF_MAX_RING_PAGES];
 } blkif_t;

 blkif_t *blkif_alloc(domid_t domid);
 void blkif_disconnect(blkif_t *blkif);
 void blkif_free(blkif_t *blkif);
-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
+int blkif_map(blkif_t *blkif, unsigned long shared_pages[],
+	      unsigned int nr_shared_pages, unsigned int evtchn);

 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
 #define blkif_put(_b)					\
diff --git a/drivers/xen/blkback/interface.c b/drivers/xen/blkback/interface.c
index e397a41..8951543 100644
--- a/drivers/xen/blkback/interface.c
+++ b/drivers/xen/blkback/interface.c
@@ -56,50 +56,93 @@ blkif_t *blkif_alloc(domid_t domid)
 	return blkif;
 }

-static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-{
-	struct gnttab_map_grant_ref op;
-
-	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-			  GNTMAP_host_map, shared_page, blkif->domid);
+#define INVALID_GRANT_HANDLE ((grant_handle_t)~0U)

-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		DPRINTK(" Grant table operation failure !\n");
-		return op.status;
+static void unmap_frontend_pages(blkif_t *blkif)
+{
+	struct vm_struct *area = blkif->blk_ring_area;
+	struct gnttab_unmap_grant_ref op[BLKIF_MAX_RING_PAGES];
+	unsigned int i;
+	unsigned int j;
+
+	j = 0;
+	for (i = 0; i < blkif->nr_shared_pages; i++) {
+		unsigned long addr = (unsigned long)area->addr +
+				     (i * PAGE_SIZE);
+
+		if (blkif->shmem_handle[i] != INVALID_GRANT_HANDLE) {
+			gnttab_set_unmap_op(&op[j++], addr,
+					    GNTMAP_host_map,
+					    blkif->shmem_handle[i]);
+
+			blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+		}
 	}

-	blkif->shmem_ref = shared_page;
-	blkif->shmem_handle = op.handle;
+	blkif->nr_shared_pages = 0;

-	return 0;
+	if (j != 0) {
+		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+					      op, j))
+			BUG();
+	}
 }

-static void unmap_frontend_page(blkif_t *blkif)
+static int map_frontend_pages(blkif_t *blkif, unsigned long shared_pages[],
+			      unsigned int nr_shared_pages)
 {
-	struct gnttab_unmap_grant_ref op;
+	struct vm_struct *area = blkif->blk_ring_area;
+	struct gnttab_map_grant_ref op[BLKIF_MAX_RING_PAGES];
+	unsigned int i;
+	int status = 0;

-	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-			    GNTMAP_host_map, blkif->shmem_handle);
+	for (i = 0; i < nr_shared_pages; i++) {
+		unsigned long addr = (unsigned long)area->addr +
+				     (i * PAGE_SIZE);
+
+		gnttab_set_map_op(&op[i], addr, GNTMAP_host_map,
+				  shared_pages[i], blkif->domid);
+	}

-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op,
+				      nr_shared_pages))
 		BUG();
+
+	for (i = 0; i < nr_shared_pages; i++) {
+		status = op[i].status;
+		if (status != 0) {
+			blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+			continue;
+		}
+
+		blkif->shmem_handle[i] = op[i].handle;
+	}
+
+	blkif->nr_shared_pages = nr_shared_pages;
+
+	if (status != 0) {
+		DPRINTK(" Grant table operation failure !\n");
+		unmap_frontend_pages(blkif);
+	}
+
+	return status;
 }

-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
+int blkif_map(blkif_t *blkif, unsigned long shared_pages[],
+	      unsigned int nr_shared_pages, unsigned int evtchn)
 {
+	unsigned long size = nr_shared_pages * PAGE_SIZE;
 	int err;

 	/* Already connected through? */
 	if (blkif->irq)
 		return 0;

-	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
+	blkif->blk_ring_area = alloc_vm_area(size);
+	if (blkif->blk_ring_area == NULL)
 		return -ENOMEM;

-	err = map_frontend_page(blkif, shared_page);
+	err = map_frontend_pages(blkif, shared_pages, nr_shared_pages);
 	if (err) {
 		free_vm_area(blkif->blk_ring_area);
 		return err;
@@ -110,21 +153,21 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 	{
 		struct blkif_sring *sring;
 		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
-		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+		BACK_RING_INIT(&blkif->blk_rings.native, sring, size);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_32:
 	{
-		struct blkif_x86_32_sring *sring_x86_32;
-		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
-		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+		struct blkif_x86_32_sring *sring;
+		sring = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring, size);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_64:
 	{
-		struct blkif_x86_64_sring *sring_x86_64;
-		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
-		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+		struct blkif_x86_64_sring *sring;
+		sring = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring, size);
 		break;
 	}
 	default:
@@ -132,14 +175,17 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 	}

 	err = bind_interdomain_evtchn_to_irqhandler(
-		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
+		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend",
+		blkif);
 	if (err < 0) {
-		unmap_frontend_page(blkif);
+		unmap_frontend_pages(blkif);
 		free_vm_area(blkif->blk_ring_area);
+		blkif->blk_ring_area = NULL;
 		blkif->blk_rings.common.sring = NULL;
 		return err;
 	}
+
 	blkif->irq = err;

 	return 0;
@@ -162,8 +208,9 @@ void blkif_disconnect(blkif_t *blkif)
 	}

 	if (blkif->blk_rings.common.sring) {
-		unmap_frontend_page(blkif);
+		unmap_frontend_pages(blkif);
 		free_vm_area(blkif->blk_ring_area);
+		blkif->blk_ring_area = NULL;
 		blkif->blk_rings.common.sring = NULL;
 	}
 }
diff --git a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
index 04c0a12..4ee10b8 100644
--- a/drivers/xen/blkback/xenbus.c
+++ b/drivers/xen/blkback/xenbus.c
@@ -42,6 +42,11 @@ static int connect_ring(struct backend_info *);
 static void backend_changed(struct xenbus_watch *, const char **,
 			    unsigned int);

+/* Order of maximum shared ring size advertised to the front end. */
+static int blkif_max_ring_page_order;
+module_param_named(max_ring_page_order, blkif_max_ring_page_order, int, 0);
+MODULE_PARM_DESC(max_ring_page_order, "Order of maximum VM shared ring size");
+
 static int blkback_name(blkif_t *blkif, char *buf)
 {
 	char *devpath, *devname;
@@ -243,6 +248,11 @@ static int blkback_probe(struct xenbus_device *dev,
 	if (err)
 		goto fail;

+	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order",
+			    "%u", blkif_max_ring_page_order);
+	if (err)
+		goto fail;
+
 	err = xenbus_switch_state(dev, XenbusStateInitWait);
 	if (err)
 		goto fail;
@@ -469,22 +479,82 @@ again:
 static int connect_ring(struct backend_info *be)
 {
 	struct xenbus_device *dev = be->dev;
-	unsigned long ring_ref;
 	unsigned int evtchn;
+	unsigned int ring_order;
+	unsigned long ring_ref[BLKIF_MAX_RING_PAGES];
 	char protocol[64] = "";
 	int err;

 	DPRINTK("%s", dev->otherend);

-	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-			    "event-channel", "%u", &evtchn, NULL);
-	if (err) {
-		xenbus_dev_fatal(dev, err,
-				 "reading %s/ring-ref and event-channel",
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+			   &evtchn);
+	if (err != 1) {
+		err = -EINVAL;
+
+		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
 				 dev->otherend);
 		return err;
 	}

+	printk(KERN_INFO "blkback: event-channel %u\n", evtchn);
+
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+			   &ring_order);
+	if (err != 1) {
+		DPRINTK("%s: using single page handshake", dev->otherend);
+
+		ring_order = 0;
+
+		err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
+				   "%lu", &ring_ref[0]);
+		if (err != 1) {
+			err = -EINVAL;
+
+			xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+					 dev->otherend);
+			return err;
+		}
+
+		printk(KERN_INFO "blkback: ring-ref %lu\n", ring_ref[0]);
+	} else {
+		unsigned int i;
+
+		if (ring_order > blkif_max_ring_page_order) {
+			err = -EINVAL;
+
+			xenbus_dev_fatal(dev, err,
+					 "%s/ring-page-order too big",
+					 dev->otherend);
+			return err;
+		}
+
+		DPRINTK("%s: using %u page(s)", dev->otherend,
+			(1 << ring_order));
+
+		for (i = 0; i < (1u << ring_order); i++) {
+			char ring_ref_name[10];
+
+			snprintf(ring_ref_name, sizeof(ring_ref_name),
+				 "ring-ref%1u", i);
+			err = xenbus_scanf(XBT_NIL, dev->otherend,
+					   ring_ref_name, "%lu",
+					   &ring_ref[i]);
+			if (err != 1) {
+				err = -EINVAL;
+
+				xenbus_dev_fatal(dev, err,
+						 "reading %s/%s",
+						 dev->otherend,
+						 ring_ref_name);
+				return err;
+			}
+
+			printk(KERN_INFO "blkback: ring-ref%u %lu\n", i,
+			       ring_ref[i]);
+		}
+	}
+
 	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
 	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
 			    "%63s", protocol, NULL);
@@ -497,18 +567,19 @@ static int connect_ring(struct backend_info *be)
 	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
 		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
 	else {
+		err = -EINVAL;
+
 		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-		return -1;
+		return err;
 	}
-	printk(KERN_INFO
-	       "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
+	printk(KERN_INFO "blkback: protocol %d (%s)\n",
+	       be->blkif->blk_protocol, protocol);

 	/* Map the shared frame, irq etc. */
-	err = blkif_map(be->blkif, ring_ref, evtchn);
+	err = blkif_map(be->blkif, ring_ref, (1u << ring_order), evtchn);
 	if (err) {
-		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-				 ring_ref, evtchn);
+		xenbus_dev_fatal(dev, err, "mapping ring-refs and evtchn");
 		return err;
 	}

@@ -537,5 +608,8 @@ static struct xenbus_driver blkback = {

 int blkif_xenbus_init(void)
 {
+	if (blkif_max_ring_page_order > BLKIF_MAX_RING_PAGE_ORDER)
+		blkif_max_ring_page_order = BLKIF_MAX_RING_PAGE_ORDER;
+
 	return xenbus_register_backend(&blkback);
 }
--
1.5.6.5
Paul Durrant
2010-Jan-12 17:44 UTC
[Xen-devel] [PATCH 2/2] Add multi-page ring support to xen-blkfront
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
 drivers/block/xen-blkfront.c |  137 ++++++++++++++++++++++++++++++++----------
 1 files changed, 105 insertions(+), 32 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 837b992..1aaa5a4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -67,7 +67,25 @@ struct blk_shadow {

 static struct block_device_operations xlvbd_block_fops;

-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE(_order) \
+	__RING_SIZE((struct blkif_sring *)0, PAGE_SIZE<<(_order))
+
+#define BLKIF_MAX_RING_PAGE_ORDER 2
+#define MAX_BLK_RING_SIZE BLK_RING_SIZE(BLKIF_MAX_RING_PAGE_ORDER)
+
+#define BLKIF_MAX_RING_PAGES (1<<BLKIF_MAX_RING_PAGE_ORDER)
+
+/*
+ * Allow multi page shared ring to be used if the capability is advertised by
+ * the back end.
+ */
+static int allow_multi_page_ring = 1;
+module_param_named(allow_multi_page_ring,
+		   allow_multi_page_ring,
+		   int,
+		   0);
+MODULE_PARM_DESC(allow_multi_page_ring,
+		 "Allow multi page shared ring to be used");

 /*
  * We have one of these per vbd, whether ide, scsi or 'other'.  They
@@ -81,14 +99,15 @@ struct blkfront_info
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
-	int ring_ref;
+	int ring_ref[BLKIF_MAX_RING_PAGES];
+	int ring_order;
 	struct blkif_front_ring ring;
 	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
 	struct gnttab_free_callback callback;
-	struct blk_shadow shadow[BLK_RING_SIZE];
+	struct blk_shadow shadow[MAX_BLK_RING_SIZE];
 	unsigned long shadow_free;
 	int feature_barrier;
 	int is_ready;
@@ -102,8 +121,6 @@ struct blkfront_info

 static DEFINE_SPINLOCK(blkif_io_lock);

-#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
 #define GRANT_INVALID_REF	0

 #define PARTS_PER_DISK 16
@@ -122,7 +139,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
 static int get_id_from_freelist(struct blkfront_info *info)
 {
 	unsigned long free = info->shadow_free;
-	BUG_ON(free >= BLK_RING_SIZE);
+	BUG_ON(free >= MAX_BLK_RING_SIZE);
 	info->shadow_free = info->shadow[free].req.id;
 	info->shadow[free].req.id = 0x0fffffee; /* debug */
 	return free;
@@ -495,6 +512,9 @@ static void blkif_restart_queue(struct work_struct *work)

 static void blkif_free(struct blkfront_info *info, int suspend)
 {
+	int i;
+	unsigned long sring = (unsigned long)info->ring.sring;
+
 	/* Prevent new requests being issued until we fix things up. */
 	spin_lock_irq(&blkif_io_lock);
 	info->connected = suspend ?
@@ -510,12 +530,14 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	flush_scheduled_work();

 	/* Free resources associated with old device channel. */
-	if (info->ring_ref != GRANT_INVALID_REF) {
-		gnttab_end_foreign_access(info->ring_ref, 0,
-					  (unsigned long)info->ring.sring);
-		info->ring_ref = GRANT_INVALID_REF;
-		info->ring.sring = NULL;
+	for (i = 0; i < (1<<info->ring_order); i++) {
+		if (info->ring_ref[i] != GRANT_INVALID_REF) {
+			gnttab_end_foreign_access(info->ring_ref[i], 0,
+						  sring + (i<<PAGE_SHIFT));
+			info->ring_ref[i] = GRANT_INVALID_REF;
+		}
 	}
+	info->ring.sring = NULL;
 	if (info->irq)
 		unbind_from_irqhandler(info->irq, info);
 	info->evtchn = info->irq = 0;
@@ -605,28 +627,35 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 static int setup_blkring(struct xenbus_device *dev,
 			 struct blkfront_info *info)
 {
-	struct blkif_sring *sring;
+	int i;
+	unsigned long sring;
 	int err;

-	info->ring_ref = GRANT_INVALID_REF;
+	for (i = 0; i < (1<<info->ring_order); i++)
+		info->ring_ref[i] = GRANT_INVALID_REF;

-	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
+	sring = __get_free_pages(GFP_NOIO | __GFP_HIGH, info->ring_order);
 	if (!sring) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 		return -ENOMEM;
 	}
-	SHARED_RING_INIT(sring);
-	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+	SHARED_RING_INIT((struct blkif_sring *)sring);
+	FRONT_RING_INIT(&info->ring, (struct blkif_sring *)sring,
+			PAGE_SIZE<<info->ring_order);

 	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

-	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-	if (err < 0) {
-		free_page((unsigned long)sring);
-		info->ring.sring = NULL;
-		goto fail;
+	for (i = 0; i < (1<<info->ring_order); i++) {
+		unsigned long addr = sring + (i<<PAGE_SHIFT);
+
+		err = xenbus_grant_ring(dev, virt_to_mfn(addr));
+		if (err < 0) {
+			free_pages(sring, info->ring_order);
+			info->ring.sring = NULL;
+			goto fail;
+		}
+		info->ring_ref[i] = err;
 	}
-	info->ring_ref = err;

 	err = xenbus_alloc_evtchn(dev, &info->evtchn);
 	if (err)
@@ -648,6 +677,9 @@ fail:
 	return err;
 }

+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif

 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
@@ -655,8 +687,24 @@ static int talk_to_blkback(struct xenbus_device *dev,
 {
 	const char *message = NULL;
 	struct xenbus_transaction xbt;
+	int ring_order;
+	int single_page;
 	int err;

+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "max-ring-page-order", "%u", &ring_order,
+			    NULL);
+	if (err || !allow_multi_page_ring) {
+		single_page = 1;
+		info->ring_order = 0;
+		dev_dbg(&dev->dev, "using single-page handshake\n");
+	} else {
+		single_page = 0;
+		info->ring_order = MIN(ring_order, BLKIF_MAX_RING_PAGE_ORDER);
+		dev_dbg(&dev->dev, "using multi-page handshake (%d pages)\n",
+			(1<<info->ring_order));
+	}
+
 	/* Create shared ring, alloc event channel. */
 	err = setup_blkring(dev, info);
 	if (err)
@@ -669,18 +717,43 @@ again:
 		goto destroy_blkring;
 	}

-	err = xenbus_printf(xbt, dev->nodename,
-			    "ring-ref", "%u", info->ring_ref);
-	if (err) {
-		message = "writing ring-ref";
-		goto abort_transaction;
+	if (single_page) {
+		err = xenbus_printf(xbt, dev->nodename,
+				    "ring-ref", "%u", info->ring_ref[0]);
+		if (err) {
+			message = "writing ring-ref";
+			goto abort_transaction;
+		}
+	} else {
+		int i;
+
+		err = xenbus_printf(xbt, dev->nodename,
+				    "ring-page-order", "%u", info->ring_order);
+		if (err) {
+			message = "writing ring-page-order";
+			goto abort_transaction;
+		}
+
+		for (i = 0; i < (1<<info->ring_order); i++) {
+			char node[10];
+
+			snprintf(node, sizeof(node), "ring-ref%u", i);
+			err = xenbus_printf(xbt, dev->nodename, node, "%u",
+					    info->ring_ref[i]);
+			if (err) {
+				message = "writing ring-ref";
+				goto abort_transaction;
+			}
+		}
 	}
+
 	err = xenbus_printf(xbt, dev->nodename,
 			    "event-channel", "%u", info->evtchn);
 	if (err) {
 		message = "writing event-channel";
 		goto abort_transaction;
 	}
+
 	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
 			    XEN_IO_PROTO_ABI_NATIVE);
 	if (err) {
@@ -747,9 +820,9 @@ static int blkfront_probe(struct xenbus_device *dev,
 	info->connected = BLKIF_STATE_DISCONNECTED;
 	INIT_WORK(&info->work, blkif_restart_queue);

-	for (i = 0; i < BLK_RING_SIZE; i++)
+	for (i = 0; i < MAX_BLK_RING_SIZE; i++)
 		info->shadow[i].req.id = i+1;
-	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+	info->shadow[MAX_BLK_RING_SIZE-1].req.id = 0x0fffffff;

 	/* Front end dir is a number, which is used as the id. */
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -782,13 +855,13 @@ static int blkif_recover(struct blkfront_info *info)

 	/* Stage 2: Set up free list. */
 	memset(&info->shadow, 0, sizeof(info->shadow));
-	for (i = 0; i < BLK_RING_SIZE; i++)
+	for (i = 0; i < MAX_BLK_RING_SIZE; i++)
 		info->shadow[i].req.id = i+1;
 	info->shadow_free = info->ring.req_prod_pvt;
-	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+	info->shadow[MAX_BLK_RING_SIZE-1].req.id = 0x0fffffff;

 	/* Stage 3: Find pending requests and requeue them. */
-	for (i = 0; i < BLK_RING_SIZE; i++) {
+	for (i = 0; i < MAX_BLK_RING_SIZE; i++) {
 		/* Not in use? */
 		if (copy[i].request == 0)
 			continue;
--
1.5.6.5
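Taken together with the blkback patch, the negotiation reduces to a handful
of xenstore nodes; the following is just a summary of what the code in these
two patches reads and writes:

    backend (blkback_probe):
        max-ring-page-order    largest ring order the backend will accept

    frontend (talk_to_blkback), multi-page handshake:
        ring-page-order        order actually chosen (<= max-ring-page-order)
        ring-ref0..ring-refN   one grant reference per ring page
        event-channel
        protocol

    frontend, single-page (legacy) handshake:
        ring-ref               single grant reference, as before
        event-channel
        protocol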
Paul Durrant
2010-Jan-12 17:51 UTC
Re: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
Paul Durrant wrote:
> These patches add support for multi-page shared rings to xen-blkfront/back.
> Shared rings must always be a power of 2 in size and so the size is controlled
> by the new 'max_ring_page_order' module parameter of xen-blkback. This defaults
> to 0 so operationally there should be no difference with these patches in place.
> The maximum value of 'max_ring_page_order' is currently limited to 2
> (i.e. 4 pages).
> xen-blkfront will always use the largest shared ring size supported by the back
> end. To override this and continue to use a single page ring, as before, set the
> 'allow_multi_page_ring' module parameter to 0.

I don't know why git-send-email seems to have sent my patch series into
a black hole. Here they are as attachments this time.

  Paul

--
==============================
Paul Durrant, Software Engineer

Citrix Systems (R&D) Ltd.
First Floor, Building 101
Cambridge Science Park
Milton Road
Cambridge CB4 0FY
United Kingdom
==============================
James Harper
2010-Jan-12 20:58 UTC
RE: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
> Paul Durrant wrote:
> > These patches add support for multi-page shared rings to xen-blkfront/back.
> > Shared rings must always be a power of 2 in size and so the size is
> > controlled by the new 'max_ring_page_order' module parameter of
> > xen-blkback. This defaults to 0 so operationally there should be no
> > difference with these patches in place.
> > The maximum value of 'max_ring_page_order' is currently limited to 2
> > (i.e. 4 pages).
> > xen-blkfront will always use the largest shared ring size supported by
> > the back end. To override this and continue to use a single page ring,
> > as before, set the 'allow_multi_page_ring' module parameter to 0.
>
> I don't know why git-send-email seems to have sent my patch series into
> a black hole. Here they are as attachments this time.

I received the 1/2 and 2/2 emails containing the patches.

Is there any measurable performance increase with this update? I assume
it means that you can have more read/write operations on the ring at
once? What would be useful to me is larger read/write operations...

James
Daniel Stodden
2010-Jan-12 21:22 UTC
RE: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
On Tue, 2010-01-12 at 15:58 -0500, James Harper wrote:
> I received the 1/2 and 2/2 emails containing the patches.
>
> Is there any measurable performance increase with this update? I assume
> it means that you can have more read/write operations on the ring at
> once? What would be useful to me is larger read/write operations...

Interesting. Why? Asking because ring space is spent on segments, not
request headers. Do you see a notable difference somewhere, compared to
a bunch of (virtually) consecutive request ranges? Or rather a
convenience thing for the gpl PV drivers?

Cheers,
Daniel
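To put rough numbers on the segment capacity in play (illustrative only;
this assumes the classic blkif ABI of 32 ring entries per page after
__RING_SIZE's power-of-2 rounding, 11 segments per request and 4KB
segments):

#include <stdio.h>

int main(void)
{
	const unsigned entries_per_page = 32; /* __RING_SIZE() at order 0 */
	const unsigned segs_per_req = 11;     /* BLKIF_MAX_SEGMENTS_PER_REQUEST */
	const unsigned seg_bytes = 4096;
	unsigned order;

	for (order = 0; order <= 2; order++) {
		unsigned entries = entries_per_page << order;

		/* requests in flight and the maximum data they address */
		printf("order %u: %3u requests, %4u KB in flight\n",
		       order, entries,
		       entries * segs_per_req * seg_bytes / 1024);
	}

	return 0;
}

That works out to roughly 1.4MB of data in flight for a single-page ring
and about 5.6MB at order 2.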
Dan Magenheimer
2010-Jan-12 23:52 UTC
RE: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
Haven't read the patch carefully, but a question: Will blkback fall back
safely to single-page allocation if fragmentation has occurred... and, if
so, will this inability to allocate multi-page be dynamically propagated
to blkfronts (rather than just the largest ring size "supported" by
blkback)?

> -----Original Message-----
> From: Paul Durrant [mailto:paul.durrant@citrix.com]
> Sent: Tuesday, January 12, 2010 10:52 AM
> To: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] Add multi-page shared ring support to
> xen-blkfront/back
>
> Paul Durrant wrote:
> > These patches add support for multi-page shared rings to
> > xen-blkfront/back.
> > Shared rings must always be a power of 2 in size and so the size is
> > controlled by the new 'max_ring_page_order' module parameter of
> > xen-blkback. This defaults to 0 so operationally there should be no
> > difference with these patches in place.
> > The maximum value of 'max_ring_page_order' is currently limited to 2
> > (i.e. 4 pages).
> > xen-blkfront will always use the largest shared ring size supported
> > by the back end. To override this and continue to use a single page
> > ring, as before, set the 'allow_multi_page_ring' module parameter to 0.
>
> I don't know why git-send-email seems to have sent my patch series into
> a black hole. Here they are as attachments this time.
>
> Paul
>
> --
> ==============================
> Paul Durrant, Software Engineer
>
> Citrix Systems (R&D) Ltd.
> First Floor, Building 101
> Cambridge Science Park
> Milton Road
> Cambridge CB4 0FY
> United Kingdom
> ==============================
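For reference, the allocation that fragmentation can break is blkfront's
__get_free_pages() call; blkback only maps the already-granted pages into
a vmalloc area and needs no high-order allocation. A minimal sketch of the
kind of fallback being asked about, hypothetical and not part of the
posted patches (the alloc_ring_pages() helper is invented for
illustration):

static unsigned long alloc_ring_pages(struct blkfront_info *info,
				      int max_order)
{
	int order;

	/*
	 * Try the largest order first, falling back one order at a
	 * time; record the order actually obtained so that it can be
	 * advertised in ring-page-order during the handshake.
	 */
	for (order = max_order; order >= 0; order--) {
		unsigned long sring =
			__get_free_pages(GFP_NOIO | __GFP_HIGH, order);

		if (sring) {
			info->ring_order = order;
			return sring;
		}
	}

	return 0;
}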
James Harper
2010-Jan-13 06:03 UTC
RE: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
> On Tue, 2010-01-12 at 15:58 -0500, James Harper wrote:
>
> > I received the 1/2 and 2/2 emails containing the patches.
> >
> > Is there any measurable performance increase with this update? I assume
> > it means that you can have more read/write operations on the ring at
> > once? What would be useful to me is larger read/write operations...
>
> Interesting. Why? Asking because ring space is spent on segments, not
> request headers. Do you see a notable difference somewhere, compared to
> a bunch of (virtually) consecutive request ranges? Or rather a
> convenience thing for the gpl PV drivers?

Windows has a limit to the number of outstanding requests that it will
issue. It is as low as 16 under some versions of their scsiport
infrastructure. I think storport relaxes those limits a bit but I haven't
gotten as far as a storport driver yet. So larger requests would probably
benefit Windows more than more requests would.

It would be nice if the ring slots could be dynamic in size. Major
rewrite of everything of course, but we'd get the best of both worlds.

James
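For context on why a single request is capped: each fixed-size ring slot
carries at most BLKIF_MAX_SEGMENTS_PER_REQUEST (11) segments of up to a
page each, i.e. about 44KB per request. The slot layout, paraphrased from
Xen's public io/blkif.h of that era (field layout from memory, so treat it
as approximate):

struct blkif_request_segment {
	grant_ref_t gref;        /* reference to the I/O buffer frame   */
	uint8_t     first_sect;  /* first sector used within the frame  */
	uint8_t     last_sect;   /* last sector used within the frame   */
};

struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_READ/WRITE/...         */
	uint8_t        nr_segments;  /* <= BLKIF_MAX_SEGMENTS_PER_REQUEST */
	blkif_vdev_t   handle;
	uint64_t       id;           /* echoed back in the response     */
	blkif_sector_t sector_number;
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

Larger requests would therefore need an ABI change to the slot format,
not just more ring pages.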
Konrad Rzeszutek Wilk
2011-Aug-03 18:17 UTC
Re: [Xen-devel] Add multi-page shared ring support to xen-blkfront/back
On Tue, Jan 12, 2010 at 05:51:42PM +0000, Paul Durrant wrote:
> Paul Durrant wrote:
> > These patches add support for multi-page shared rings to xen-blkfront/back.
> > Shared rings must always be a power of 2 in size and so the size is controlled
> > by the new 'max_ring_page_order' module parameter of xen-blkback. This defaults
> > to 0 so operationally there should be no difference with these patches in place.
> > The maximum value of 'max_ring_page_order' is currently limited to 2
> > (i.e. 4 pages).
> > xen-blkfront will always use the largest shared ring size supported by the back
> > end. To override this and continue to use a single page ring, as before, set the
> > 'allow_multi_page_ring' module parameter to 0.

Hey Paul,

Would it be possible to rebase this on 3.0? How is it different from the
one posted some time ago by .. um, somebody else?