Duan, Ronghui
2012-Aug-16 10:31 UTC
[RFC v1 5/5] VBD: enlarge max segment per request in blkfront
Add segment ring (segring) support in blkback. With ring type 2, each request on the I/O ring carries only a fixed-size blkif_request_header; its segment descriptors are placed on a separate shared segment ring, so a request is no longer limited to BLKIF_MAX_SEGMENTS_PER_REQUEST segments. Ring type 1 (the existing single-ring layout) is still supported and negotiated via xenstore.
Signed-off-by: Ronghui Duan <ronghui.duan@intel.com>
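
Design notes (RFC): the backend advertises its ring capability through a new
"blkback-ring-type" xenstore key and reads the frontend's choice from
"blkfront-ring-type" in connect_ring(). For ring type 2 the frontend is
expected to publish "reqring-ref", "segring-ref" and "event-channel" instead
of the single "ring-ref": the request ring then carries fixed-size
blkif_request_header entries, while the segment descriptors for each request
live on the separate segment ring (the request's seg_id field records their
offset there; this backend consumes them in order from the segment ring's
req_cons). Ring type 1 keeps the existing behaviour.

The fragment below is only an illustration of that split. It uses simplified,
userspace-style stand-in types and an assumed MAX_SEG_V2 value, not the real
blkif structures or ring macros from common.h:

/*
 * Illustration only: pull a type 2 request off the two rings.
 * 'struct req_hdr' and 'struct seg' are stand-ins for
 * blkif_request_header and blkif_request_segment.
 */
#include <stdint.h>
#include <string.h>

#define MAX_SEG_V2 128	/* assumed value of BLKIF_MAX_SEGMENTS_PER_REQUEST_V2 */

struct seg {
	uint32_t gref;			/* grant reference of the data page */
	uint8_t  first_sect, last_sect;	/* sector range within that page */
};

struct req_hdr {
	uint8_t  operation;
	uint8_t  nr_segments;		/* how many slots to read from the segment ring */
	uint64_t id;
	uint64_t sector_number;
	uint64_t seg_id;		/* index of the first segment on the segment ring */
};

/* Backend side: copy the header, then nr_segments entries from the segment ring. */
static void consume_request(const struct req_hdr *slot,
			    const struct seg *segring, unsigned int segring_size,
			    struct req_hdr *hdr, struct seg *segs)
{
	unsigned int i;

	memcpy(hdr, slot, sizeof(*hdr));
	for (i = 0; i < hdr->nr_segments && i < MAX_SEG_V2; i++)
		segs[i] = segring[(hdr->seg_id + i) % segring_size];
}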
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 45eda98..0bbc226 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -60,6 +60,10 @@ static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
+int blkback_ring_type = 2;
+module_param_named(blk_ring_type, blkback_ring_type, int, 0);
+MODULE_PARM_DESC(blk_ring_type, "Type of ring advertised to the frontend (1: classic single ring, 2: request ring plus segment ring)");
+
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);
@@ -125,7 +129,7 @@ static struct pending_req *alloc_req(struct xen_blkif *blkif)
struct xen_blkbk *blkbk = blkif->blkbk;
struct pending_req *req = NULL;
unsigned long flags;
- unsigned int max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ unsigned int max_seg = blkif->ops->max_seg;
spin_lock_irqsave(&blkbk->pending_free_lock, flags);
if (!list_empty(&blkbk->pending_free)) {
@@ -315,8 +319,10 @@ static void xen_blkbk_unmap(struct pending_req *req)
for (i = 0; i < req->nr_pages; i++) {
handle = pending_handle(req, i);
- if (handle == BLKBACK_INVALID_HANDLE)
+ if (handle == BLKBACK_INVALID_HANDLE) {
+ printk("BLKBACK_INVALID_HANDLE\n");
continue;
+ }
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
@@ -486,6 +492,12 @@ void *get_back_ring(struct xen_blkif *blkif)
return (void *)&blkif->blk_rings;
}
+void *get_back_ring_v2(struct xen_blkif *blkif)
+{
+ return (void *)&blkif->blk_rings_v2;
+}
+
+
void copy_blkif_req(struct xen_blkif *blkif, RING_IDX rc)
{
struct blkif_request *req = (struct blkif_request *)blkif->req;
@@ -506,12 +518,48 @@ void copy_blkif_req(struct xen_blkif *blkif, RING_IDX rc)
}
}
+void copy_blkif_req_v2(struct xen_blkif *blkif, RING_IDX rc)
+{
+ struct blkif_request_header *req = (struct blkif_request_header *)blkif->req;
+ union blkif_back_rings_v2 *blk_rings = &blkif->blk_rings_v2;
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(req, RING_GET_REQUEST(&blk_rings->native, rc),
+ sizeof(struct blkif_request_header));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ blkif_get_x86_32_req_v2(req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ blkif_get_x86_64_req_v2(req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+ break;
+ default:
+ BUG();
+ }
+}
+
void copy_blkif_seg_req(struct xen_blkif *blkif)
{
struct blkif_request *req = (struct blkif_request *)blkif->req;
blkif->seg_req = req->u.rw.seg;
}
+
+void copy_blkif_seg_req_v2(struct xen_blkif *blkif)
+{
+ struct blkif_request_header *req = (struct blkif_request_header *)blkif->req;
+ struct blkif_segment_back_ring *blk_segrings = &blkif->blk_segrings;
+ int i;
+ RING_IDX rc;
+
+ rc = blk_segrings->req_cons;
+ for (i = 0; i < req->u.rw.nr_segments; i++) {
+ memcpy(&blkif->seg_req[i], RING_GET_REQUEST(blk_segrings, rc++),
+ sizeof(struct blkif_request_segment));
+ }
+ blk_segrings->req_cons = rc;
+}
+
/*
* Function to copy the from the ring buffer the ''struct blkif_request''
* (which has the sectors we want, number of them, grant references, etc),
@@ -587,10 +635,12 @@ do_block_io_op(struct xen_blkif *blkif)
return more_to_do;
}
+
/*
* Transmutation of the ''struct blkif_request'' to a proper ''struct bio''
* and call the ''submit_bio'' to pass it to the underlying storage.
*/
+
static int dispatch_rw_block_io(struct xen_blkif *blkif,
struct pending_req *pending_req)
{
@@ -774,54 +824,89 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
return -EIO;
}
-struct blkif_segment_back_ring *
- get_seg_back_ring(struct xen_blkif *blkif)
+void push_back_ring_rsp(struct xen_blkif *blkif, int nr_page, int *notify)
{
- return NULL;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+
+ blk_rings->common.rsp_prod_pvt++;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, *notify);
}
-void push_back_ring_rsp(union blkif_back_rings *blk_rings, int nr_page, int *notify)
+void push_back_ring_rsp_v2(struct xen_blkif *blkif, int nr_page, int *notify)
{
+ union blkif_back_rings_v2 *blk_rings = &blkif->blk_rings_v2;
+ struct blkif_segment_back_ring *blk_segrings = &blkif->blk_segrings;
+
blk_rings->common.rsp_prod_pvt++;
+ blk_segrings->rsp_prod_pvt += nr_page;
+ RING_PUSH_RESPONSES(blk_segrings);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, *notify);
}
-/*
- * Put a response on the ring on how the operation fared.
- */
-static void make_response(struct xen_blkif *blkif, u64 id,
- unsigned short op, int nr_page, int st)
+void copy_response(struct xen_blkif *blkif, struct blkif_response *resp)
{
- struct blkif_response resp;
- unsigned long flags;
- union blkif_back_rings *blk_rings = (union blkif_back_rings *)blkif->ops->get_back_ring(blkif);
- int notify;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+ resp, sizeof(*resp));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+ resp, sizeof(*resp));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+ resp, sizeof(*resp));
+ break;
+ default:
+ BUG();
+ }
- resp.id = id;
- resp.operation = op;
- resp.status = st;
+}
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- /* Place on the response ring for the relevant domain. */
+void copy_response_v2(struct xen_blkif *blkif, struct blkif_response *resp)
+{
+ union blkif_back_rings_v2 *blk_rings = &blkif->blk_rings_v2;
+
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp, sizeof(*resp));
break;
case BLKIF_PROTOCOL_X86_32:
memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp, sizeof(*resp));
break;
case BLKIF_PROTOCOL_X86_64:
memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp, sizeof(*resp));
break;
default:
BUG();
}
+}
- blkif->ops->push_back_ring_rsp(blk_rings, nr_page, &notify);
+/*
+ * Put a response on the ring on how the operation fared.
+ */
+static void make_response(struct xen_blkif *blkif, u64 id,
+ unsigned short op, int nr_page, int st)
+{
+ struct blkif_response resp;
+ unsigned long flags;
+ int notify;
+
+ resp.id = id;
+ resp.operation = op;
+ resp.status = st;
+
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ /* Place on the response ring for the relevant domain. */
+ blkif->ops->copy_response(blkif, &resp);
+
+ blkif->ops->push_back_ring_rsp(blkif, nr_page, &notify);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
if (notify)
@@ -895,9 +980,19 @@ struct blkback_ring_operation blkback_ring_ops = {
.copy_blkif_req = copy_blkif_req,
.copy_blkif_seg_req = copy_blkif_seg_req,
.push_back_ring_rsp = push_back_ring_rsp,
+ .copy_response = copy_response,
.max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST,
};
+struct blkback_ring_operation blkback_ring_ops_v2 = {
+ .get_back_ring = get_back_ring_v2,
+ .copy_blkif_req = copy_blkif_req_v2,
+ .copy_blkif_seg_req = copy_blkif_seg_req_v2,
+ .push_back_ring_rsp = push_back_ring_rsp_v2,
+ .copy_response = copy_response_v2,
+ .max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST_V2,
+};
+
static int __init xen_blkif_init(void)
{
int rc = 0;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 80e8acc..2e241a4 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -48,6 +48,7 @@
pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
+extern int blkback_ring_type;
/* Not a real protocol. Used to generate ring structs which contain
* the elements common to all protocols only. This way we get a
@@ -84,6 +85,22 @@ struct blkif_x86_32_request {
} u;
} __attribute__((__packed__));
+struct blkif_x86_32_request_rw_v2 {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ uint64_t seg_id;/* segment offset in the segment ring */
+} __attribute__((__packed__));
+
+struct blkif_x86_32_request_v2 {
+ uint8_t operation; /* BLKIF_OP_??? */
+ union {
+ struct blkif_x86_32_request_rw_v2 rw;
+ struct blkif_x86_32_request_discard discard;
+ } u;
+} __attribute__((__packed__));
+
/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
@@ -120,6 +137,23 @@ struct blkif_x86_64_request {
} u;
} __attribute__((__packed__));
+struct blkif_x86_64_request_rw_v2 {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint32_t _pad1; /* offsetof(blkif_request..,u.rw.id)==8 */
+ uint64_t id;
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ uint64_t seg_id;/* segment offset in the segment ring */
+} __attribute__((__packed__));
+
+struct blkif_x86_64_request_v2 {
+ uint8_t operation; /* BLKIF_OP_??? */
+ union {
+ struct blkif_x86_64_request_rw_v2 rw;
+ struct blkif_x86_64_request_discard discard;
+ } u;
+} __attribute__((__packed__));
+
struct blkif_x86_64_response {
uint64_t __attribute__((__aligned__(8))) id;
uint8_t operation; /* copied from request */
@@ -132,6 +166,10 @@ DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
struct blkif_x86_64_response);
+DEFINE_RING_TYPES(blkif_x86_32_v2, struct blkif_x86_32_request_v2,
+ struct blkif_x86_32_response);
+DEFINE_RING_TYPES(blkif_x86_64_v2, struct blkif_x86_64_request_v2,
+ struct blkif_x86_64_response);
union blkif_back_rings {
struct blkif_back_ring native;
@@ -140,6 +178,13 @@ union blkif_back_rings {
struct blkif_x86_64_back_ring x86_64;
};
+union blkif_back_rings_v2 {
+ struct blkif_request_back_ring native;
+ struct blkif_common_back_ring common;
+ struct blkif_x86_32_v2_back_ring x86_32;
+ struct blkif_x86_64_v2_back_ring x86_64;
+};
+
enum blkif_protocol {
BLKIF_PROTOCOL_NATIVE = 1,
BLKIF_PROTOCOL_X86_32 = 2,
@@ -175,7 +220,8 @@ struct blkback_ring_operation {
void *(*get_back_ring) (struct xen_blkif *blkif);
void (*copy_blkif_req) (struct xen_blkif *blkif, RING_IDX rc);
void (*copy_blkif_seg_req) (struct xen_blkif *blkif);
- void (*push_back_ring_rsp) (union blkif_back_rings *blk_rings, int nr_page, int *notify);
+ void (*push_back_ring_rsp) (struct xen_blkif *blkif, int nr_page, int *notify);
+ void (*copy_response) (struct xen_blkif *blkif, struct blkif_response *resp);
unsigned int max_seg;
};
@@ -190,7 +236,10 @@ struct xen_blkif {
enum blkif_protocol blk_protocol;
enum blkif_backring_type blk_backring_type;
union blkif_back_rings blk_rings;
+ union blkif_back_rings_v2 blk_rings_v2;
+ struct blkif_segment_back_ring blk_segrings;
void *blk_ring;
+ void *blk_segring;
struct xen_blkbk *blkbk;
/* The VBD attached to this interface. */
struct xen_vbd vbd;
@@ -328,6 +377,31 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
}
}
+static inline void blkif_get_x86_32_req_v2(struct blkif_request_header *dst,
+ struct blkif_x86_32_request_v2 *src)
+{
+ dst->operation = src->operation;
+ switch (src->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ dst->u.rw.seg_id = src->u.rw.seg_id;
+ barrier();
+ break;
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+ default:
+ break;
+ }
+}
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
@@ -359,4 +433,30 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
}
}
+static inline void blkif_get_x86_64_req_v2(struct blkif_request_header *dst,
+ struct blkif_x86_64_request_v2 *src)
+{
+ dst->operation = src->operation;
+ switch (src->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ dst->u.rw.seg_id = src->u.rw.seg_id;
+ barrier();
+ break;
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+ default:
+ break;
+ }
+}
+
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8b0d496..4678533 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -36,7 +36,7 @@ static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
unsigned int);
-extern struct blkback_ring_operation blkback_ring_ops;
+extern struct blkback_ring_operation blkback_ring_ops, blkback_ring_ops_v2;
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
@@ -176,6 +176,83 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
return 0;
}
+static int
+xen_blkif_map_segring(struct xen_blkif *blkif, unsigned long shared_page)
+{
+ struct blkif_segment_sring *sring;
+ int err;
+
+ err = xenbus_map_ring_valloc(blkif->be->dev, shared_page,
+ &blkif->blk_segring);
+
+ if (err < 0)
+ return err;
+
+ sring = (struct blkif_segment_sring *)blkif->blk_segring;
+ BACK_RING_INIT(&blkif->blk_segrings, sring, PAGE_SIZE);
+
+ return 0;
+}
+
+static int xen_blkif_map_v2(struct xen_blkif *blkif, unsigned long shared_page,
+ unsigned int evtchn)
+{
+ int err;
+
+ /* Already connected through? */
+ if (blkif->irq)
+ return 0;
+
+ err = xenbus_map_ring_valloc(blkif->be->dev, shared_page,
+ &blkif->blk_ring);
+
+ if (err < 0)
+ return err;
+
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ {
+ struct blkif_request_sring *sring;
+ sring = (struct blkif_request_sring *)blkif->blk_ring;
+ BACK_RING_INIT(&blkif->blk_rings_v2.native, sring,
+ PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_32:
+ {
+ struct blkif_x86_32_v2_sring *sring_x86_32;
+ sring_x86_32 = (struct blkif_x86_32_v2_sring *)blkif->blk_ring;
+ BACK_RING_INIT(&blkif->blk_rings_v2.x86_32, sring_x86_32,
+ PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_64:
+ {
+ struct blkif_x86_64_v2_sring *sring_x86_64;
+ sring_x86_64 = (struct blkif_x86_64_v2_sring *)blkif->blk_ring;
+ BACK_RING_INIT(&blkif->blk_rings_v2.x86_64, sring_x86_64,
+ PAGE_SIZE);
+ break;
+ }
+ default:
+ BUG();
+ }
+
+
+
+ err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
+ xen_blkif_be_int, 0,
+ "blkif-backend", blkif);
+ if (err < 0) {
+ xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
+ blkif->blk_rings_v2.common.sring = NULL;
+ return err;
+ }
+ blkif->irq = err;
+
+ return 0;
+}
+
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
if (blkif->xenblkd) {
@@ -192,10 +269,18 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
blkif->irq = 0;
}
- if (blkif->blk_rings.common.sring) {
+ if (blkif->blk_backring_type == BACKRING_TYPE_1 &&
+ blkif->blk_rings.common.sring) {
xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
blkif->blk_rings.common.sring = NULL;
}
+ if (blkif->blk_backring_type == BACKRING_TYPE_2 &&
+ blkif->blk_rings_v2.common.sring) {
+ xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
+ blkif->blk_rings_v2.common.sring = NULL;
+ xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_segring);
+ blkif->blk_segrings.sring = NULL;
+ }
}
void xen_blkif_free(struct xen_blkif *blkif)
@@ -476,6 +561,9 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
if (err)
goto fail;
+ err = xenbus_printf(XBT_NIL, dev->nodename, "blkback-ring-type",
+ "%u", blkback_ring_type);
+ if (err)
+ goto fail;
+
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
@@ -722,25 +810,68 @@ static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
+ unsigned long segring_ref;
unsigned int evtchn;
+ unsigned int ring_type;
char protocol[64] = "";
int err;
DPRINTK("%s", dev->otherend);
- be->blkif->ops = &blkback_ring_ops;
- be->blkif->req = kmalloc(sizeof(struct blkif_request),
- GFP_KERNEL);
- be->blkif->seg_req = kmalloc(sizeof(struct blkif_request_segment)*
- be->blkif->ops->max_seg, GFP_KERNEL);
- be->blkif->blk_backring_type = BACKRING_TYPE_1;
-
- err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
- &ring_ref, "event-channel", "%u", &evtchn, NULL);
- if (err) {
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "blkfront-ring-type", "%u",
+ &ring_type);
+ if (err != 1) {
+ pr_info(DRV_PFX "using legacy blk ring\n");
+ ring_type = 1;
+ }
+
+ if (ring_type == 1) {
+ be->blkif->ops = &blkback_ring_ops;
+ be->blkif->blk_backring_type = BACKRING_TYPE_1;
+ be->blkif->req = kmalloc(sizeof(struct blkif_request), GFP_KERNEL);
+ be->blkif->seg_req = kmalloc(sizeof(struct blkif_request_segment)*
+ be->blkif->ops->max_seg, GFP_KERNEL);
+ if (!be->blkif->req || !be->blkif->seg_req) {
+ kfree(be->blkif->req);
+ kfree(be->blkif->seg_req);
+ xenbus_dev_fatal(dev, -ENOMEM, "not enough memory");
+ return -ENOMEM;
+ }
+ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
+ &ring_ref, "event-channel", "%u", &evtchn, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "reading %s/ring-ref and event-channel",
+ dev->otherend);
+ return err;
+ }
+ }
+ else if (ring_type == 2) {
+ be->blkif->ops = &blkback_ring_ops_v2;
+ be->blkif->blk_backring_type = BACKRING_TYPE_2;
+ be->blkif->req = kmalloc(sizeof(struct blkif_request_header), GFP_KERNEL);
+ be->blkif->seg_req = kmalloc(sizeof(struct blkif_request_segment)*
+ be->blkif->ops->max_seg, GFP_KERNEL);
+ if (!be->blkif->req || !be->blkif->seg_req) {
+ kfree(be->blkif->req);
+ kfree(be->blkif->seg_req);
+ xenbus_dev_fatal(dev, -ENOMEM, "not enough memory");
+ return -ENOMEM;
+ }
+ err = xenbus_gather(XBT_NIL, dev->otherend, "reqring-ref", "%lu",
+ &ring_ref, "event-channel", "%u", &evtchn,
+ "segring-ref", "%lu", &segring_ref, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "reading %s/ring/segring-ref and event-channel",
+ dev->otherend);
+ return err;
+ }
+ }
+ else {
xenbus_dev_fatal(dev, err,
- "reading %s/ring-ref and event-channel",
- dev->otherend);
- return err;
+ "unsupport %s blkfront ring", dev->otherend);
+ return -EINVAL;
}
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
@@ -758,19 +889,51 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
}
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d
(%s)\n",
- ring_ref, evtchn, be->blkif->blk_protocol, protocol);
-
/* Map the shared frame, irq etc. */
- err = xen_blkif_map(be->blkif, ring_ref, evtchn);
- if (err) {
- xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
- ring_ref, evtchn);
- return err;
+ if (ring_type == 2) {
+ err = xen_blkif_map_segring(be->blkif, segring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "mapping segment rinfs");
+ return err;
+ }
+ err = xen_blkif_map_v2(be->blkif, ring_ref, evtchn);
+ if (err) {
+ xenbus_unmap_ring_vfree(be->blkif->be->dev,
+ be->blkif->blk_segring);
+ be->blkif->blk_segrings.sring = NULL;
+ xenbus_dev_fatal(dev, err, "mapping request rinfs");
+ return err;
+ }
+ pr_info(DRV_PFX
+ "ring-ref %ld,segring-ref %ld,event-channel %d,protocol %d
(%s)\n",
+ ring_ref, segring_ref, evtchn, be->blkif->blk_protocol, protocol);
+ }
+ else {
+ err = xen_blkif_map(be->blkif, ring_ref, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
+ ring_ref, evtchn);
+ return err;
+ }
+ pr_info(DRV_PFX "ring-ref %ld,event-channel %d,protocol %d (%s)\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
}
err = xen_blkif_init_blkbk(be->blkif);
if (err) {
+ if (ring_type == 2) {
+ xenbus_unmap_ring_vfree(be->blkif->be->dev,
+ be->blkif->blk_segring);
+ be->blkif->blk_segrings.sring = NULL;
+ xenbus_unmap_ring_vfree(be->blkif->be->dev,
+ be->blkif->blk_ring);
+ be->blkif->blk_rings_v2.common.sring = NULL;
+ }
+ else {
+ xenbus_unmap_ring_vfree(be->blkif->be->dev,
+ be->blkif->blk_ring);
+ be->blkif->blk_rings.common.sring = NULL;
+ }
xenbus_dev_fatal(dev, err, "xen blkif init blkbk fails\n");
return err;
}
-ronghui