Displaying 2 results from an estimated 2 matches for "seg_req".
2012 Aug 16
0
[RFC v1 3/5] VBD: enlarge max segment per request in blkfront
...t page **pages = req->pages;
unsigned int i, invcount = 0;
grant_handle_t handle;
int ret;
@@ -341,11 +368,13 @@ static void xen_blkbk_unmap(struct pending_req *req)
BUG_ON(ret);
}
+
static int xen_blkbk_map(struct blkif_request *req,
+ struct blkif_request_segment *seg_req,
struct pending_req *pending_req,
struct seg_buf seg[])
{
- struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct gnttab_map_grant_ref *map = pending_req->map;
int i;
int nseg = req->u.rw.nr_segments;
int ret = 0;
@@ -362,7...
2012 Aug 16
0
[RFC v1 5/5] VBD: enlarge max segment per request in blkfront
...break;
+ case BLKIF_PROTOCOL_X86_32:
+ blkif_get_x86_32_req_v2(req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ blkif_get_x86_64_req_v2(req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+ break;
+ default:
+ BUG();
+ }
+}
+
/*
 * Snapshot the segment descriptors of the v1 (classic) blkif request
 * currently staged in blkif->req into blkif->seg_req.
 *
 * NOTE(review): this is a plain assignment of req->u.rw.seg, i.e. a
 * pointer/handle copy, not a deep copy of the segment array — confirm
 * that the underlying storage (blkif->req) stays valid for as long as
 * blkif->seg_req is consumed.
 *
 * NOTE(review): the cast assumes blkif->req always holds a v1
 * struct blkif_request here; the v2 path appears to use
 * copy_blkif_seg_req_v2 with struct blkif_request_header instead —
 * verify the caller dispatches on protocol version before this runs.
 */
void copy_blkif_seg_req(struct xen_blkif *blkif)
{
struct blkif_request *req = (struct blkif_request *)blkif->req;
blkif->seg_req = req->u.rw.seg;
}
+
+void copy_blkif_seg_req_v2(struct xen_blkif *blkif)
+{
+ struct blkif_request_header *req = (struct blkif_request_header *)blkif->req;
+ struct blkif_s...