Displaying 13 results from an estimated 13 matches for "blk_rings".
2012 Aug 16
0
[RFC v1 5/5] VBD: enlarge max segment per request in blkfront
..."BLKBACK_INVALID_HANDLE\n");
continue;
+ }
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
@@ -486,6 +492,12 @@ void *get_back_ring(struct xen_blkif *blkif)
return (void *)&blkif->blk_rings;
}
+void *get_back_ring_v2(struct xen_blkif *blkif)
+{
+ return (void *)&blkif->blk_rings_v2;
+}
+
+
void copy_blkif_req(struct xen_blkif *blkif, RING_IDX rc)
{
struct blkif_request *req = (struct blkif_request *)blkif->req;
@@ -506,12 +518,48 @@ void copy_blkif_req(struct xen_blk...
2012 Aug 16
0
[RFC v1 3/5] VBD: enlarge max segment per request in blkfront
...&pending_req->blkif->refcnt) <= 2) {
if (atomic_read(&pending_req->blkif->drain))
@@ -489,8 +520,37 @@ static void end_block_io_op(struct bio *bio, int error)
bio_put(bio);
}
+void *get_back_ring(struct xen_blkif *blkif)
+{
+ return (void *)&blkif->blk_rings;
+}
+void copy_blkif_req(struct xen_blkif *blkif, RING_IDX rc)
+{
+ struct blkif_request *req = (struct blkif_request *)blkif->req;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(req,...
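The copy_blkif_req() body is cut off above; for reference, the protocol-dispatch pattern it follows in blkback generally looks like the sketch below, assuming the usual blkif_get_x86_32_req()/blkif_get_x86_64_req() translation helpers (this is not the patch's exact code):

void copy_blkif_req(struct xen_blkif *blkif, RING_IDX rc)
{
	struct blkif_request *req = (struct blkif_request *)blkif->req;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		/* Guest and backend share a layout: copy the slot verbatim. */
		memcpy(req, RING_GET_REQUEST(&blk_rings->native, rc),
		       sizeof(*req));
		break;
	case BLKIF_PROTOCOL_X86_32:
		/* 32-bit guest layout: translate field by field. */
		blkif_get_x86_32_req(req,
				RING_GET_REQUEST(&blk_rings->x86_32, rc));
		break;
	case BLKIF_PROTOCOL_X86_64:
		/* 64-bit guest layout: translate field by field. */
		blkif_get_x86_64_req(req,
				RING_GET_REQUEST(&blk_rings->x86_64, rc));
		break;
	default:
		BUG();
	}
}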
2011 May 02
32
[PATCH] blkback: Fix block I/O latency issue
...might go to sleep (i.e. more_to_do == 0) then we better
+ let blkfront know about it (by setting req_event appropriately) so that
+ blkfront will bother to wake us up (via interrupt) when it submits a
+ new I/O */
+ if (!more_to_do)
+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
return more_to_do;
}
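The fix hinges on the ring's req_event field: if the backend goes idle without updating it, the frontend sees no need to raise an interrupt for the next request, and the I/O sits on the ring until something else wakes the backend. A minimal sketch of the consumer loop with the final check in place, using the standard macros from xen/interface/io/ring.h (the surrounding function name is illustrative):

static int drain_ring(union blkif_back_rings *blk_rings)
{
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb();	/* read the producer index before the request contents */

	while (rc != rp) {
		/* ... dequeue and dispatch the request at index rc ... */
		blk_rings->common.req_cons = ++rc;
	}

	/*
	 * About to go idle: RING_FINAL_CHECK_FOR_REQUESTS() re-arms
	 * sring->req_event and then re-checks, so either we pick up a
	 * request that raced in, or the frontend will interrupt us when
	 * it submits the next one.
	 */
	RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	return more_to_do;
}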
2011 Apr 04
0
[PATCH] linux-2.6.18/backends: use xenbus_be.ko interfaces instead of open-coding them
...rea))
+ return PTR_ERR(area);
+ blkif->blk_ring_area = area;
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
blkif_sring_t *sring;
- sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+ sring = (blkif_sring_t *)area->addr;
BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
blkif_x86_32_sring_t *sring_x86_32;
- sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+ sring_x86_32 = (blkif_x86_32_sring_t *)area->addr;
BACK_RING_INIT(&blkif->blk_rings.x86_32, s...
2012 Mar 05
11
[PATCH 0001/001] xen: multi page ring support for block devices
...< 0)
return err;
@@ -140,21 +141,24 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
{
struct blkif_sring *sring;
sring = (struct blkif_sring *)blkif->blk_ring;
- BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+ BACK_RING_INIT(&blkif->blk_rings.native, sring,
+ PAGE_SIZE << ring_order);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
struct blkif_x86_32_sring *sring_x86_32;...
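The key change is that BACK_RING_INIT() is now told the total ring size, PAGE_SIZE << ring_order, rather than a single page. A hedged sketch of how the backend might bound the order negotiated with the frontend (the macro name and limit below are illustrative, not the patch's):

/* Illustrative only: cap the frontend-advertised ring-page-order and
 * compute the byte size passed to BACK_RING_INIT(). */
#define BLKIF_MAX_RING_PAGE_ORDER	2	/* assumed cap: up to 4 pages */

static unsigned long blkif_ring_size(unsigned int ring_order)
{
	if (ring_order > BLKIF_MAX_RING_PAGE_ORDER)
		ring_order = BLKIF_MAX_RING_PAGE_ORDER;
	return PAGE_SIZE << ring_order;		/* 1, 2 or 4 pages */
}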
2011 Jun 21
13
VM disk I/O limit patch
...}
if (log_stats)
@@ -306,7 +344,6 @@ irqreturn_t blkif_be_int(int irq, void *
/******************************************************************
* DOWNWARD CALLS -- These interface with the block-device layer proper.
*/
-
static int do_block_io_op(blkif_t *blkif)
{
blkif_back_rings_t *blk_rings = &blkif->blk_rings;
@@ -314,15 +351,27 @@ static int do_block_io_op(blkif_t *blkif
pending_req_t *pending_req;
RING_IDX rc, rp;
int more_to_do = 0, ret;
+ static int last_done_nr_sects = 0;
rc = blk_rings->common.req_cons;
rp = blk_rings->common.sring->req_prod;
rmb(...
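The visible part of the change only adds a last_done_nr_sects counter to do_block_io_op(), which suggests the limit is enforced by tracking sectors issued per accounting window. A rough sketch of that kind of throttle, with all names and the window length being assumptions rather than the patch's own:

/* Illustrative throttle: refuse to pull more work off the ring once the
 * backend has spent its per-window sector budget; the caller retries on
 * the next pass of the worker thread. */
struct io_limit {
	unsigned long sects_per_window;	/* configured budget */
	unsigned long done_sects;	/* sectors issued so far this window */
	unsigned long window_end;	/* jiffies at which the window resets */
};

static int io_limit_exceeded(struct io_limit *lim, unsigned int nr_sects)
{
	if (time_after(jiffies, lim->window_end)) {
		lim->done_sects = 0;
		lim->window_end = jiffies + HZ;	/* one-second window */
	}
	if (lim->done_sects + nr_sects > lim->sects_per_window)
		return 1;	/* over budget: defer this request */
	lim->done_sects += nr_sects;
	return 0;
}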
2010 Sep 15
15
xenpaging fixes for kernel and hypervisor
Patrick,
the following patches fix xenpaging for me.
Grant-table handling is incomplete. If a page is gone, GNTST_eagain
should be returned to the caller to indicate that the hypercall has to be
retried after a while, until the page is available again.
Please review.
Olaf
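For what it's worth, the usual way to honour GNTST_eagain is a bounded retry loop around the hypercall, sleeping briefly between attempts; a rough sketch (the delay and retry bound are illustrative, not taken from the patches):

/* Illustrative retry loop for a grant-table map that may return
 * GNTST_eagain while the target page is paged out. Needs
 * <linux/delay.h>, <xen/interface/grant_table.h> and the hypercall
 * wrappers. */
static int gnttab_map_retry(struct gnttab_map_grant_ref *op)
{
	int retries = 0;

	do {
		if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, 1))
			return -EFAULT;		/* hypercall itself failed */
		if (op->status != GNTST_eagain)
			break;
		msleep(10);	/* give the pager time to bring the page back */
	} while (retries++ < 100);		/* illustrative bound */

	return op->status == GNTST_okay ? 0 : -EIO;
}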
2011 Aug 15
6
[patch] xen-blkback: sync I/O after backend disconnected
...s_watch *, const char **,
unsigned int);
+static void xen_vbd_sync(struct xen_vbd *vbd);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
@@ -232,6 +233,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
free_vm_area(blkif->blk_ring_area);
blkif->blk_rings.common.sring = NULL;
}
+ xen_vbd_sync(&blkif->vbd);
}
void xen_blkif_free(struct xen_blkif *blkif)
@@ -332,6 +334,12 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
+static void xen_vbd_sync(struct xen_vbd *vbd)
+{
+ if (vbd->bdev)
+ fsync_bdev(vbd->...
2011 Nov 17
29
[PATCH 00 of 17] Documentation updates
The following series flushes my documentation queue and replaces
previous postings of those patches.
The main difference is that the xl cfg file is now formatted using POD
instead of markdown and presented as a manpage.
I have set up a cron job to build docs/html and publish it at
http://xenbits.xen.org/docs/unstable/ (it's a bit bare right now).
The motivation for some of these patches
2011 Sep 01
9
[PATCH V4 0/3] xen-blkfront/blkback discard support
Dear list,
This is V4 of the trim support for xen-blkfront/blkback.
BLKIF_OP_TRIM has been moved to BLKIF_OP_DISCARD, and all
"trim" references in the patches have been dropped in favour of "discard".
We also updated the blkif_x86_{32|64}_request helpers, since otherwise we
would run into problems when using a non-native protocol.
This series has been tested with both an SSD and a raw file;
with the SSD we will
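When the guest runs a non-native ABI (for example a 32-bit frontend on a 64-bit backend), requests are translated on the backend side, so a new operation such as BLKIF_OP_DISCARD also has to be handled in those helpers. A very rough sketch of the idea, with the field names being assumptions rather than the series' actual layout:

/* Illustrative only: a translation helper that knows about discard.
 * Discard requests carry a sector range instead of a segment list, so
 * they get their own branch. Field names are assumptions. */
static void blkif_get_x86_32_req(struct blkif_request *dst,
				 struct blkif_x86_32_request *src)
{
	dst->operation = src->operation;
	dst->id        = src->id;

	if (src->operation == BLKIF_OP_DISCARD) {
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors    = src->u.discard.nr_sectors;
		return;
	}

	/* read/write: copy the segment descriptors as before */
	dst->u.rw.sector_number = src->u.rw.sector_number;
	dst->u.rw.nr_segments   = src->u.rw.nr_segments;
	/* ... copy src->u.rw.seg[0 .. nr_segments-1] ... */
}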