> pvSCSI backend driver
I'm sorry this is so late, but here are my comments on this patch.
First of all, this patch looks a lot better than the old one did.
Most of this set of comments is to do with implementation details,
rather than anything architectural. The only bit which worries me is
the xenbus-level protocol for adding and removing LUNs, and I suspect
that's just because I've not understood what you're actually doing.
If you could briefly explain how you expect this to work, at the level
of individual xenstore operations, I expect that would make things a
lot clearer.
I also have some more detailed comments on the code itself, below. I've
made them against the code itself, rather than against the patches,
because that was easier than dealing with several overlapping patches.
emulate.c:
> /* quoted scsi_debug.c/resp_report_luns() */
> static void __report_luns(pending_req_t *pending_req, void *data)
> {
It's kind of a pity that you've had to duplicate so much stuff from
scsi_debug.c, and it'd be nicer if you'd been able to share stuff a
bit. I guess that's a problem for another time.
> int __pre_do_emulation(pending_req_t *pending_req, void *data)
> {
> uint8_t op_code = pending_req->cmnd[0];
>
> if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
> pre_function[op_code] != NULL) {
> pre_function[op_code](pending_req, data);
> }
>
> /*
> 0: no need for native driver call, so should return immediately.
> 1: non emulation or should call native driver
> after modifing the request buffer.
> */
> return (bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
The code only matches up with the comment here because
VSCSIIF_NEED_CMD_EXEC is 1. Perhaps you meant !!(bitmap[op_code] &
VSCSIIF_NEED_CMD_EXEC)?
> }
>
> void scsiback_emulation_init(void)
> {
> int i;
>
> /* Initialize to default state */
> for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
> bitmap[i] = VSCSIIF_NEED_CMD_EXEC;
> pre_function[i] = NULL;
> post_function[i] = NULL;
> /* means,
> - no need for pre-emulation
> - no need for post-emulation
> - call native driver
>
> (Current setting is black-list bases, white-list
> bases may be appropriate for security.)
> */
Yeah, I'd certainly prefer a whitelist here.
> }
>
> /*
> Register appropriate functions below as you need.
> (See scsi/scsi.h for definition of SCSI op_code.)
> */
> pre_function[REPORT_LUNS] = __report_luns;
> bitmap[REPORT_LUNS] = (VSCSIIF_NEED_EMULATE_REQBUF |
> VSCSIIF_NEED_EMULATE_RSPBUF);
>
> return;
> }
>
interface.c:
> static int map_frontend_page( struct vscsibk_info *info,
> unsigned long ring_ref)
> {
> struct gnttab_map_grant_ref op;
> int err;
>
> gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
> GNTMAP_host_map, ring_ref,
> info->domid);
>
> err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
> BUG_ON(err);
>
> if (op.status) {
> printk(KERN_ERR "scsiback: Grant table operation failure !\n");
> return op.status;
> }
>
> info->shmem_ref = ring_ref;
> info->shmem_handle = op.handle;
>
> return 0;
Would it be better to return GNTST_okay here? That would be
consistent with the error paths. It doesn't matter a great deal,
really.
> }
>
> void scsiback_free(struct vscsibk_info *info)
> {
> if (atomic_dec_and_test(&info->refcnt))
> kmem_cache_free(scsiback_cachep, info);
> }
If refcnt wasn't one when this function was called, who would be
responsible for freeing the info structure?
scsiback.c:
>
>
> #define NO_ASYNC 1 /*!aync*/
Umm... Is this a leftover of an earlier version of the patch? Do you
expect NO_ASYNC to be set to 0 any time soon? What would such a
change actually mean?
> /* Unmap every segment in @req. Release req's scatter-gather list.
> Skips any segments which have an INVALID_HANDLE handle, but doesn't
> set the handles to INVALID_HANDLE. */
> void scsiback_fast_flush_area(pending_req_t *req)
> {
> struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
You're allocating nearly 700 bytes on the stack here. That's kind of
risky if you're using 4k stacks, but I guess if it works it's good
enough.
> unsigned int i, invcount = 0;
> grant_handle_t handle;
> int err;
>
> if (req->nr_segments) {
> for (i = 0; i < req->nr_segments; i++) {
> handle = pending_handle(req, i);
> if (handle == SCSIBACK_INVALID_HANDLE)
> continue;
> gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
> GNTMAP_host_map, handle);
> pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
> invcount++;
> }
>
> err = HYPERVISOR_grant_table_op(
> GNTTABOP_unmap_grant_ref, unmap, invcount);
> BUG_ON(err);
> kfree(req->sgl);
> }
>
> return;
> }
> void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
> pending_req_t *pending_req)
> {
> vscsiif_response_t *ring_res;
> struct vscsibk_info *info = pending_req->info;
> int notify;
> int more_to_do = 1;
> unsigned long flags;
>
> DPRINTK("%s\n",__FUNCTION__);
>
> spin_lock_irqsave(&info->ring_lock, flags);
>
> rmb();
What is this barrier supposed to protect?
>
> ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
> info->ring.rsp_prod_pvt++;
>
> ring_res->rslt = result;
> ring_res->rqid = pending_req->rqid;
>
> if (sense_buffer != NULL) {
> memcpy(ring_res->sense_buffer, sense_buffer,
> VSCSIIF_SENSE_BUFFERSIZE);
> ring_res->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
> } else {
> ring_res->sense_len = 0;
> }
>
> RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
> if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
> RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
> } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
> more_to_do = 1;
> }
>
> spin_unlock_irqrestore(&info->ring_lock, flags);
>
> if (more_to_do)
> scsiback_notify_work(info);
>
> if (notify)
> notify_remote_via_irq(info->irq);
>
> scsiback_put(pending_req->info);
> free_req(pending_req);
> }
> #ifdef NO_ASYNC /*!async*/
> static void scsiback_cmd_done(struct request *req, int errors)
> {
> pending_req_t *pending_req = req->end_io_data;
> struct scsi_device *sdev = pending_req->sdev;
> unsigned char *sense_buffer;
>
> sense_buffer = req->sense;
>
> #else
> static void scsiback_cmd_done(void *data, char *sense_buffer,
> int errors, int resid)
> {
> pending_req_t *pending_req = data;
> struct scsi_device *sdev = pending_req->sdev;
>
> DPRINTK("%s\n",__FUNCTION__);
> #endif
>
> if ((errors != 0) && (pending_req->cmnd[0] != TEST_UNIT_READY)) {
> printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
> sdev->channel, sdev->id, sdev->lun);
> printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
> status_byte(errors), msg_byte(errors),
> host_byte(errors), driver_byte(errors));
>
> printk(KERN_ERR "scsiback: cmnd[0]=0x%02X nr_segments=%d\n",
> pending_req->cmnd[0],
> pending_req->nr_segments);
Hmm. I think you may have a denial-of-service condition here. You're
generating kernel log messages whenever a frontend-submitted request
fails, with no rate limiting, so if a frontend can reliably produce
SCSI requests which fail then they can generate an unbounded stream of
log messages. Aside from the irritation value, that can also lead to
dom0's filesystem getting full, which usually causes problems.
I'd recommend you convert these messages to DPRINTK()s, or put them
behind a rate-limiter of some sort.
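For instance, a minimal sketch (untested) using printk_ratelimit(),
keeping your existing message text:

    if ((errors != 0) && (pending_req->cmnd[0] != TEST_UNIT_READY) &&
        printk_ratelimit()) {
            printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
                    sdev->channel, sdev->id, sdev->lun);
            /* ... the rest of the diagnostics as you already have them ... */
    }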
>
> if (CHECK_CONDITION & status_byte(errors))
> __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
> }
>
> scsiback_rsp_emulation(pending_req);
> scsiback_fast_flush_area(pending_req);
> scsiback_do_resp_with_sense(sense_buffer, errors, pending_req);
>
>
> #ifdef NO_ASYNC /*!async*/
> __blk_put_request(req->q, req);
> #endif
> }
>
>
> /* quoted scsi_lib.c/scsi_req_map_sg . */
> static int requset_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
> {
I don't quite understand why it was necessary to duplicate all of this
code. Would you mind explaining why you were unable to use the
existing implementation, please?
Also, there's a typo in the name of the function.
> struct request_queue *q = rq->q;
> int nr_pages;
> unsigned int nsegs = count;
>
> unsigned int data_len = 0, len, bytes, off;
> struct page *page;
> struct bio *bio = NULL;
> int i, err, nr_vecs = 0;
>
> for (i = 0; i < nsegs; i++) {
> page = pending_req->sgl[i].page;
> off = (unsigned int)pending_req->sgl[i].offset;
> len = (unsigned int)pending_req->sgl[i].length;
> data_len += len;
>
> nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
> while (len > 0) {
> bytes = min_t(unsigned int, len, PAGE_SIZE - off);
>
> if (!bio) {
> nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
> nr_pages -= nr_vecs;
> bio = bio_alloc(GFP_KERNEL, nr_vecs);
> if (!bio) {
> err = -ENOMEM;
> goto free_bios;
> }
> bio->bi_end_io = scsiback_bi_endio;
> }
>
> if (bio_add_pc_page(q, bio, page, bytes, off) != bytes) {
> bio_put(bio);
> err = -EINVAL;
> goto free_bios;
> }
>
> if (bio->bi_vcnt >= nr_vecs) {
> err = scsiback_merge_bio(rq, bio);
> if (err) {
> bio_endio(bio, bio->bi_size, 0);
> goto free_bios;
> }
> bio = NULL;
> }
>
> page++;
> len -= bytes;
> off = 0;
> }
> }
>
> rq->buffer = rq->data = NULL;
> rq->data_len = data_len;
>
> return 0;
>
> free_bios:
> while ((bio = rq->bio) != NULL) {
> rq->bio = bio->bi_next;
> /*
> * call endio instead of bio_put incase it was bounced
> */
> bio_endio(bio, bio->bi_size, 0);
> }
>
> return err;
> }
>
> #endif
>
> void scsiback_cmd_exec(pending_req_t *pending_req)
> {
> int err;
>
> int cmd_len = (int)pending_req->cmd_len;
> int data_dir = (int)pending_req->sc_data_direction;
> unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
> unsigned int timeout;
> #ifdef NO_ASYNC /*!async*/
> struct request *rq;
> int write;
> #else
> unsigned int data_len = pending_req->request_bufflen;
> #endif
>
> DPRINTK("%s\n",__FUNCTION__);
>
> /* because it doesn't timeout backend earlier than frontend.*/
> if (pending_req->timeout_per_command)
> timeout = (pending_req->timeout_per_command * HZ * 2);
> else
> timeout = VSCSIIF_TIMEOUT;
Eh? Why does the backend use double the timeout suggested by the
frontend? Would pending_req->timeout_per_command * HZ not make more
sense?
>
> #ifdef NO_ASYNC /*!async*/
> err = 0;
> write = (data_dir == DMA_TO_DEVICE);
> rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
>
> rq->flags |= REQ_BLOCK_PC;
> rq->cmd_len = cmd_len;
> memcpy(rq->cmd, pending_req->cmnd, cmd_len);
>
> memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
> rq->sense = pending_req->sense_buffer;
> rq->sense_len = 0;
>
> rq->retries = 0;
> rq->timeout = timeout;
> rq->end_io_data = pending_req;
>
> if (nr_segments) {
>
> if (requset_map_sg(rq, pending_req, nr_segments)) {
> printk(KERN_ERR "scsiback: SG Request Map Error\n");
> return;
> }
> }
>
> blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
>
> #else /*async*/
> /* not allowed to retry in backend. */
> /* timeout of backend is longer than that of brontend.*/
> err = scsi_execute_async(pending_req->sdev, &(pending_req->cmnd[0]),
> cmd_len, data_dir, &(pending_req->sgl[0]), data_len, nr_segments, timeout, 0,
> pending_req, scsiback_cmd_done, GFP_ATOMIC);
I think you're always schedulable when you get here, aren't you? Why
do you need to use GFP_ATOMIC, rather than GFP_KERNEL?
>
> #endif /*!async*/
>
> if (err)
> scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24), pending_req);
err is always 0 in the non-async case. Does this want to be inside
the #if?
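i.e. something like this (rough, untested sketch), keeping the check in
the async branch only:

    #else /*async*/
            err = scsi_execute_async(pending_req->sdev, &(pending_req->cmnd[0]),
                    cmd_len, data_dir, &(pending_req->sgl[0]), data_len,
                    nr_segments, timeout, 0,
                    pending_req, scsiback_cmd_done, GFP_KERNEL);
            if (err)
                    scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
                            pending_req);
    #endif /*!async*/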
>
> return ;
> }
> static void scsiback_device_reset_exec(pending_req_t *pending_req)
> {
> struct vscsibk_info *info = pending_req->info;
> int err;
> struct scsi_device *sdev = pending_req->sdev;
>
> scsiback_get(info);
>
> err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
>
> scsiback_do_resp_with_sense(NULL, err, pending_req);
>
> notify_remote_via_irq(info->irq);
I think this notify should be redundant because of the one in
scsiback_do_resp_with_sense(), shouldn't it?
> return;
> }
>
>
> static int prepare_pending_reqs(struct vscsibk_info *info,
> vscsiif_request_t *ring_req, pending_req_t *pending_req)
> {
> struct scsi_device *sdev;
> struct ids_tuple vir;
> int err = -EINVAL;
>
> DPRINTK("%s\n",__FUNCTION__);
>
> pending_req->rqid = ring_req->rqid;
> pending_req->act = ring_req->act;
>
> pending_req->info = info;
>
> vir.chn = ring_req->channel;
> vir.tgt = ring_req->id;
> vir.lun = ring_req->lun;
>
> sdev = scsiback_do_translation(info, &vir);
> if (!sdev) {
> pending_req->sdev = NULL;
> printk(KERN_ERR "scsiback: doesn''t exist.\n");
You have another potential denial-of-service condition here.
> err = -ENODEV;
> goto invald_value;
> }
> pending_req->sdev = sdev;
>
> /* request range check from frontend */
> if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
> (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
> (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
> (ring_req->sc_data_direction != DMA_NONE)) {
> printk(KERN_ERR "scsiback: invalid parameter data_dir = %d\n",
> ring_req->sc_data_direction);
> err = -EINVAL;
> goto invald_value;
> }
Careful. ring_req points at the shared ring, unless I've
misunderstood what this is doing, and so the frontend can still modify
it. That means that these sanity checks aren't necessarily going
to do what you want.
I think the right way to fix this is to take a local copy of the
request, and then operate on that. That will prevent the frontend
from modifying things after you've validated them. You'll probably
also want a barrier() between the memcpy() and the use of the copied
structure, so that the compiler doesn't try to get clever and optimise
the copy out again.
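Roughly (untested sketch, reusing the names from scsiback_do_cmd_fn()):

    vscsiif_request_t req_copy;

    memcpy(&req_copy, RING_GET_REQUEST(ring, rc), sizeof(req_copy));
    barrier();      /* don't let the compiler re-read the shared ring */
    /* ... then validate and use req_copy rather than ring_req ... */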
>
> if (ring_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
> printk(KERN_ERR "scsiback: invalid parameter nr_seg = %d\n",
> ring_req->nr_segments);
> err = -EINVAL;
> goto invald_value;
> }
> pending_req->nr_segments = ring_req->nr_segments;
>
> if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
> printk(KERN_ERR "scsiback: invalid parameter cmd_len = %d\n",
> ring_req->cmd_len);
> err = -EINVAL;
> goto invald_value;
> }
> memcpy(pending_req->cmnd, ring_req->cmnd, ring_req->cmd_len);
> pending_req->cmd_len = ring_req->cmd_len;
>
> pending_req->sc_data_direction = ring_req->sc_data_direction;
> pending_req->timeout_per_command = ring_req->timeout_per_command;
>
> if(scsiback_gnttab_data_map(ring_req, pending_req)) {
> printk(KERN_ERR "scsiback: invalid buffer\n");
> err = -EINVAL;
> goto invald_value;
> }
>
> return 0;
>
> invald_value:
> return err;
> }
>
>
> static int scsiback_do_cmd_fn(struct vscsibk_info *info)
> {
> struct vscsiif_back_ring *ring = &info->ring;
> vscsiif_request_t *ring_req;
>
> pending_req_t *pending_req;
> RING_IDX rc, rp;
> int err, more_to_do = 0;
>
> DPRINTK("%s\n",__FUNCTION__);
>
> rc = ring->req_cons;
> rp = ring->sring->req_prod;
> rmb();
>
> while ((rc != rp)) {
> if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
> break;
> pending_req = alloc_req(info);
> if (NULL == pending_req) {
> more_to_do = 1;
> break;
> }
>
> ring_req = RING_GET_REQUEST(ring, rc);
> ring->req_cons = ++rc;
>
> scsiback_get(info);
> err = prepare_pending_reqs(info, ring_req,
> pending_req);
> if (err == -EINVAL) {
> scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
> pending_req);
> continue;
> } else if (err == -ENODEV) {
> scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
> pending_req);
Do you need another continue here? It looks like you're going to send
an error and then process the request anyway, which sounds like it
would confuse the frontend.
> }
>
> if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
> scsiback_req_emulation_or_cmdexec(pending_req);
> } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
> scsiback_device_reset_exec(pending_req);
> } else {
> printk(KERN_ERR "scsiback: invalid parameter for request\n");
> scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
> pending_req);
> continue;
> }
>
I think you should probably have a cond_resched() in here somewhere.
Otherwise, the frontend could set its producer pointer into the far
future and cause the kthread to spin picking up invalid requests, and
if dom0 has a non-preemptible kernel that will cause problems.
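Something like this at the top of the loop body would probably be
enough (sketch):

    while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                    break;

            cond_resched();         /* don't spin forever on a hostile frontend */
            ...
    }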
> }
>
> if (RING_HAS_UNCONSUMED_REQUESTS(ring))
> more_to_do = 1;
>
> return more_to_do;
> }
>
>
> int scsiback_schedule(void *data)
> {
> struct vscsibk_info *info = (struct vscsibk_info *)data;
>
> DPRINTK("%s\n",__FUNCTION__);
>
> scsiback_get(info);
This looks like it should have been done by whoever called
kthread_run(). Doing it here doesn't actually protect you against
anything: if the info structure can disappear before the kthread is
stopped, it can usually disappear before it starts, in which case
you'll get a reference to a garbage structure.
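In other words, something like this in whatever starts the thread (I'm
guessing at the call site and the thread name here):

    scsiback_get(info);             /* reference owned by the kthread */
    info->kthread = kthread_run(scsiback_schedule, info,
                                "xenvscsid.%d", info->domid);
    if (IS_ERR(info->kthread)) {
            scsiback_put(info);
            /* report the error back up */
    }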
>
> while (!kthread_should_stop()) {
> wait_event_interruptible(
> info->wq,
> info->waiting_reqs || kthread_should_stop());
> wait_event_interruptible(
> pending_free_wq,
> !list_empty(&pending_free) || kthread_should_stop());
>
> info->waiting_reqs = 0;
> smp_mb();
>
> if (scsiback_do_cmd_fn(info))
> info->waiting_reqs = 1;
> }
>
> info->kthread = NULL;
> scsiback_put(info);
>
> return 0;
> }
translate.c:
> int scsiback_add_translation_entry(struct vscsibk_info *info,
> struct scsi_device *sdev, struct ids_tuple *v)
> {
> int err = 0;
> struct v2p_entry *entry;
> struct v2p_entry *new;
> struct list_head *head = &(info->v2p_entry_lists);
> unsigned long flags;
>
> spin_lock_irqsave(&info->v2p_lock, flags);
>
> /* Check double assignment to identical virtual ID */
> list_for_each_entry(entry, head, l) {
> if ((entry->v.chn == v->chn) &&
> (entry->v.tgt == v->tgt) &&
> (entry->v.lun == v->lun)) {
> printk(KERN_WARNING "scsiback: Virtual ID is already used. "
> "Assignment was not performed.\n");
> err = -EEXIST;
> goto out;
> }
>
> }
>
> /* Create a new translation entry and add to the list */
> if ((new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL)) == NULL) {
At this point, you're holding a spinlock and you have interrupts off.
That makes GFP_KERNEL unsafe. You should either restructure so that
you're not holding the lock here, or you should convert to GFP_ATOMIC.
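e.g. (untested sketch) do the allocation before taking the lock:

    new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
    if (new == NULL)
            return -ENOMEM;

    spin_lock_irqsave(&info->v2p_lock, flags);
    /* ... duplicate check as before, but remember to kfree(new)
       on the -EEXIST path ... */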
> printk(KERN_ERR "scsiback: %s: kmalloc() error.\n",
__FUNCTION__);
> err = -ENOMEM;
> goto out;
> }
> new->v = *v;
> new->sdev = sdev;
> list_add(&new->l, head);
>
> out:
> spin_unlock_irqrestore(&info->v2p_lock, flags);
> return err;
> }
> /*
> Delete the translation entry specfied
> */
> int scsiback_del_translation_entry(struct vscsibk_info *info,
> struct ids_tuple *v)
> {
> struct v2p_entry *entry;
> struct list_head *head = &(info->v2p_entry_lists);
> unsigned long flags;
>
> spin_lock_irqsave(&info->v2p_lock, flags);
> /* Find out the translation entry specified */
> list_for_each_entry(entry, head, l) {
> if ((entry->v.chn == v->chn) &&
> (entry->v.tgt == v->tgt) &&
> (entry->v.lun == v->lun)) {
> goto found;
> }
> }
>
> spin_unlock_irqrestore(&info->v2p_lock, flags);
> return 0;
>
> found:
> /* Delete the translation entry specfied */
> list_del(&entry->l);
> kfree(entry);
I think the entry holds a reference to the scsi device, doesn't it?
Do you need to scsi_device_put() it from here?
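i.e. something like this, assuming the reference came from
scsi_device_lookup():

    found:
            list_del(&entry->l);
            scsi_device_put(entry->sdev);
            kfree(entry);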
>
> spin_unlock_irqrestore(&info->v2p_lock, flags);
> return 0;
> }
> /*
> Perform virtual to physical translation
> */
> struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
> struct ids_tuple *v)
I'm not sure I understand the synchronisation here. What prevents the
returned scsi_device from disappearing while the caller is still using
it?
> {
> struct v2p_entry *entry;
> struct list_head *head = &(info->v2p_entry_lists);
> struct scsi_device *sdev = NULL;
> unsigned long flags;
>
> spin_lock_irqsave(&info->v2p_lock, flags);
> list_for_each_entry(entry, head, l) {
> if ((entry->v.chn == v->chn) &&
> (entry->v.tgt == v->tgt) &&
> (entry->v.lun == v->lun)) {
> sdev = entry->sdev;
> goto out;
> }
> }
> out:
> spin_unlock_irqrestore(&info->v2p_lock, flags);
> return sdev;
> }
xenbus.c:
>
> struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
> {
> struct Scsi_Host *shost;
> struct scsi_device *sdev = NULL;
>
> shost = scsi_host_lookup(phy->hst);
This gets a reference to the scsi host, but I can't see where you
release that reference.
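e.g. something like this once the device lookup below has been done
(untested):

    sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
    scsi_host_put(shost);           /* drop the ref from scsi_host_lookup() */
    if (!sdev) {
            ...
    }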
> if (IS_ERR(shost)) {
> printk(KERN_ERR "scsiback: host%d doesn''t exist.\n",
> phy->hst);
> goto invald_value;
> }
> sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
> if (!sdev) {
> printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn''t
exist.\n",
> phy->hst, phy->chn, phy->tgt, phy->lun);
> goto invald_value;
> }
>
> invald_value:
> return (sdev);
> }
> static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
> {
As I said before, I don't really understand how the xenbus activity
here is supposed to work, or what the transaction is supposed to be
protecting. Would you mind explaining it to me, please?
> }
>
>
> static void scsiback_frontend_changed(struct xenbus_device *dev,
> enum xenbus_state frontend_state)
> {
> struct backend_info *be = dev->dev.driver_data;
> int err;
>
> switch (frontend_state) {
> case XenbusStateInitialising:
> break;
> case XenbusStateInitialised:
> err = scsiback_map(be);
> if (err)
> break;
>
> scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
> err = xenbus_switch_state(dev, XenbusStateConnected);
> if (err)
> xenbus_dev_fatal(dev, err, "switching to Connected state",
> dev->nodename);
> break;
> case XenbusStateConnected:
> if (dev->state == XenbusStateConnected)
> break;
>
> err = xenbus_switch_state(dev, XenbusStateConnected);
> if (err)
> xenbus_dev_fatal(dev, err, "switching to Connected state",
> dev->nodename);
xenbus_switch_state() will call xenbus_dev_fatal() automatically on error,
so you don't have to.
> break;
>
> case XenbusStateClosing:
> scsiback_disconnect(be->info);
> xenbus_switch_state(dev, XenbusStateClosing);
> break;
>
> case XenbusStateClosed:
> xenbus_switch_state(dev, XenbusStateClosed);
> if (xenbus_dev_is_online(dev))
> break;
>
> case XenbusStateReconfiguring:
> scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
> err = xenbus_switch_state(dev, XenbusStateReconfigured);
> if (err)
> xenbus_dev_fatal(dev, err, "switching to Reconfigured state",
> dev->nodename);
> break;
>
> case XenbusStateUnknown:
> device_unregister(&dev->dev);
> break;
> default:
> xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
> frontend_state);
> break;
> }
> }