[Xen-devel] [PATCH] linux-2.6.18/scsifront: respect full ring on reset processing
When the ring is full, scsifront_dev_reset_handler() so far issued its
request anyway; make it wait for a slot to become available instead.

Additionally, the error return case of the wait_event_interruptible() at
the end of scsifront_dev_reset_handler() wasn't handled, potentially
causing shadow slot corruption. Doing this also revealed that io_lock was
pointless - it was only used inside the worker thread, i.e. protected
nothing.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/drivers/xen/scsifront/common.h
+++ b/drivers/xen/scsifront/common.h
@@ -99,7 +99,6 @@ struct vscsifrnt_info {
 	struct Scsi_Host *host;
 
-	spinlock_t io_lock;
 	spinlock_t shadow_lock;
 	unsigned int evtchn;
 	unsigned int irq;
@@ -113,8 +112,9 @@ struct vscsifrnt_info {
 	struct task_struct *kthread;
 	wait_queue_head_t wq;
-	unsigned int waiting_resp;
-
+	wait_queue_head_t wq_sync;
+	unsigned int waiting_resp:1;
+	unsigned int waiting_sync:1;
 };
 
 #define DPRINTK(_f, _a...) \
--- a/drivers/xen/scsifront/scsifront.c
+++ b/drivers/xen/scsifront/scsifront.c
@@ -49,16 +49,19 @@ static int get_id_from_freelist(struct v
 	return free;
 }
 
-static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+static void _add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&info->shadow_lock, flags);
-
 	info->shadow[id].next_free = info->shadow_free;
 	info->shadow[id].sc = NULL;
 	info->shadow_free = id;
+}
+
+static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+	_add_id_to_freelist(info, id);
 	spin_unlock_irqrestore(&info->shadow_lock, flags);
 }
 
@@ -169,7 +172,19 @@ static void scsifront_sync_cmd_done(stru
 
 	spin_lock_irqsave(&info->shadow_lock, flags);
 	info->shadow[id].wait_reset = 1;
-	info->shadow[id].rslt_reset = ring_res->rslt;
+	switch (info->shadow[id].rslt_reset) {
+	case 0:
+		info->shadow[id].rslt_reset = ring_res->rslt;
+		break;
+	case -1:
+		_add_id_to_freelist(info, id);
+		break;
+	default:
+		shost_printk(KERN_ERR "scsifront: ", info->host,
+			     "bad reset state %d, possibly leaking %u\n",
+			     info->shadow[id].rslt_reset, id);
+		break;
+	}
 	spin_unlock_irqrestore(&info->shadow_lock, flags);
 
 	wake_up(&(info->shadow[id].wq_reset));
@@ -184,7 +199,7 @@ static int scsifront_cmd_done(struct vsc
 	int more_to_do = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->io_lock, flags);
+	spin_lock_irqsave(info->host->host_lock, flags);
 
 	rp = info->ring.sring->rsp_prod;
 	rmb();
@@ -206,8 +221,11 @@ static int scsifront_cmd_done(struct vsc
 		info->ring.sring->rsp_event = i + 1;
 	}
 
-	spin_unlock_irqrestore(&info->io_lock, flags);
+	info->waiting_sync = 0;
+	spin_unlock_irqrestore(info->host->host_lock, flags);
+
+	wake_up(&info->wq_sync);
 
 	/* Yield point for this unbounded loop. */
 	cond_resched();
@@ -425,17 +443,33 @@ static int scsifront_dev_reset_handler(s
 	vscsiif_request_t *ring_req;
 	uint16_t rqid;
-	int err;
+	int err = 0;
 
+	for (;;) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
-	spin_lock_irq(host->host_lock);
+		spin_lock_irq(host->host_lock);
 #endif
+		if (!RING_FULL(&info->ring))
+			break;
+		if (err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
+			spin_unlock_irq(host->host_lock);
+#endif
+			return FAILED;
+		}
+		info->waiting_sync = 1;
+		spin_unlock_irq(host->host_lock);
+		err = wait_event_interruptible(info->wq_sync,
+					       !info->waiting_sync);
+		spin_lock_irq(host->host_lock);
+	}
 
 	ring_req = scsifront_pre_request(info);
 	ring_req->act = VSCSIIF_ACT_SCSI_RESET;
 
 	rqid = ring_req->rqid;
 	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
+	info->shadow[rqid].rslt_reset = 0;
 
 	ring_req->channel = sc->device->channel;
 	ring_req->id = sc->device->id;
@@ -454,13 +488,19 @@ static int scsifront_dev_reset_handler(s
 	scsifront_do_request(info);
 	spin_unlock_irq(host->host_lock);
 
-	wait_event_interruptible(info->shadow[rqid].wq_reset,
-				 info->shadow[rqid].wait_reset);
+	err = wait_event_interruptible(info->shadow[rqid].wq_reset,
+				       info->shadow[rqid].wait_reset);
 
 	spin_lock_irq(host->host_lock);
 
-	err = info->shadow[rqid].rslt_reset;
-
-	add_id_to_freelist(info, rqid);
+	if (!err) {
+		err = info->shadow[rqid].rslt_reset;
+		add_id_to_freelist(info, rqid);
+	} else {
+		spin_lock(&info->shadow_lock);
+		info->shadow[rqid].rslt_reset = -1;
+		spin_unlock(&info->shadow_lock);
+		err = FAILED;
+	}
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
 	spin_unlock_irq(host->host_lock);
--- a/drivers/xen/scsifront/xenbus.c
+++ b/drivers/xen/scsifront/xenbus.c
@@ -205,7 +205,7 @@ static int scsifront_probe(struct xenbus
 	}
 
 	init_waitqueue_head(&info->wq);
-	spin_lock_init(&info->io_lock);
+	init_waitqueue_head(&info->wq_sync);
 	spin_lock_init(&info->shadow_lock);
 
 	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d",
		 info->host->host_no);
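The last scsifront.c hunk relies on an ownership handoff: when the wait for
the reset response is interrupted, the handler may no longer free the shadow
slot itself (the response can still arrive later), so it marks the slot with
-1 and scsifront_sync_cmd_done() releases it instead. The following is a
minimal userspace sketch of that pattern, using pthreads in place of the
kernel's wait queue and spinlock; all names are illustrative and none are
taken from the driver.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { RSLT_PENDING = 0, RSLT_ABANDONED = -1 };

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_rslt = RSLT_PENDING;   /* pending, abandoned, or a result */
static int slot_freed;                 /* set once the slot is released   */

/* Models the response path (scsifront_sync_cmd_done() in the patch). */
static void *completer(void *arg)
{
	(void)arg;
	sleep(1);                        /* the response arrives "late"    */
	pthread_mutex_lock(&slot_lock);
	if (slot_rslt == RSLT_ABANDONED)
		slot_freed = 1;          /* requester gave up: release here */
	else
		slot_rslt = 1;           /* hand the result to the requester */
	pthread_mutex_unlock(&slot_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);

	/* Models the reset handler whose wait was interrupted: it must not
	 * release the slot itself (the response may still be in flight),
	 * so it only marks the slot abandoned. */
	pthread_mutex_lock(&slot_lock);
	if (slot_rslt == RSLT_PENDING)
		slot_rslt = RSLT_ABANDONED;
	else
		slot_freed = 1;          /* response already there: release now */
	pthread_mutex_unlock(&slot_lock);

	pthread_join(t, NULL);
	printf("slot released exactly once: %s\n", slot_freed ? "yes" : "no");
	return 0;
}

Built with "cc -pthread", the model releases the slot exactly once no matter
which side loses the race, which is the property the -1 sentinel buys the
driver.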
Attachment: xen-scsifront-reset-ring-empty.patch