From a2ace46632fb38c7a3771f2f0d235a4295e83bcf Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 30 Jan 2013 09:26:14 +0000
Subject: s390/dasd: Implement block timeout handling

This patch implements generic block layer timeout handling
callbacks for DASDs. When the timeout expires, the respective cqr
is aborted. With this timeout handler, a time-critical request
abort is guaranteed, as the abort does not depend on the internal
state of the various DASD driver queues.

Signed-off-by: Hannes Reinecke
Acked-by: Stefan Weinhuber
Signed-off-by: Stefan Weinhuber
Signed-off-by: Martin Schwidefsky
---
 drivers/s390/block/dasd.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 000e5140bda4..87478becedb0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2573,8 +2573,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		 */
 		cqr->callback_data = (void *) req;
 		cqr->status = DASD_CQR_FILLED;
+		req->completion_data = cqr;
 		blk_start_request(req);
 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		INIT_LIST_HEAD(&cqr->devlist);
 		dasd_profile_start(block, cqr, req);
 	}
 }
@@ -2861,6 +2863,80 @@ static void do_dasd_request(struct request_queue *queue)
 	spin_unlock(&block->queue_lock);
 }
 
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ *		      by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+	struct dasd_ccw_req *cqr = req->completion_data;
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	int rc = 0;
+
+	if (!cqr)
+		return BLK_EH_NOT_HANDLED;
+
+	device = cqr->startdev ? cqr->startdev : block->base;
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		spin_unlock(get_ccwdev_lock(device->cdev));
+		rc = dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				spin_unlock(get_ccwdev_lock(device->cdev));
+				rc = dasd_cancel_req(searchcqr);
+				spin_lock(get_ccwdev_lock(device->cdev));
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of queue
+				 */
+				continue;
+			}
+			break;
+		}
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */
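
Note: the hunk above only adds the dasd_times_out() handler itself. For the block
layer to actually invoke it, the handler still has to be registered on the DASD
request queue and a per-request timeout has to be armed; that wiring is not part of
this hunk. A minimal sketch using the legacy (pre-blk-mq) block API of that era,
assuming it sits in the DASD queue setup path; the placement and the 60-second value
are illustrative only, not taken from this patch:

	/* register the timeout handler on the DASD block request queue */
	blk_queue_rq_timed_out(block->request_queue, dasd_times_out);
	/* arm a per-request timeout; 60 * HZ is only an assumed example value */
	blk_queue_rq_timeout(block->request_queue, 60 * HZ);

With such a registration in place, the block layer starts a timer when a request is
dispatched and calls dasd_times_out() once it expires, which then cancels or
terminates the corresponding cqr as shown in the hunk above.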