author     Tejun Heo <tj@kernel.org>          2011-06-09 20:43:55 +0200
committer  Jens Axboe <jaxboe@fusionio.com>   2011-06-09 20:43:55 +0200
commit     c3af54afbac3675337cedf326b7b127ffa7f7327 (patch)
tree       cc49ada3a2ef88f1415af73635cff7d062615856 /block
parent     a9dce2a3b4f0686dd66cb44d4826a59508bce969 (diff)
block: remove non-syncing __disk_block_events() and fold it into disk_block_events()
After the previous update to disk_check_events(), nobody is using
non-syncing __disk_block_events(). Remove @sync and, as this makes
__disk_block_events() virtually identical to disk_block_events(),
remove the underscore prefixed version.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
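For readers skimming the diff: the @sync flag selected between the two
workqueue cancel flavours sketched below. This is an illustrative
summary of the standard <linux/workqueue.h> API, not code from the
patch; the stop_polling() wrapper is hypothetical.

#include <linux/workqueue.h>

/*
 * Hypothetical wrapper showing what @sync used to choose between.
 *
 * cancel_delayed_work(dwork)      - dequeues a pending item but returns
 *                                   without waiting for a callback that
 *                                   is already executing.
 * cancel_delayed_work_sync(dwork) - also waits for a running callback to
 *                                   finish, so it may sleep; hence the
 *                                   "Might sleep." note in the kernel-doc.
 */
static void stop_polling(struct delayed_work *dwork, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(dwork);
	else
		cancel_delayed_work(dwork);
}

Since every remaining caller passed sync=true, the patch keeps only the
synchronous cancel and drops the flag.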
Diffstat (limited to 'block')
-rw-r--r--  block/genhd.c  55
1 file changed, 24 insertions(+), 31 deletions(-)
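The kernel-doc carried over by the patch describes counted blocking:
each disk_block_events() must be paired with a disk_unblock_events(),
and polling resumes only when the block count drops back to zero. A
hedged usage sketch follows; disk_block_events()/disk_unblock_events()
are the real interfaces from block/genhd.c, while the surrounding
function and the do_reconfigure() helper are hypothetical.

#include <linux/genhd.h>

/*
 * Hypothetical caller: quiesce event checking across a reconfiguration.
 * Blocking nests, so this is safe even if our own caller already holds
 * a block of its own.
 */
static void reconfigure_disk_quiesced(struct gendisk *disk)
{
	disk_block_events(disk);	/* flushes in-flight check; may sleep */
	do_reconfigure(disk);		/* hypothetical helper */
	disk_unblock_events(disk);	/* last matching unblock re-arms polling */
}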
diff --git a/block/genhd.c b/block/genhd.c
index 3f093307764..ab0731d8976 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1414,22 +1414,36 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
return msecs_to_jiffies(intv_msecs);
}
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events(). Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
{
struct disk_events *ev = disk->ev;
unsigned long flags;
bool cancel;
+ if (!ev)
+ return;
+
spin_lock_irqsave(&ev->lock, flags);
cancel = !ev->block++;
spin_unlock_irqrestore(&ev->lock, flags);
- if (cancel) {
- if (sync)
- cancel_delayed_work_sync(&disk->ev->dwork);
- else
- cancel_delayed_work(&disk->ev->dwork);
- }
+ if (cancel)
+ cancel_delayed_work_sync(&disk->ev->dwork);
}
static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1475,6 @@ out_unlock:
}
/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events(). Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
- if (disk->ev)
- __disk_block_events(disk, true);
-}
-
-/**
* disk_unblock_events - unblock disk event checking
* @disk: disk to unblock events for
*
@@ -1554,7 +1547,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
spin_unlock_irq(&ev->lock);
/* unconditionally schedule event check and wait for it to finish */
- __disk_block_events(disk, true);
+ disk_block_events(disk);
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
flush_delayed_work(&ev->dwork);
__disk_unblock_events(disk, false);
@@ -1672,7 +1665,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1778,7 +1771,7 @@ static void disk_del_events(struct gendisk *disk)
if (!disk->ev)
return;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
mutex_lock(&disk_events_mutex);
list_del_init(&disk->ev->node);