From 06f9d4f94a075285d25253edbf57f2cda07d4ff3 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Wed, 22 Mar 2006 00:07:40 -0800
Subject: [PATCH] unshare: Error if passed unsupported flags

A bare bones trivial patch to ensure we always get -EINVAL on the
unsupported cases for sys_unshare.  If this goes in before 2.6.16 it
allows us to be forward compatible with future applications using
sys_unshare.

Signed-off-by: Eric W. Biederman
Cc: JANAK DESAI
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/kernel/fork.c b/kernel/fork.c
index b373322ca49..9bd7b65ee41 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1534,6 +1534,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 
     check_unshare_flags(&unshare_flags);
 
+    /* Return -EINVAL for all unsupported flags */
+    err = -EINVAL;
+    if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
+        goto bad_unshare_out;
+
     if ((err = unshare_thread(unshare_flags)))
         goto bad_unshare_out;
     if ((err = unshare_fs(unshare_flags, &new_fs)))
--
cgit v1.2.3
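The guarantee this patch adds is easy to exercise from user space: once it
is applied, any flag outside the supported CLONE_* set must fail with
EINVAL instead of being silently ignored.  A minimal sketch, assuming a
libc that defines SYS_unshare; CLONE_VFORK is used only because it is a
real clone flag that sys_unshare does not support (on a pre-patch kernel
the call may appear to succeed):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* CLONE_VFORK is outside the mask sys_unshare accepts */
    long ret = syscall(SYS_unshare, CLONE_VFORK);

    if (ret == -1 && errno == EINVAL)
        printf("unsupported flag rejected with EINVAL, as expected\n");
    else
        printf("ret=%ld errno=%d (kernel without this patch?)\n",
               ret, errno);
    return 0;
}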
From 0c9e63fd38a2fb2181668a0cdd622a3c23cfd567 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 23 Mar 2006 03:00:12 -0800
Subject: [PATCH] Shrinks sizeof(files_struct) and better layout

1) Reduce the size of (struct fdtable) to exactly 64 bytes on 32-bit
   platforms, lowering kmalloc() allocated space by 50%.

2) Reduce the size of (files_struct), using a special 32-bit (or 64-bit)
   embedded_fd_set instead of a 1024-bit fd_set for the
   close_on_exec_init and open_fds_init fields.  This saves some RAM
   (248 bytes per task) as most tasks don't open more than 32 files.
   D-Cache footprint for such tasks is also reduced to the minimum.

3) Reduce the size of the allocated fdset.  Currently two full pages
   are allocated, that is 32768 bits on x86 for example, which is way
   too much.  The minimum is now L1_CACHE_BYTES.

UP and SMP should benefit from this patch, because most tasks will
touch only one cache line when they open()/close() stdin/stdout/stderr
(fds 0/1/2): next_fd, close_on_exec_init, open_fds_init and
fd_array[0 .. 2] all live in the same cache line.

Signed-off-by: Eric Dumazet
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/fcntl.c                |  9 ++++-----
 fs/file.c                 | 34 ++++++++++++++--------------------
 fs/open.c                 |  8 ++++----
 include/linux/file.h      | 28 ++++++++++++++++++++++++----
 include/linux/init_task.h | 10 +++++-----
 kernel/fork.c             |  8 ++++----
 6 files changed, 55 insertions(+), 42 deletions(-)

diff --git a/fs/fcntl.c b/fs/fcntl.c
index dc4a7007f4e..03c789560fb 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -73,8 +73,8 @@ repeat:
      * orig_start..fdt->next_fd
      */
     start = orig_start;
-    if (start < fdt->next_fd)
-        start = fdt->next_fd;
+    if (start < files->next_fd)
+        start = files->next_fd;
 
     newfd = start;
     if (start < fdt->max_fdset) {
@@ -102,9 +102,8 @@ repeat:
      * we reacquire the fdtable pointer and use it while holding
      * the lock, no one can free it during that time.
      */
-    fdt = files_fdtable(files);
-    if (start <= fdt->next_fd)
-        fdt->next_fd = newfd + 1;
+    if (start <= files->next_fd)
+        files->next_fd = newfd + 1;
 
     error = newfd;
 
diff --git a/fs/file.c b/fs/file.c
index cea7cbea11d..bbc74331473 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
         kmem_cache_free(files_cachep, fdt->free_files);
         return;
     }
-    if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) {
+    if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE &&
+        fdt->max_fds <= NR_OPEN_DEFAULT) {
         /*
          * The fdtable was embedded
          */
@@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
 
 void free_fdtable(struct fdtable *fdt)
 {
-    if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE ||
-        fdt->max_fds > NR_OPEN_DEFAULT)
+    if (fdt->free_files ||
+        fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
+        fdt->max_fds > NR_OPEN_DEFAULT)
         call_rcu(&fdt->rcu, free_fdtable_rcu);
 }
 
@@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
                (nfdt->max_fds - fdt->max_fds) *
                sizeof(struct file *));
     }
-    nfdt->next_fd = fdt->next_fd;
 }
 
 /*
@@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num)
 
 void free_fdset(fd_set *array, int num)
 {
-    int size = num / 8;
-
-    if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */
+    if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */
         return;
-    else if (size <= PAGE_SIZE)
+    else if (num <= 8 * PAGE_SIZE)
         kfree(array);
     else
         vfree(array);
@@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr)
     fd_set *new_openset = NULL, *new_execset = NULL;
     struct file **new_fds;
 
-    fdt = kmalloc(sizeof(*fdt), GFP_KERNEL);
+    fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
     if (!fdt)
         goto out;
-    memset(fdt, 0, sizeof(*fdt));
 
-    nfds = __FD_SETSIZE;
+    nfds = 8 * L1_CACHE_BYTES;
     /* Expand to the max in easy steps */
-    do {
-        if (nfds < (PAGE_SIZE * 8))
-            nfds = PAGE_SIZE * 8;
-        else {
-            nfds = nfds * 2;
-            if (nfds > NR_OPEN)
-                nfds = NR_OPEN;
-        }
-    } while (nfds <= nr);
+    while (nfds <= nr) {
+        nfds = nfds * 2;
+        if (nfds > NR_OPEN)
+            nfds = NR_OPEN;
+    }
 
     new_openset = alloc_fdset(nfds);
     new_execset = alloc_fdset(nfds);
diff --git a/fs/open.c b/fs/open.c
index 70e0230d8e7..1091dadd6c3 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -973,7 +973,7 @@ repeat:
     fdt = files_fdtable(files);
     fd = find_next_zero_bit(fdt->open_fds->fds_bits,
                 fdt->max_fdset,
-                fdt->next_fd);
+                files->next_fd);
 
     /*
      * N.B. For clone tasks sharing a files structure, this test
@@ -998,7 +998,7 @@ repeat:
 
     FD_SET(fd, fdt->open_fds);
     FD_CLR(fd, fdt->close_on_exec);
-    fdt->next_fd = fd + 1;
+    files->next_fd = fd + 1;
 #if 1
     /* Sanity check */
     if (fdt->fd[fd] != NULL) {
@@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
 {
     struct fdtable *fdt = files_fdtable(files);
     __FD_CLR(fd, fdt->open_fds);
-    if (fd < fdt->next_fd)
-        fdt->next_fd = fd;
+    if (fd < files->next_fd)
+        files->next_fd = fd;
 }
 
 void fastcall put_unused_fd(unsigned int fd)
diff --git a/include/linux/file.h b/include/linux/file.h
index 9901b850f2e..9f7c2513866 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * The default fd array needs to be at least BITS_PER_LONG,
@@ -17,10 +18,22 @@
  */
 #define NR_OPEN_DEFAULT BITS_PER_LONG
 
+/*
+ * The embedded_fd_set is a small fd_set,
+ * suitable for most tasks (which open <= BITS_PER_LONG files)
+ */
+struct embedded_fd_set {
+    unsigned long fds_bits[1];
+};
+
+/*
+ * More than this number of fds: we use a separately allocated fd_set
+ */
+#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
+
 struct fdtable {
     unsigned int max_fds;
     int max_fdset;
-    int next_fd;
     struct file ** fd;      /* current fd array */
     fd_set *close_on_exec;
     fd_set *open_fds;
@@ -33,13 +46,20 @@ struct fdtable {
  * Open file table structure
  */
 struct files_struct {
+    /*
+     * read mostly part
+     */
     atomic_t count;
     struct fdtable *fdt;
     struct fdtable fdtab;
-    fd_set close_on_exec_init;
-    fd_set open_fds_init;
+    /*
+     * written part on a separate cache line in SMP
+     */
+    spinlock_t file_lock ____cacheline_aligned_in_smp;
+    int next_fd;
+    struct embedded_fd_set close_on_exec_init;
+    struct embedded_fd_set open_fds_init;
     struct file * fd_array[NR_OPEN_DEFAULT];
-    spinlock_t file_lock;   /* Protects concurrent writers.  Nests inside tsk->alloc_lock */
 };
 
 #define files_fdtable(files) (rcu_dereference((files)->fdt))
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfd2ecccb5..92146f3b742 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -7,11 +7,10 @@
 #define INIT_FDTABLE \
 {                                                   \
     .max_fds        = NR_OPEN_DEFAULT,              \
-    .max_fdset      = __FD_SETSIZE,                 \
-    .next_fd        = 0,                            \
+    .max_fdset      = EMBEDDED_FD_SET_SIZE,         \
     .fd             = &init_files.fd_array[0],      \
-    .close_on_exec  = &init_files.close_on_exec_init, \
-    .open_fds       = &init_files.open_fds_init,    \
+    .close_on_exec  = (fd_set *)&init_files.close_on_exec_init, \
+    .open_fds       = (fd_set *)&init_files.open_fds_init, \
     .rcu            = RCU_HEAD_INIT,                \
     .free_files     = NULL,                         \
     .next           = NULL,                         \
@@ -20,9 +19,10 @@
 #define INIT_FILES \
 {                                                   \
     .count          = ATOMIC_INIT(1),               \
-    .file_lock      = SPIN_LOCK_UNLOCKED,           \
     .fdt            = &init_files.fdtab,            \
     .fdtab          = INIT_FDTABLE,                 \
+    .file_lock      = SPIN_LOCK_UNLOCKED,           \
+    .next_fd        = 0,                            \
     .close_on_exec_init = { { 0, } },               \
     .open_fds_init  = { { 0, } },                   \
     .fd_array       = { NULL, }                     \
diff --git a/kernel/fork.c b/kernel/fork.c
index 9bd7b65ee41..c79ae0b19a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -607,12 +607,12 @@ static struct files_struct *alloc_files(void)
     atomic_set(&newf->count, 1);
 
     spin_lock_init(&newf->file_lock);
+    newf->next_fd = 0;
     fdt = &newf->fdtab;
-    fdt->next_fd = 0;
     fdt->max_fds = NR_OPEN_DEFAULT;
-    fdt->max_fdset = __FD_SETSIZE;
-    fdt->close_on_exec = &newf->close_on_exec_init;
-    fdt->open_fds = &newf->open_fds_init;
+    fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
+    fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
+    fdt->open_fds = (fd_set *)&newf->open_fds_init;
     fdt->fd = &newf->fd_array[0];
     INIT_RCU_HEAD(&fdt->rcu);
     fdt->free_files = NULL;
--
cgit v1.2.3
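The 248 bytes quoted in the commit message follow directly from the
structure change: each task previously embedded two full fd_set bitmaps
(1024 bits, 128 bytes each) and now embeds two one-word embedded_fd_set
instead.  A back-of-the-envelope sketch in user space, with the kernel
types reduced to just the relevant fields:

#include <stdio.h>

/* old layout: two 1024-bit fd_set bitmaps embedded in files_struct */
struct old_fd_set {
    unsigned long fds_bits[1024 / (8 * sizeof(unsigned long))];
};

/* new layout: one word each, as introduced by this patch */
struct embedded_fd_set {
    unsigned long fds_bits[1];
};

int main(void)
{
    size_t old_sz = 2 * sizeof(struct old_fd_set);
    size_t new_sz = 2 * sizeof(struct embedded_fd_set);

    /* 2 * 128 - 2 * 4 = 248 bytes on a 32-bit build (240 on 64-bit) */
    printf("saved per task: %zu bytes\n", old_sz - new_sz);
    return 0;
}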
From 2056a782f8e7e65fd4bfd027506b4ce1c5e9ccd4 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 23 Mar 2006 20:00:26 +0100
Subject: [PATCH] Block queue IO tracing support (blktrace) as of 2006-03-23

Signed-off-by: Jens Axboe
---
 block/Kconfig                |  12 +
 block/Makefile               |   2 +
 block/blktrace.c             | 538 +++++++++++++++++++++++++++++++++++++++++++
 block/elevator.c             |   4 +
 block/ioctl.c                |   6 +
 block/ll_rw_blk.c            |  44 +++-
 drivers/block/cciss.c        |   2 +
 drivers/md/dm.c              |  13 +-
 fs/bio.c                     |   4 +
 fs/compat_ioctl.c            |   1 +
 include/linux/blkdev.h       |   3 +
 include/linux/blktrace_api.h | 277 ++++++++++++++++++++++
 include/linux/compat_ioctl.h |   4 +
 include/linux/fs.h           |   4 +
 include/linux/sched.h        |   1 +
 kernel/fork.c                |   1 +
 mm/highmem.c                 |   3 +
 17 files changed, 916 insertions(+), 3 deletions(-)
 create mode 100644 block/blktrace.c
 create mode 100644 include/linux/blktrace_api.h

diff --git a/block/Kconfig b/block/Kconfig
index 377f6dd20e1..96783645092 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -11,4 +11,16 @@ config LBD
       your machine, or if you want to have a raid or loopback device
       bigger than 2TB.  Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+    bool "Support for tracing block io actions"
+    select RELAY
+    select DEBUG_FS
+    help
+      Say Y here if you want to be able to trace the block layer actions
+      on a given queue.  Tracing allows you to see any traffic happening
+      on a block device queue.  For more information (and the user space
+      support tools needed), fetch the blktrace app from:
+
+      git://brick.kernel.dk/data/git/blktrace.git
+
 source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 7e4f93e2b44..c05de0e0037 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
+
+obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
diff --git a/block/blktrace.c b/block/blktrace.c
new file mode 100644
index 00000000000..36f3a172275
--- /dev/null
+++ b/block/blktrace.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2006 Jens Axboe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+static unsigned int blktrace_seq __read_mostly = 1;
+
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+    struct blk_io_trace *t;
+
+    t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
+    if (t) {
+        t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+        t->device = bt->dev;
+        t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
+        t->pid = tsk->pid;
+        t->cpu = smp_processor_id();
+        t->pdu_len = sizeof(tsk->comm);
+        memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
+        tsk->btrace_seq = blktrace_seq;
+    }
+}
+
+static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
+             pid_t pid)
+{
+    if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+        return 1;
+    if (sector < bt->start_lba || sector > bt->end_lba)
+        return 1;
+    if (bt->pid && pid != bt->pid)
+        return 1;
+
+    return 0;
+}
+
+/*
+ * Data direction bit lookup
+ */
+static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
+
+/*
+ * Bio action bits of interest
+ */
+static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) };
+
+/*
+ * More could be added as needed, taking care to increment the decrementer
+ * to get correct indexing
+ */
+#define trace_barrier_bit(rw)  \
+    (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
+#define trace_sync_bit(rw)     \
+    (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace structure and places it in a per-cpu subbuffer.
+ */
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+             int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+    struct task_struct *tsk = current;
+    struct blk_io_trace *t;
+    unsigned long flags;
+    unsigned long *sequence;
+    pid_t pid;
+    int cpu;
+
+    if (unlikely(bt->trace_state != Blktrace_running))
+        return;
+
+    what |= ddir_act[rw & WRITE];
+    what |= bio_act[trace_barrier_bit(rw)];
+    what |= bio_act[trace_sync_bit(rw)];
+
+    pid = tsk->pid;
+    if (unlikely(act_log_check(bt, what, sector, pid)))
+        return;
+
+    /*
+     * A word about the locking here - we disable interrupts to reserve
+     * some space in the relay per-cpu buffer, to prevent an irq
+     * from coming in and stepping on our toes. Once reserved, it's
+     * enough to get preemption disabled to prevent read of this data
+     * before we are through filling it. get_cpu()/put_cpu() does this
+     * for us
+     */
+    local_irq_save(flags);
+
+    if (unlikely(tsk->btrace_seq != blktrace_seq))
+        trace_note_tsk(bt, tsk);
+
+    t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+    if (t) {
+        cpu = smp_processor_id();
+        sequence = per_cpu_ptr(bt->sequence, cpu);
+
+        t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+        t->sequence = ++(*sequence);
+        t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+        t->sector = sector;
+        t->bytes = bytes;
+        t->action = what;
+        t->pid = pid;
+        t->device = bt->dev;
+        t->cpu = cpu;
+        t->error = error;
+        t->pdu_len = pdu_len;
+
+        if (pdu_len)
+            memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+    }
+
+    local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+
+static struct dentry *blk_tree_root;
+static struct mutex blk_tree_mutex;
+static unsigned int root_users;
+
+static inline void blk_remove_root(void)
+{
+    if (blk_tree_root) {
+        debugfs_remove(blk_tree_root);
+        blk_tree_root = NULL;
+    }
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+    mutex_lock(&blk_tree_mutex);
+    debugfs_remove(dir);
+    if (--root_users == 0)
+        blk_remove_root();
+    mutex_unlock(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+    struct dentry *dir = NULL;
+
+    mutex_lock(&blk_tree_mutex);
+
+    if (!blk_tree_root) {
+        blk_tree_root = debugfs_create_dir("block", NULL);
+        if (!blk_tree_root)
+            goto err;
+    }
+
+    dir = debugfs_create_dir(blk_name, blk_tree_root);
+    if (dir)
+        root_users++;
+    else
+        blk_remove_root();
+
+err:
+    mutex_unlock(&blk_tree_mutex);
+    return dir;
+}
+
+static void blk_trace_cleanup(struct blk_trace *bt)
+{
+    relay_close(bt->rchan);
+    debugfs_remove(bt->dropped_file);
+    blk_remove_tree(bt->dir);
+    free_percpu(bt->sequence);
+    kfree(bt);
+}
+
+static int blk_trace_remove(request_queue_t *q)
+{
+    struct blk_trace *bt;
+
+    bt = xchg(&q->blk_trace, NULL);
+    if (!bt)
+        return -EINVAL;
+
+    if (bt->trace_state == Blktrace_setup ||
+        bt->trace_state == Blktrace_stopped)
+        blk_trace_cleanup(bt);
+
+    return 0;
+}
+
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+{
+    filp->private_data = inode->u.generic_ip;
+
+    return 0;
+}
+
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *ppos)
+{
+    struct blk_trace *bt = filp->private_data;
+    char buf[16];
+
+    snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+
+    return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static struct file_operations blk_dropped_fops = {
+    .owner =    THIS_MODULE,
+    .open =     blk_dropped_open,
+    .read =     blk_dropped_read,
+};
+
+/*
+ * Keep track of how many times we encountered a full subbuffer, to aid
+ * the user space app in telling how many lost events there were.
+ */
+static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+                     void *prev_subbuf, size_t prev_padding)
+{
+    struct blk_trace *bt;
+
+    if (!relay_buf_full(buf))
+        return 1;
+
+    bt = buf->chan->private_data;
+    atomic_inc(&bt->dropped);
+    return 0;
+}
+
+static int blk_remove_buf_file_callback(struct dentry *dentry)
+{
+    debugfs_remove(dentry);
+    return 0;
+}
+
+static struct dentry *blk_create_buf_file_callback(const char *filename,
+                           struct dentry *parent,
+                           int mode,
+                           struct rchan_buf *buf,
+                           int *is_global)
+{
+    return debugfs_create_file(filename, mode, parent, buf,
+                    &relay_file_operations);
+}
+
+static struct rchan_callbacks blk_relay_callbacks = {
+    .subbuf_start       = blk_subbuf_start_callback,
+    .create_buf_file    = blk_create_buf_file_callback,
+    .remove_buf_file    = blk_remove_buf_file_callback,
+};
+
+/*
+ * Setup everything required to start tracing
+ */
+static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+               char __user *arg)
+{
+    struct blk_user_trace_setup buts;
+    struct blk_trace *old_bt, *bt = NULL;
+    struct dentry *dir = NULL;
+    char b[BDEVNAME_SIZE];
+    int ret, i;
+
+    if (copy_from_user(&buts, arg, sizeof(buts)))
+        return -EFAULT;
+
+    if (!buts.buf_size || !buts.buf_nr)
+        return -EINVAL;
+
+    strcpy(buts.name, bdevname(bdev, b));
+
+    /*
+     * some device names have larger paths - convert the slashes
+     * to underscores for this to work as expected
+     */
+    for (i = 0; i < strlen(buts.name); i++)
+        if (buts.name[i] == '/')
+            buts.name[i] = '_';
+
+    if (copy_to_user(arg, &buts, sizeof(buts)))
+        return -EFAULT;
+
+    ret = -ENOMEM;
+    bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+    if (!bt)
+        goto err;
+
+    bt->sequence = alloc_percpu(unsigned long);
+    if (!bt->sequence)
+        goto err;
+
+    ret = -ENOENT;
+    dir = blk_create_tree(buts.name);
+    if (!dir)
+        goto err;
+
+    bt->dir = dir;
+    bt->dev = bdev->bd_dev;
+    atomic_set(&bt->dropped, 0);
+
+    ret = -EIO;
+    bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+    if (!bt->dropped_file)
+        goto err;
+
+    bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+    if (!bt->rchan)
+        goto err;
+    bt->rchan->private_data = bt;
+
+    bt->act_mask = buts.act_mask;
+    if (!bt->act_mask)
+        bt->act_mask = (u16) -1;
+
+    bt->start_lba = buts.start_lba;
+    bt->end_lba = buts.end_lba;
+    if (!bt->end_lba)
+        bt->end_lba = -1ULL;
+
+    bt->pid = buts.pid;
+    bt->trace_state = Blktrace_setup;
+
+    ret = -EBUSY;
+    old_bt = xchg(&q->blk_trace, bt);
+    if (old_bt) {
+        (void) xchg(&q->blk_trace, old_bt);
+        goto err;
+    }
+
+    return 0;
+err:
+    if (dir)
+        blk_remove_tree(dir);
+    if (bt) {
+        if (bt->dropped_file)
+            debugfs_remove(bt->dropped_file);
+        if (bt->sequence)
+            free_percpu(bt->sequence);
+        if (bt->rchan)
+            relay_close(bt->rchan);
+        kfree(bt);
+    }
+    return ret;
+}
+
+static int blk_trace_startstop(request_queue_t *q, int start)
+{
+    struct blk_trace *bt;
+    int ret;
+
+    if ((bt = q->blk_trace) == NULL)
+        return -EINVAL;
+
+    /*
+     * For starting a trace, we can transition from a setup or stopped
+     * trace. For stopping a trace, the state must be running
+     */
+    ret = -EINVAL;
+    if (start) {
+        if (bt->trace_state == Blktrace_setup ||
+            bt->trace_state == Blktrace_stopped) {
+            blktrace_seq++;
+            smp_mb();
+            bt->trace_state = Blktrace_running;
+            ret = 0;
+        }
+    } else {
+        if (bt->trace_state == Blktrace_running) {
+            bt->trace_state = Blktrace_stopped;
+            relay_flush(bt->rchan);
+            ret = 0;
+        }
+    }
+
+    return ret;
+}
+
+/**
+ * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * @bdev:   the block device
+ * @cmd:    the ioctl cmd
+ * @arg:    the argument data, if any
+ *
+ **/
+int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+{
+    request_queue_t *q;
+    int ret, start = 0;
+
+    q = bdev_get_queue(bdev);
+    if (!q)
+        return -ENXIO;
+
+    mutex_lock(&bdev->bd_mutex);
+
+    switch (cmd) {
+    case BLKTRACESETUP:
+        ret = blk_trace_setup(q, bdev, arg);
+        break;
+    case BLKTRACESTART:
+        start = 1;
+    case BLKTRACESTOP:
+        ret = blk_trace_startstop(q, start);
+        break;
+    case BLKTRACETEARDOWN:
+        ret = blk_trace_remove(q);
+        break;
+    default:
+        ret = -ENOTTY;
+        break;
+    }
+
+    mutex_unlock(&bdev->bd_mutex);
+    return ret;
+}
+
+/**
+ * blk_trace_shutdown: - stop and cleanup trace structures
+ * @q:    the request queue associated with the device
+ *
+ **/
+void blk_trace_shutdown(request_queue_t *q)
+{
+    blk_trace_startstop(q, 0);
+    blk_trace_remove(q);
+}
+
+/*
+ * Average offset over two calls to sched_clock() with a gettimeofday()
+ * in the middle
+ */
+static void blk_check_time(unsigned long long *t)
+{
+    unsigned long long a, b;
+    struct timeval tv;
+
+    a = sched_clock();
+    do_gettimeofday(&tv);
+    b = sched_clock();
+
+    *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+    *t -= (a + b) / 2;
+}
+
+static void blk_trace_check_cpu_time(void *data)
+{
+    unsigned long long *t;
+    int cpu = get_cpu();
+
+    t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+    /*
+     * Just call it twice, hopefully the second call will be cache hot
+     * and a little more precise
+     */
+    blk_check_time(t);
+    blk_check_time(t);
+
+    put_cpu();
+}
+
+/*
+ * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
+ * timings
+ */
+static void blk_trace_calibrate_offsets(void)
+{
+    unsigned long flags;
+
+    smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+    local_irq_save(flags);
+    blk_trace_check_cpu_time(NULL);
+    local_irq_restore(flags);
+}
+
+static void blk_trace_set_ht_offsets(void)
+{
+#if defined(CONFIG_SCHED_SMT)
+    int cpu, i;
+
+    /*
+     * now make sure HT siblings have the same time offset
+     */
+    preempt_disable();
+    for_each_online_cpu(cpu) {
+        unsigned long long *cpu_off, *sibling_off;
+
+        for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+            if (i == cpu)
+                continue;
+
+            cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
+            sibling_off = &per_cpu(blk_trace_cpu_offset, i);
+            *sibling_off = *cpu_off;
+        }
+    }
+    preempt_enable();
+#endif
+}
+
+static __init int blk_trace_init(void)
+{
+    mutex_init(&blk_tree_mutex);
+    blk_trace_calibrate_offsets();
+    blk_trace_set_ht_offsets();
+
+    return 0;
+}
+
+module_init(blk_trace_init);
+
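With blktrace.c in place, the whole trace life cycle is driven through the
four new ioctls. A user-space sketch under stated assumptions: error
handling is trimmed, /dev/sda is an arbitrary example device (opening it
needs root), and the actual trace payload is read from the per-cpu relay
files in debugfs by the blktrace tool referenced in the Kconfig help above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>             /* BLKTRACE* ioctl numbers */
#include <linux/blktrace_api.h>   /* struct blk_user_trace_setup */

int main(void)
{
    struct blk_user_trace_setup buts;
    int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

    if (fd < 0)
        return 1;

    memset(&buts, 0, sizeof(buts));
    buts.buf_size = 512 * 1024;   /* relay sub-buffer size */
    buts.buf_nr = 4;              /* number of sub-buffers */
    /* act_mask/start_lba/end_lba/pid left 0: trace everything */

    if (ioctl(fd, BLKTRACESETUP, &buts) < 0)
        return 1;
    printf("tracing as %s\n", buts.name); /* name filled in by the kernel */

    ioctl(fd, BLKTRACESTART, 0);
    sleep(1);                     /* traces flow into debugfs meanwhile */
    ioctl(fd, BLKTRACESTOP, 0);
    ioctl(fd, BLKTRACETEARDOWN, 0);
    close(fd);
    return 0;
}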
diff --git a/block/elevator.c b/block/elevator.c
index db3d0d8296a..5e558c4689a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 
 #include
 
@@ -333,6 +334,8 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
     struct list_head *pos;
     unsigned ordseq;
 
+    blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
     rq->q = q;
 
     switch (where) {
@@ -499,6 +502,7 @@ struct request *elv_next_request(request_queue_t *q)
              * not be passed by new incoming requests
              */
             rq->flags |= REQ_STARTED;
+            blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
         }
 
         if (!q->boundary_rq || q->boundary_rq == rq) {
diff --git a/block/ioctl.c b/block/ioctl.c
index 35fdb7dc651..9cfa2e1ecb2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 #include
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -189,6 +190,11 @@ static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
         return put_ulong(arg, bdev->bd_inode->i_size >> 9);
     case BLKGETSIZE64:
         return put_u64(arg, bdev->bd_inode->i_size);
+    case BLKTRACESTART:
+    case BLKTRACESTOP:
+    case BLKTRACESETUP:
+    case BLKTRACETEARDOWN:
+        return blk_trace_ioctl(bdev, cmd, (char __user *) arg);
     }
     return -ENOIOCTLCMD;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6c793b196aa..062067fa7ea 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
     if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
         return;
 
-    if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+    if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
         mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+        blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+    }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
     /*
      * devices don't necessarily have an ->unplug_fn defined
      */
-    if (q->unplug_fn)
+    if (q->unplug_fn) {
+        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                q->rq.count[READ] + q->rq.count[WRITE]);
+
         q->unplug_fn(q);
+    }
 }
 
 static void blk_unplug_work(void *data)
 {
     request_queue_t *q = data;
 
+    blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+            q->rq.count[READ] + q->rq.count[WRITE]);
+
     q->unplug_fn(q);
 }
 
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
     request_queue_t *q = (request_queue_t *)data;
 
+    blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+            q->rq.count[READ] + q->rq.count[WRITE]);
+
     kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
     if (q->queue_tags)
         __blk_queue_free_tags(q);
 
+    if (q->blk_trace)
+        blk_trace_shutdown(q);
+
     kmem_cache_free(requestq_cachep, q);
 }
 
@@ -2129,6 +2145,8 @@ rq_starved:
 
     rq_init(q, rq);
     rq->rl = rl;
+
+    blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
     return rq;
 }
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
         if (!rq) {
             struct io_context *ioc;
 
+            blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
             __generic_unplug_device(q);
             spin_unlock_irq(q->queue_lock);
             io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+    blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
     if (blk_rq_tagged(rq))
         blk_queue_end_tag(q, rq);
 
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
             if (!q->back_merge_fn(q, req, bio))
                 break;
 
+            blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
             req->biotail->bi_next = bio;
             req->biotail = bio;
             req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
             if (!q->front_merge_fn(q, req, bio))
                 break;
 
+            blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
             bio->bi_next = req->bio;
             req->bio = bio;
 
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
     request_queue_t *q;
     sector_t maxsector;
     int ret, nr_sectors = bio_sectors(bio);
+    dev_t old_dev;
 
     might_sleep();
     /* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
      * NOTE: we don't repeat the blk_size check for each new device.
      * Stacking drivers are expected to know what they are doing.
      */
+    maxsector = -1;
+    old_dev = 0;
     do {
         char b[BDEVNAME_SIZE];
 
@@ -3034,6 +3063,15 @@ end_io:
          */
         blk_partition_remap(bio);
 
+        if (maxsector != -1)
+            blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                        maxsector);
+
+        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+        maxsector = bio->bi_sector;
+        old_dev = bio->bi_bdev->bd_dev;
+
         ret = q->make_request_fn(q, bio);
     } while (ret);
 }
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
     int total_bytes, bio_nbytes, error, next_idx = 0;
     struct bio *bio;
 
+    blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
     /*
      * extend uptodate bool to allow < 0 value to be direct io error
      */
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e29b8926f80..1f2890989b5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 #include
 #include
 
@@ -2331,6 +2332,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 
     cmd->rq->completion_data = cmd;
     cmd->rq->errors = status;
+    blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
     blk_complete_request(cmd->rq);
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 26b08ee425c..8c82373f7ff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 
 static const char *_name = DM_NAME;
 
@@ -334,6 +335,8 @@ static void dec_pending(struct dm_io *io, int error)
         /* nudge anyone waiting on suspend queue */
         wake_up(&io->md->wait);
 
+        blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
+
         bio_endio(io->bio, io->bio->bi_size, io->error);
         free_io(io->md, io);
     }
@@ -392,6 +395,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
               struct target_io *tio)
 {
     int r;
+    sector_t sector;
 
     /*
      * Sanity checks.
@@ -407,10 +411,17 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
      * this io.
      */
     atomic_inc(&tio->io->io_count);
+    sector = clone->bi_sector;
     r = ti->type->map(ti, clone, &tio->info);
-    if (r > 0)
+    if (r > 0) {
         /* the bio has been remapped so dispatch it */
+
+        blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+                    tio->io->bio->bi_bdev->bd_dev, sector,
+                    clone->bi_sector);
+
         generic_make_request(clone);
+    }
 
     else if (r < 0) {
         /* error the io and bail out */
diff --git a/fs/bio.c b/fs/bio.c
index 8f1d2e815c9..0a8c59cb68f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>        /* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
@@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
     if (!bp)
         return bp;
 
+    blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+                bi->bi_sector + first_sectors);
+
     BUG_ON(bi->bi_vcnt != 1);
     BUG_ON(bi->bi_idx != 0);
     atomic_set(&bp->cnt, 3);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c666769a875..7c031f00fd7 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -72,6 +72,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 
 #include <net/sock.h>       /* siocdevprivate_ioctl */
 #include
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56bb6a4e15f..c179966f1a2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128 /* Default maximum */
@@ -416,6 +417,8 @@ struct request_queue
     unsigned int        sg_reserved_size;
     int         node;
 
+    struct blk_trace    *blk_trace;
+
     /*
      * reserved for flush operations
      */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
new file mode 100644
index 00000000000..b34d3e73d5e
--- /dev/null
+++ b/include/linux/blktrace_api.h
@@ -0,0 +1,277 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include
+#include <linux/blkdev.h>
+#include <linux/relay.h>
+
+/*
+ * Trace categories
+ */
+enum blktrace_cat {
+    BLK_TC_READ     = 1 << 0,   /* reads */
+    BLK_TC_WRITE    = 1 << 1,   /* writes */
+    BLK_TC_BARRIER  = 1 << 2,   /* barrier */
+    BLK_TC_SYNC     = 1 << 3,   /* sync */
+    BLK_TC_QUEUE    = 1 << 4,   /* queueing/merging */
+    BLK_TC_REQUEUE  = 1 << 5,   /* requeueing */
+    BLK_TC_ISSUE    = 1 << 6,   /* issue */
+    BLK_TC_COMPLETE = 1 << 7,   /* completions */
+    BLK_TC_FS       = 1 << 8,   /* fs requests */
+    BLK_TC_PC       = 1 << 9,   /* pc requests */
+    BLK_TC_NOTIFY   = 1 << 10,  /* special message */
+
+    BLK_TC_END      = 1 << 15,  /* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT        (16)
+#define BLK_TC_ACT(act)     ((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum blktrace_act {
+    __BLK_TA_QUEUE = 1,         /* queued */
+    __BLK_TA_BACKMERGE,         /* back merged to existing rq */
+    __BLK_TA_FRONTMERGE,        /* front merge to existing rq */
+    __BLK_TA_GETRQ,             /* allocated new request */
+    __BLK_TA_SLEEPRQ,           /* sleeping on rq allocation */
+    __BLK_TA_REQUEUE,           /* request requeued */
+    __BLK_TA_ISSUE,             /* sent to driver */
+    __BLK_TA_COMPLETE,          /* completed by driver */
+    __BLK_TA_PLUG,              /* queue was plugged */
+    __BLK_TA_UNPLUG_IO,         /* queue was unplugged by io */
+    __BLK_TA_UNPLUG_TIMER,      /* queue was unplugged by timer */
+    __BLK_TA_INSERT,            /* insert request */
+    __BLK_TA_SPLIT,             /* bio was split */
+    __BLK_TA_BOUNCE,            /* bio was bounced */
+    __BLK_TA_REMAP,             /* bio was remapped */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE        (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE    (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE   (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ        (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ      (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE      (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE        (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE     (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG         (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_IO    (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_INSERT       (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SPLIT        (__BLK_TA_SPLIT)
+#define BLK_TA_BOUNCE       (__BLK_TA_BOUNCE)
+#define BLK_TA_REMAP        (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
+
+#define BLK_IO_TRACE_MAGIC  0x65617400
+#define BLK_IO_TRACE_VERSION    0x07
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+    u32 magic;      /* MAGIC << 8 | version */
+    u32 sequence;   /* event number */
+    u64 time;       /* in nanoseconds */
+    u64 sector;     /* disk offset */
+    u32 bytes;      /* transfer length */
+    u32 action;     /* what happened */
+    u32 pid;        /* who did it */
+    u32 device;     /* device number */
+    u32 cpu;        /* on what cpu did it happen */
+    u16 error;      /* completion error */
+    u16 pdu_len;    /* length of data after this trace */
+};
+
+/*
+ * The remap event
+ */
+struct blk_io_trace_remap {
+    u32 device;
+    u32 __pad;
+    u64 sector;
+};
+
+enum {
+    Blktrace_setup = 1,
+    Blktrace_running,
+    Blktrace_stopped,
+};
+
+struct blk_trace {
+    int trace_state;
+    struct rchan *rchan;
+    unsigned long *sequence;
+    u16 act_mask;
+    u64 start_lba;
+    u64 end_lba;
+    u32 pid;
+    u32 dev;
+    struct dentry *dir;
+    struct dentry *dropped_file;
+    atomic_t dropped;
+};
+
+/*
+ * User setup structure passed with BLKTRACESETUP
+ */
+struct blk_user_trace_setup {
+    char name[BDEVNAME_SIZE];   /* output */
+    u16 act_mask;               /* input */
+    u32 buf_size;               /* input */
+    u32 buf_nr;                 /* input */
+    u64 start_lba;
+    u64 end_lba;
+    u32 pid;
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+extern void blk_trace_shutdown(request_queue_t *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:      queue the io is for
+ * @rq:     the source request
+ * @what:   the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+                    u32 what)
+{
+    struct blk_trace *bt = q->blk_trace;
+    int rw = rq->flags & 0x07;
+
+    if (likely(!bt))
+        return;
+
+    if (blk_pc_request(rq)) {
+        what |= BLK_TC_ACT(BLK_TC_PC);
+        __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+    } else {
+        what |= BLK_TC_ACT(BLK_TC_FS);
+        __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+    }
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:      queue the io is for
+ * @bio:    the source bio
+ * @what:   the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+                     u32 what)
+{
+    struct blk_trace *bt = q->blk_trace;
+
+    if (likely(!bt))
+        return;
+
+    __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+/**
+ * blk_add_trace_generic - Add a trace for a generic action
+ * @q:      queue the io is for
+ * @bio:    the source bio
+ * @rw:     the data direction
+ * @what:   the action
+ *
+ * Description:
+ *     Records a simple trace
+ *
+ **/
+static inline void blk_add_trace_generic(struct request_queue *q,
+                     struct bio *bio, int rw, u32 what)
+{
+    struct blk_trace *bt = q->blk_trace;
+
+    if (likely(!bt))
+        return;
+
+    if (bio)
+        blk_add_trace_bio(q, bio, what);
+    else
+        __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+/**
+ * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
+ * @q:      queue the io is for
+ * @what:   the action
+ * @bio:    the source bio
+ * @pdu:    the integer payload
+ *
+ * Description:
+ *     Adds a trace with some integer payload. This might be an unplug
+ *     option given as the action, with the depth at unplug time given
+ *     as the payload
+ *
+ **/
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+                     struct bio *bio, unsigned int pdu)
+{
+    struct blk_trace *bt = q->blk_trace;
+    u64 rpdu = cpu_to_be64(pdu);
+
+    if (likely(!bt))
+        return;
+
+    if (bio)
+        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+    else
+        __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:      queue the io is for
+ * @bio:    the source bio
+ * @dev:    target device
+ * @from:   source sector
+ * @to:     target sector
+ *
+ * Description:
+ *     Device mapper or raid target sometimes need to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+                       dev_t dev, sector_t from, sector_t to)
+{
+    struct blk_trace *bt = q->blk_trace;
+    struct blk_io_trace_remap r;
+
+    if (likely(!bt))
+        return;
+
+    r.device = cpu_to_be32(dev);
+    r.sector = cpu_to_be64(to);
+
+    __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_trace_ioctl(bdev, cmd, arg)     (-ENOTTY)
+#define blk_trace_shutdown(q)               do { } while (0)
+#define blk_add_trace_rq(q, rq, what)       do { } while (0)
+#define blk_add_trace_bio(q, rq, what)      do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what)  do { } while (0)
+#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
+#define blk_add_trace_remap(q, bio, dev, f, t)   do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index ae7dfb790df..efb518f16bb 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART)
 COMPATIBLE_IOCTL(BLKFLSBUF)
 COMPATIBLE_IOCTL(BLKSECTSET)
 COMPATIBLE_IOCTL(BLKSSZGET)
+COMPATIBLE_IOCTL(BLKTRACESTART)
+COMPATIBLE_IOCTL(BLKTRACESTOP)
+COMPATIBLE_IOCTL(BLKTRACESETUP)
+COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
 ULONG_IOCTL(BLKRASET)
 ULONG_IOCTL(BLKFRASET)
 /* RAID */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f9c9dea636d..9b34a1b0345 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -197,6 +197,10 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
+#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKTRACESTART _IO(0x12,116)
+#define BLKTRACESTOP _IO(0x12,117)
+#define BLKTRACETEARDOWN _IO(0x12,118)
 
 #define BMAP_IOCTL 1        /* obsolete - kept for compatibility */
 #define FIBMAP     _IO(0x00,1)  /* bmap access */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62e6314382f..e60a91d5b36 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -706,6 +706,7 @@ struct task_struct {
     prio_array_t *array;
 
     unsigned short ioprio;
+    unsigned int btrace_seq;
 
     unsigned long sleep_avg;
     unsigned long long timestamp, last_ran;
diff --git a/kernel/fork.c b/kernel/fork.c
index c79ae0b19a4..c21bae8c93b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -181,6 +181,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
     /* One for us, one for whoever does the "release_task()" (usually parent) */
     atomic_set(&tsk->usage,2);
     atomic_set(&tsk->fs_excl, 0);
+    tsk->btrace_seq = 0;
     return tsk;
 }
 
diff --git a/mm/highmem.c b/mm/highmem.c
index ce2e7e8bbfa..d0ea1eec6a9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <linux/blktrace_api.h>
 #include
 
 static mempool_t *page_pool, *isa_page_pool;
 
@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
         pool = isa_page_pool;
     }
 
+    blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
     /*
      * slow path
      */
--
cgit v1.2.3
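The relay stream this patch emits is simply a sequence of struct
blk_io_trace records, each followed by pdu_len bytes of payload.  A sketch
of a minimal reader, under stated assumptions: the debugfs mount point and
device name are examples, and since the kernel header uses kernel-only
u32/u64 types, the record layout is copied here with stdint types (a real
tool such as blktrace ships its own copy):

#include <stdio.h>
#include <stdint.h>

/* user-space copy of the record layout from blktrace_api.h */
struct blk_io_trace {
    uint32_t magic;     /* MAGIC << 8 | version */
    uint32_t sequence;
    uint64_t time;
    uint64_t sector;
    uint32_t bytes;
    uint32_t action;
    uint32_t pid;
    uint32_t device;
    uint32_t cpu;
    uint16_t error;
    uint16_t pdu_len;
};

#define BLK_IO_TRACE_MAGIC 0x65617400

int main(void)
{
    /* one file per cpu, created by relay_open("trace", ...) in debugfs */
    FILE *f = fopen("/sys/kernel/debug/block/sda/trace0", "rb");
    struct blk_io_trace t;

    if (!f)
        return 1;

    while (fread(&t, sizeof(t), 1, f) == 1) {
        /* low byte of magic carries the format version */
        if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
            break;  /* lost sync with the stream */
        printf("seq=%u action=0x%x sector=%llu bytes=%u\n",
               t.sequence, t.action,
               (unsigned long long) t.sector, t.bytes);
        if (t.pdu_len && fseek(f, t.pdu_len, SEEK_CUR) != 0)
            break;  /* skip the per-record payload */
    }
    fclose(f);
    return 0;
}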