Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig               |  11
-rw-r--r--  drivers/dma-buf/Makefile              |   1
-rw-r--r--  drivers/dma-buf/dma-buf-sysfs-stats.c | 337
-rw-r--r--  drivers/dma-buf/dma-buf-sysfs-stats.h |  62
-rw-r--r--  drivers/dma-buf/dma-buf.c             |  40
-rw-r--r--  drivers/dma-buf/dma-fence-chain.c     |   2
-rw-r--r--  drivers/dma-buf/dma-resv.c            |  33
-rw-r--r--  drivers/dma-buf/st-dma-fence-chain.c  |  16
-rw-r--r--  drivers/dma-buf/udmabuf.c             |  59
9 files changed, 518 insertions(+), 43 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4e16c71c24b7..9561e3d2d428 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -72,6 +72,17 @@ menuconfig DMABUF_HEAPS
           allows userspace to allocate dma-bufs that can be shared
           between drivers.
 
+menuconfig DMABUF_SYSFS_STATS
+        bool "DMA-BUF sysfs statistics"
+        select DMA_SHARED_BUFFER
+        help
+          Choose this option to enable DMA-BUF sysfs statistics
+          in location /sys/kernel/dmabuf/buffers.
+
+          /sys/kernel/dmabuf/buffers/<inode_number> will contain
+          statistics for the DMA-BUF with the unique inode number
+          <inode_number>.
+
 source "drivers/dma-buf/heaps/Kconfig"
 
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 995e05f609ff..40d81f23cacf 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
+obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
 
 dmabuf_selftests-y := \
 	selftest.o \
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
new file mode 100644
index 000000000000..a2638e84199c
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA-BUF sysfs statistics.
+ *
+ * Copyright (C) 2021 Google LLC.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/kobject.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "dma-buf-sysfs-stats.h"
+
+#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj)
+
+/**
+ * DOC: overview
+ *
+ * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF
+ * in the system. However, since debugfs is not safe to be mounted in
+ * production, procfs and sysfs can be used to gather DMA-BUF statistics on
+ * production systems.
+ *
+ * The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather
+ * information about DMA-BUF fds. Detailed documentation about the interface
+ * is present in Documentation/filesystems/proc.rst.
+ *
+ * Unfortunately, the existing procfs interfaces can only provide information
+ * about the DMA-BUFs for which processes hold fds or have the buffers mmapped
+ * into their address space. This necessitated the creation of the DMA-BUF
+ * sysfs statistics interface to provide per-buffer information on production
+ * systems.
+ *
+ * The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about
+ * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
+ *
+ * The following stats are exposed by the interface:
+ *
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name``
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size``
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/device``
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/map_counter``
+ *
+ * The information in the interface can also be used to derive per-exporter and
+ * per-device usage statistics. The data from the interface can be gathered
+ * on error conditions or other important events to provide a snapshot of
+ * DMA-BUF usage. It can also be collected periodically by telemetry to monitor
+ * various metrics.
+ *
+ * Detailed documentation about the interface is present in
+ * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers.
+ */
+
+struct dma_buf_stats_attribute {
+        struct attribute attr;
+        ssize_t (*show)(struct dma_buf *dmabuf,
+                        struct dma_buf_stats_attribute *attr, char *buf);
+};
+#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr)
+
+static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj,
+                                            struct attribute *attr,
+                                            char *buf)
+{
+        struct dma_buf_stats_attribute *attribute;
+        struct dma_buf_sysfs_entry *sysfs_entry;
+        struct dma_buf *dmabuf;
+
+        attribute = to_dma_buf_stats_attr(attr);
+        sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
+        dmabuf = sysfs_entry->dmabuf;
+
+        if (!dmabuf || !attribute->show)
+                return -EIO;
+
+        return attribute->show(dmabuf, attribute, buf);
+}
+
+static const struct sysfs_ops dma_buf_stats_sysfs_ops = {
+        .show = dma_buf_stats_attribute_show,
+};
+
+static ssize_t exporter_name_show(struct dma_buf *dmabuf,
+                                  struct dma_buf_stats_attribute *attr,
+                                  char *buf)
+{
+        return sysfs_emit(buf, "%s\n", dmabuf->exp_name);
+}
+
+static ssize_t size_show(struct dma_buf *dmabuf,
+                         struct dma_buf_stats_attribute *attr,
+                         char *buf)
+{
+        return sysfs_emit(buf, "%zu\n", dmabuf->size);
+}
+
+static struct dma_buf_stats_attribute exporter_name_attribute =
+        __ATTR_RO(exporter_name);
+static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size);
+
+static struct attribute *dma_buf_stats_default_attrs[] = {
+        &exporter_name_attribute.attr,
+        &size_attribute.attr,
+        NULL,
+};
+ATTRIBUTE_GROUPS(dma_buf_stats_default);
+
+static void dma_buf_sysfs_release(struct kobject *kobj)
+{
+        struct dma_buf_sysfs_entry *sysfs_entry;
+
+        sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
+        kfree(sysfs_entry);
+}
+
+static struct kobj_type dma_buf_ktype = {
+        .sysfs_ops = &dma_buf_stats_sysfs_ops,
+        .release = dma_buf_sysfs_release,
+        .default_groups = dma_buf_stats_default_groups,
+};
+
+#define to_dma_buf_attach_entry_from_kobj(x) container_of(x, struct dma_buf_attach_sysfs_entry, kobj)
+
+struct dma_buf_attach_stats_attribute {
+        struct attribute attr;
+        ssize_t (*show)(struct dma_buf_attach_sysfs_entry *sysfs_entry,
+                        struct dma_buf_attach_stats_attribute *attr, char *buf);
+};
+#define to_dma_buf_attach_stats_attr(x) container_of(x, struct dma_buf_attach_stats_attribute, attr)
+
+static ssize_t dma_buf_attach_stats_attribute_show(struct kobject *kobj,
+                                                   struct attribute *attr,
+                                                   char *buf)
+{
+        struct dma_buf_attach_stats_attribute *attribute;
+        struct dma_buf_attach_sysfs_entry *sysfs_entry;
+
+        attribute = to_dma_buf_attach_stats_attr(attr);
+        sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj);
+
+        if (!attribute->show)
+                return -EIO;
+
+        return attribute->show(sysfs_entry, attribute, buf);
+}
+
+static const struct sysfs_ops dma_buf_attach_stats_sysfs_ops = {
+        .show = dma_buf_attach_stats_attribute_show,
+};
+
+static ssize_t map_counter_show(struct dma_buf_attach_sysfs_entry *sysfs_entry,
+                                struct dma_buf_attach_stats_attribute *attr,
+                                char *buf)
+{
+        return sysfs_emit(buf, "%u\n", sysfs_entry->map_counter);
+}
+
+static struct dma_buf_attach_stats_attribute map_counter_attribute =
+        __ATTR_RO(map_counter);
+
+static struct attribute *dma_buf_attach_stats_default_attrs[] = {
+        &map_counter_attribute.attr,
+        NULL,
+};
+ATTRIBUTE_GROUPS(dma_buf_attach_stats_default);
+
+static void dma_buf_attach_sysfs_release(struct kobject *kobj)
+{
+        struct dma_buf_attach_sysfs_entry *sysfs_entry;
+
+        sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj);
+        kfree(sysfs_entry);
+}
+
+static struct kobj_type dma_buf_attach_ktype = {
+        .sysfs_ops = &dma_buf_attach_stats_sysfs_ops,
+        .release = dma_buf_attach_sysfs_release,
+        .default_groups = dma_buf_attach_stats_default_groups,
+};
+
+void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach)
+{
+        struct dma_buf_attach_sysfs_entry *sysfs_entry;
+
+        sysfs_entry = attach->sysfs_entry;
+        if (!sysfs_entry)
+                return;
+
+        sysfs_delete_link(&sysfs_entry->kobj, &attach->dev->kobj, "device");
+
+        kobject_del(&sysfs_entry->kobj);
+        kobject_put(&sysfs_entry->kobj);
+}
+
+int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
+                               unsigned int uid)
+{
+        struct dma_buf_attach_sysfs_entry *sysfs_entry;
+        int ret;
+        struct dma_buf *dmabuf;
+
+        if (!attach)
+                return -EINVAL;
+
+        dmabuf = attach->dmabuf;
+
+        sysfs_entry = kzalloc(sizeof(struct dma_buf_attach_sysfs_entry),
+                              GFP_KERNEL);
+        if (!sysfs_entry)
+                return -ENOMEM;
+
+        sysfs_entry->kobj.kset = dmabuf->sysfs_entry->attach_stats_kset;
+
+        attach->sysfs_entry = sysfs_entry;
+
+        ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_attach_ktype,
+                                   NULL, "%u", uid);
+        if (ret)
+                goto kobj_err;
+
+        ret = sysfs_create_link(&sysfs_entry->kobj, &attach->dev->kobj,
+                                "device");
+        if (ret)
+                goto link_err;
+
+        return 0;
+
+link_err:
+        kobject_del(&sysfs_entry->kobj);
+kobj_err:
+        kobject_put(&sysfs_entry->kobj);
+        attach->sysfs_entry = NULL;
+
+        return ret;
+}
+void dma_buf_stats_teardown(struct dma_buf *dmabuf)
+{
+        struct dma_buf_sysfs_entry *sysfs_entry;
+
+        sysfs_entry = dmabuf->sysfs_entry;
+        if (!sysfs_entry)
+                return;
+
+        kset_unregister(sysfs_entry->attach_stats_kset);
+        kobject_del(&sysfs_entry->kobj);
+        kobject_put(&sysfs_entry->kobj);
+}
+
+
+/* Statistics files do not need to send uevents. */
+static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+        return 0;
+}
+
+static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = {
+        .filter = dmabuf_sysfs_uevent_filter,
+};
+
+static struct kset *dma_buf_stats_kset;
+static struct kset *dma_buf_per_buffer_stats_kset;
+int dma_buf_init_sysfs_statistics(void)
+{
+        dma_buf_stats_kset = kset_create_and_add("dmabuf",
+                                                 &dmabuf_sysfs_no_uevent_ops,
+                                                 kernel_kobj);
+        if (!dma_buf_stats_kset)
+                return -ENOMEM;
+
+        dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers",
+                                                            &dmabuf_sysfs_no_uevent_ops,
+                                                            &dma_buf_stats_kset->kobj);
+        if (!dma_buf_per_buffer_stats_kset) {
+                kset_unregister(dma_buf_stats_kset);
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+void dma_buf_uninit_sysfs_statistics(void)
+{
+        kset_unregister(dma_buf_per_buffer_stats_kset);
+        kset_unregister(dma_buf_stats_kset);
+}
+
+int dma_buf_stats_setup(struct dma_buf *dmabuf)
+{
+        struct dma_buf_sysfs_entry *sysfs_entry;
+        int ret;
+        struct kset *attach_stats_kset;
+
+        if (!dmabuf || !dmabuf->file)
+                return -EINVAL;
+
+        if (!dmabuf->exp_name) {
+                pr_err("exporter name must not be empty if stats needed\n");
+                return -EINVAL;
+        }
+
+        sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
+        if (!sysfs_entry)
+                return -ENOMEM;
+
+        sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
+        sysfs_entry->dmabuf = dmabuf;
+
+        dmabuf->sysfs_entry = sysfs_entry;
+
+        /* create the directory for buffer stats */
+        ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
+                                   "%lu", file_inode(dmabuf->file)->i_ino);
+        if (ret)
+                goto err_sysfs_dmabuf;
+
+        /* create the directory for attachment stats */
+        attach_stats_kset = kset_create_and_add("attachments",
+                                                &dmabuf_sysfs_no_uevent_ops,
+                                                &sysfs_entry->kobj);
+        if (!attach_stats_kset) {
+                ret = -ENOMEM;
+                goto err_sysfs_attach;
+        }
+
+        sysfs_entry->attach_stats_kset = attach_stats_kset;
+
+        return 0;
+
+err_sysfs_attach:
+        kobject_del(&sysfs_entry->kobj);
+err_sysfs_dmabuf:
+        kobject_put(&sysfs_entry->kobj);
+        dmabuf->sysfs_entry = NULL;
+        return ret;
+}
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h
new file mode 100644
index 000000000000..5f4703249117
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA-BUF sysfs statistics.
+ *
+ * Copyright (C) 2021 Google LLC.
+ */
+
+#ifndef _DMA_BUF_SYSFS_STATS_H
+#define _DMA_BUF_SYSFS_STATS_H
+
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+
+int dma_buf_init_sysfs_statistics(void);
+void dma_buf_uninit_sysfs_statistics(void);
+
+int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
+                               unsigned int uid);
+static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach,
+                                                       int delta)
+{
+        struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry;
+
+        entry->map_counter += delta;
+}
+void dma_buf_stats_teardown(struct dma_buf *dmabuf);
+void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach);
+static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf)
+{
+        struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry;
+
+        return entry->attachment_uid++;
+}
+#else
+
+static inline int dma_buf_init_sysfs_statistics(void)
+{
+        return 0;
+}
+
+static inline void dma_buf_uninit_sysfs_statistics(void) {}
+
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+{
+        return 0;
+}
+static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
+                                             unsigned int uid)
+{
+        return 0;
+}
+
+static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {}
+static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {}
+static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach,
+                                                       int delta) {}
+static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf)
+{
+        return 0;
+}
+#endif
+#endif // _DMA_BUF_SYSFS_STATS_H
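For context (not part of the patch): the layout created above can be read back from userspace with nothing more than directory iteration. A minimal sketch, assuming CONFIG_DMABUF_SYSFS_STATS=y and the /sys/kernel/dmabuf/buffers hierarchy from this series; the helper and output format are illustrative only.

/* Illustrative reader for the per-buffer stats directories added above. */
#include <dirent.h>
#include <stdio.h>

static void print_stat(const char *ino, const char *name)
{
        char path[512], val[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/dmabuf/buffers/%s/%s", ino, name);
        f = fopen(path, "r");
        if (!f)
                return;
        if (fgets(val, sizeof(val), f))
                printf("  %s: %s", name, val);
        fclose(f);
}

int main(void)
{
        DIR *dir = opendir("/sys/kernel/dmabuf/buffers");
        struct dirent *de;

        if (!dir)
                return 1;       /* likely CONFIG_DMABUF_SYSFS_STATS is off */
        while ((de = readdir(dir)) != NULL) {
                if (de->d_name[0] == '.')
                        continue;
                printf("inode %s\n", de->d_name);
                print_stat(de->d_name, "exporter_name");
                print_stat(de->d_name, "size");
        }
        closedir(dir);
        return 0;
}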
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 511fe0d217a0..510b42771974 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -29,6 +29,8 @@
 #include <uapi/linux/dma-buf.h>
 #include <uapi/linux/magic.h>
 
+#include "dma-buf-sysfs-stats.h"
+
 static inline int is_dma_buf_file(struct file *);
 
 struct dma_buf_list {
@@ -79,6 +81,7 @@ static void dma_buf_release(struct dentry *dentry)
         if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                 dma_resv_fini(dmabuf->resv);
 
+        dma_buf_stats_teardown(dmabuf);
         module_put(dmabuf->owner);
         kfree(dmabuf->name);
         kfree(dmabuf);
@@ -580,6 +583,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
         file->f_mode |= FMODE_LSEEK;
         dmabuf->file = file;
 
+        ret = dma_buf_stats_setup(dmabuf);
+        if (ret)
+                goto err_sysfs;
+
         mutex_init(&dmabuf->lock);
         INIT_LIST_HEAD(&dmabuf->attachments);
 
@@ -589,6 +596,14 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
         return dmabuf;
 
+err_sysfs:
+        /*
+         * Set file->f_path.dentry->d_fsdata to NULL so that when
+         * dma_buf_release() gets invoked by dentry_ops, it exits
+         * early before calling the release() dma_buf op.
+         */
+        file->f_path.dentry->d_fsdata = NULL;
+        fput(file);
 err_dmabuf:
         kfree(dmabuf);
 err_module:
@@ -723,6 +738,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 {
         struct dma_buf_attachment *attach;
         int ret;
+        unsigned int attach_uid;
 
         if (WARN_ON(!dmabuf || !dev))
                 return ERR_PTR(-EINVAL);
@@ -748,8 +764,13 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
         }
         dma_resv_lock(dmabuf->resv, NULL);
         list_add(&attach->node, &dmabuf->attachments);
+        attach_uid = dma_buf_update_attach_uid(dmabuf);
         dma_resv_unlock(dmabuf->resv);
 
+        ret = dma_buf_attach_stats_setup(attach, attach_uid);
+        if (ret)
+                goto err_sysfs;
+
         /* When either the importer or the exporter can't handle dynamic
          * mappings we cache the mapping here to avoid issues with the
          * reservation object lock.
@@ -776,6 +797,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
                         dma_resv_unlock(attach->dmabuf->resv);
                 attach->sgt = sgt;
                 attach->dir = DMA_BIDIRECTIONAL;
+                dma_buf_update_attachment_map_count(attach, 1 /* delta */);
         }
 
         return attach;
@@ -792,6 +814,7 @@ err_unlock:
         if (dma_buf_is_dynamic(attach->dmabuf))
                 dma_resv_unlock(attach->dmabuf->resv);
 
+err_sysfs:
         dma_buf_detach(dmabuf, attach);
         return ERR_PTR(ret);
 }
@@ -841,6 +864,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
                 dma_resv_lock(attach->dmabuf->resv, NULL);
 
         __unmap_dma_buf(attach, attach->sgt, attach->dir);
+        dma_buf_update_attachment_map_count(attach, -1 /* delta */);
 
         if (dma_buf_is_dynamic(attach->dmabuf)) {
                 dmabuf->ops->unpin(attach);
@@ -854,6 +878,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
         if (dmabuf->ops->detach)
                 dmabuf->ops->detach(dmabuf, attach);
 
+        dma_buf_attach_stats_teardown(attach);
         kfree(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -926,6 +951,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
  * the underlying backing storage is pinned for as long as a mapping exists,
  * therefore users/importers should not hold onto a mapping for undue amounts of
  * time.
+ *
+ * Important: Dynamic importers must wait for the exclusive fence of the struct
+ * dma_resv attached to the DMA-BUF first.
  */
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                         enum dma_data_direction direction)
@@ -993,6 +1021,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
         }
 #endif /* CONFIG_DMA_API_DEBUG */
 
+        if (!IS_ERR(sg_table))
+                dma_buf_update_attachment_map_count(attach, 1 /* delta */);
+
         return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -1030,6 +1061,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
         if (dma_buf_is_dynamic(attach->dmabuf) &&
             !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
                 dma_buf_unpin(attach);
+
+        dma_buf_update_attachment_map_count(attach, -1 /* delta */);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
@@ -1469,6 +1502,12 @@ static inline void dma_buf_uninit_debugfs(void)
 
 static int __init dma_buf_init(void)
 {
+        int ret;
+
+        ret = dma_buf_init_sysfs_statistics();
+        if (ret)
+                return ret;
+
         dma_buf_mnt = kern_mount(&dma_buf_fs_type);
         if (IS_ERR(dma_buf_mnt))
                 return PTR_ERR(dma_buf_mnt);
@@ -1484,5 +1523,6 @@ static void __exit dma_buf_deinit(void)
 {
         dma_buf_uninit_debugfs();
         kern_unmount(dma_buf_mnt);
+        dma_buf_uninit_sysfs_statistics();
 }
 __exitcall(dma_buf_deinit);
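For orientation (not part of the patch): the new accounting is driven entirely by the existing attach/map API, so a hedged importer-side sketch shows where each hook above fires. The function name is hypothetical, error handling is compressed, and the device pointer is assumed valid.

/* Hypothetical importer flow showing where the new accounting hooks fire. */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int example_import(struct dma_buf *buf, struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(buf, dev);  /* attachment dir + "device" link appear */
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {                  /* map_counter untouched on error */
                dma_buf_detach(buf, attach);
                return PTR_ERR(sgt);
        }
        /* map_counter is now 1 for this (non-cached) attachment */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        /* map_counter back to 0 */
        dma_buf_detach(buf, attach);        /* attachment stats torn down */
        return 0;
}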
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 7d129e68ac70..1b4cb3e5cec9 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -137,6 +137,7 @@ static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
         struct dma_fence_chain *chain;
 
         chain = container_of(cb, typeof(*chain), cb);
+        init_irq_work(&chain->work, dma_fence_chain_irq_work);
         irq_work_queue(&chain->work);
         dma_fence_put(f);
 }
@@ -239,7 +240,6 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
         rcu_assign_pointer(chain->prev, prev);
         chain->fence = fence;
         chain->prev_seqno = 0;
-        init_irq_work(&chain->work, dma_fence_chain_irq_work);
 
         /* Try to reuse the context of the previous chain node. */
         if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index f26c71747d43..e744fd87c63c 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -615,25 +615,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 {
-        unsigned int seq, shared_count;
+        struct dma_fence *fence;
+        unsigned int seq;
         int ret;
 
         rcu_read_lock();
 retry:
         ret = true;
-        shared_count = 0;
         seq = read_seqcount_begin(&obj->seq);
 
         if (test_all) {
                 struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-                unsigned int i;
-
-                if (fobj)
-                        shared_count = fobj->shared_count;
+                unsigned int i, shared_count;
 
+                shared_count = fobj ? fobj->shared_count : 0;
                 for (i = 0; i < shared_count; ++i) {
-                        struct dma_fence *fence;
-
                         fence = rcu_dereference(fobj->shared[i]);
                         ret = dma_resv_test_signaled_single(fence);
                         if (ret < 0)
@@ -641,24 +637,19 @@ retry:
                         else if (!ret)
                                 break;
                 }
-
-                if (read_seqcount_retry(&obj->seq, seq))
-                        goto retry;
         }
 
-        if (!shared_count) {
-                struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
-
-                if (fence_excl) {
-                        ret = dma_resv_test_signaled_single(fence_excl);
-                        if (ret < 0)
-                                goto retry;
+        fence = dma_resv_excl_fence(obj);
+        if (ret && fence) {
+                ret = dma_resv_test_signaled_single(fence);
+                if (ret < 0)
+                        goto retry;
 
-                        if (read_seqcount_retry(&obj->seq, seq))
-                                goto retry;
-                }
         }
 
+        if (read_seqcount_retry(&obj->seq, seq))
+                goto retry;
+
         rcu_read_unlock();
         return ret;
 }
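As a reading aid (not from the patch): the reworked function is called exactly as before; it now also checks the exclusive fence when all shared fences have signaled, and performs a single seqcount retry at the end. A minimal caller sketch, with a hypothetical helper name:

/* Hypothetical caller: poll a reservation object without blocking. */
#include <linux/dma-resv.h>

static bool example_buffer_is_idle(struct dma_resv *resv)
{
        /*
         * test_all = true: all shared fences and the exclusive fence must
         * have signaled. The function takes rcu_read_lock() internally and
         * retries on seqcount races, so no locking is required here.
         */
        return dma_resv_test_signaled(resv, true);
}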
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 9525f7f56119..8ce1ea59d31b 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void)
         return &f->base;
 }
 
-static inline struct mock_chain {
-        struct dma_fence_chain base;
-} *to_mock_chain(struct dma_fence *f) {
-        return container_of(f, struct mock_chain, base.base);
-}
-
 static struct dma_fence *mock_chain(struct dma_fence *prev,
                                     struct dma_fence *fence,
                                     u64 seqno)
 {
-        struct mock_chain *f;
+        struct dma_fence_chain *f;
 
-        f = kmalloc(sizeof(*f), GFP_KERNEL);
+        f = dma_fence_chain_alloc();
         if (!f)
                 return NULL;
 
-        dma_fence_chain_init(&f->base,
-                             dma_fence_get(prev),
-                             dma_fence_get(fence),
+        dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence),
                              seqno);
 
-        return &f->base.base;
+        return &f->base;
 }
 
 static int sanitycheck(void *arg)
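A small sketch (not from the patch) of the dma_fence_chain_alloc()/dma_fence_chain_init() pairing the selftest now relies on; the timeline-append helper here is hypothetical:

/* Hypothetical: append a fence to a timeline represented by *pchain. */
#include <linux/dma-fence-chain.h>

static int example_add_to_chain(struct dma_fence **pchain,
                                struct dma_fence *fence, u64 seqno)
{
        struct dma_fence_chain *link = dma_fence_chain_alloc();

        if (!link)
                return -ENOMEM;

        /* dma_fence_chain_init() takes over the caller's references
         * to both the previous chain node and the new fence.
         */
        dma_fence_chain_init(link, *pchain, fence, seqno);
        *pchain = &link->base;
        return 0;
}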
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..8df761a10251 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,9 +11,15 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>
 
-static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
-static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
+static int list_limit = 1024;
+module_param(list_limit, int, 0644);
+MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");
+
+static int size_limit_mb = 64;
+module_param(size_limit_mb, int, 0644);
+MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
 
 struct udmabuf {
         pgoff_t pagecount;
@@ -160,10 +166,13 @@ static long udmabuf_create(struct miscdevice *device,
 {
         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
         struct file *memfd = NULL;
+        struct address_space *mapping = NULL;
         struct udmabuf *ubuf;
         struct dma_buf *buf;
         pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-        struct page *page;
+        struct page *page, *hpage = NULL;
+        pgoff_t subpgoff, maxsubpgs;
+        struct hstate *hpstate;
         int seals, ret = -EINVAL;
         u32 i, flags;
 
@@ -194,7 +203,8 @@ static long udmabuf_create(struct miscdevice *device,
                 memfd = fget(list[i].memfd);
                 if (!memfd)
                         goto err;
-                if (!shmem_mapping(file_inode(memfd)->i_mapping))
+                mapping = file_inode(memfd)->i_mapping;
+                if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
                         goto err;
                 seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
                 if (seals == -EINVAL)
@@ -205,17 +215,48 @@ static long udmabuf_create(struct miscdevice *device,
                         goto err;
                 pgoff = list[i].offset >> PAGE_SHIFT;
                 pgcnt = list[i].size >> PAGE_SHIFT;
+                if (is_file_hugepages(memfd)) {
+                        hpstate = hstate_file(memfd);
+                        pgoff = list[i].offset >> huge_page_shift(hpstate);
+                        subpgoff = (list[i].offset &
+                                    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+                        maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+                }
                 for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-                        page = shmem_read_mapping_page(
-                                file_inode(memfd)->i_mapping, pgoff + pgidx);
-                        if (IS_ERR(page)) {
-                                ret = PTR_ERR(page);
-                                goto err;
+                        if (is_file_hugepages(memfd)) {
+                                if (!hpage) {
+                                        hpage = find_get_page_flags(mapping, pgoff,
+                                                                    FGP_ACCESSED);
+                                        if (IS_ERR(hpage)) {
+                                                ret = PTR_ERR(hpage);
+                                                goto err;
+                                        }
+                                }
+                                page = hpage + subpgoff;
+                                get_page(page);
+                                subpgoff++;
+                                if (subpgoff == maxsubpgs) {
+                                        put_page(hpage);
+                                        hpage = NULL;
+                                        subpgoff = 0;
+                                        pgoff++;
+                                }
+                        } else {
+                                page = shmem_read_mapping_page(mapping,
+                                                               pgoff + pgidx);
+                                if (IS_ERR(page)) {
+                                        ret = PTR_ERR(page);
+                                        goto err;
+                                }
                         }
                         ubuf->pages[pgbuf++] = page;
                 }
                 fput(memfd);
                 memfd = NULL;
+                if (hpage) {
+                        put_page(hpage);
+                        hpage = NULL;
+                }
         }
 
         exp_info.ops = &udmabuf_ops;
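For context (not part of the patch): the hugetlb path above is reached by handing a MFD_HUGETLB memfd to the UDMABUF_CREATE ioctl. A hedged userspace sketch, assuming a 2 MiB default huge page size and eliding cleanup:

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
        struct udmabuf_create create;
        int memfd, devfd, buffd;

        /* Huge-page-backed memfd: this is what the new path keys on. */
        memfd = memfd_create("example", MFD_ALLOW_SEALING | MFD_HUGETLB);
        if (memfd < 0)
                return 1;
        if (ftruncate(memfd, 2 * 1024 * 1024) < 0)  /* one 2 MiB huge page */
                return 1;
        /* udmabuf requires F_SEAL_SHRINK (and rejects F_SEAL_WRITE). */
        if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
                return 1;

        devfd = open("/dev/udmabuf", O_RDWR);
        if (devfd < 0)
                return 1;

        memset(&create, 0, sizeof(create));
        create.memfd  = memfd;
        create.flags  = UDMABUF_FLAGS_CLOEXEC;
        create.offset = 0;
        create.size   = 2 * 1024 * 1024;

        /* On success this returns a new dma-buf fd backed by the memfd pages. */
        buffd = ioctl(devfd, UDMABUF_CREATE, &create);
        return buffd < 0 ? 1 : 0;
}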