author		Thomas Zimmermann <tzimmermann@suse.de>	2022-07-14 09:57:37 +0200
committer	Thomas Zimmermann <tzimmermann@suse.de>	2022-07-14 09:57:37 +0200
commit		f83d9396d1f63048c423efa00e4e244da10a35fd (patch)
tree		48da2cf9b20cc7049c92dd4a7dd74be11add86e5 /drivers/dma-buf
parent		5ee8c8f930ba7d20717c4fc2d9f1ce0e757d1155 (diff)
parent		0180290abb5ce5c870f84a00ffeda5802f641dce (diff)
Merge drm/drm-next into drm-misc-next-fixes
Backmerging from drm/drm-next for the final fixes that will go into v5.20.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--	drivers/dma-buf/Kconfig			  6
-rw-r--r--	drivers/dma-buf/Makefile		  2
-rw-r--r--	drivers/dma-buf/dma-buf.c		126
-rw-r--r--	drivers/dma-buf/dma-fence-chain.c	  4
-rw-r--r--	drivers/dma-buf/dma-fence-unwrap.c	162
-rw-r--r--	drivers/dma-buf/st-dma-fence-unwrap.c	157
-rw-r--r--	drivers/dma-buf/sync_file.c		119
-rw-r--r--	drivers/dma-buf/udmabuf.c		 23
8 files changed, 445 insertions, 154 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 541efe01abc7..e4dc53a36428 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -75,7 +75,7 @@ menuconfig DMABUF_HEAPS
between drivers.
menuconfig DMABUF_SYSFS_STATS
- bool "DMA-BUF sysfs statistics"
+ bool "DMA-BUF sysfs statistics (DEPRECATED)"
depends on DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
@@ -85,6 +85,10 @@ menuconfig DMABUF_SYSFS_STATS
statistics for the DMA-BUF with the unique inode number
<inode_number>.
+ This option is deprecated and should be removed sooner or later.
+ Android is its only user, and the statistics turned out to cause
+ significant performance problems.
+
source "drivers/dma-buf/heaps/Kconfig"
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4c9eb53ba3f8..70ec901edf2c 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-resv.o
+ dma-fence-unwrap.o dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 79795857be3e..630133284e2b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
@@ -192,6 +193,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* Note that this only signals the completion of the respective fences, i.e. the
* DMA transfers are complete. Cache flushing and any other necessary
* preparations before CPU access can begin still need to happen.
+ *
+ * As an alternative to poll(), the set of fences on a DMA buffer can be
+ * exported as a &sync_file using &dma_buf_sync_file_export.
*/
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
@@ -326,6 +330,101 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
return 0;
}
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
+ void __user *user_data)
+{
+ struct dma_buf_export_sync_file arg;
+ enum dma_resv_usage usage;
+ struct dma_fence *fence = NULL;
+ struct sync_file *sync_file;
+ int fd, ret;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
+ ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
+ if (ret)
+ goto err_put_fd;
+
+ if (!fence)
+ fence = dma_fence_get_stub();
+
+ sync_file = sync_file_create(fence);
+
+ dma_fence_put(fence);
+
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ arg.fd = fd;
+ if (copy_to_user(user_data, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto err_put_file;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err_put_file:
+ fput(sync_file->file);
+err_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
+ const void __user *user_data)
+{
+ struct dma_buf_import_sync_file arg;
+ struct dma_fence *fence;
+ enum dma_resv_usage usage;
+ int ret = 0;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(arg.fd);
+ if (!fence)
+ return -EINVAL;
+
+ usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
+ DMA_RESV_USAGE_READ;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (!ret)
+ dma_resv_add_fence(dmabuf->resv, fence, usage);
+
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_fence_put(fence);
+
+ return ret;
+}
+#endif
+
static long dma_buf_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -369,6 +468,13 @@ static long dma_buf_ioctl(struct file *file,
case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg);
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+ case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
+ return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
+ case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
+ return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
+#endif
+
default:
return -ENOTTY;
}
@@ -408,6 +514,7 @@ static inline int is_dma_buf_file(struct file *file)
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -417,6 +524,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique thus
+ * not suitable for using it as dentry name by dmabuf stats.
+ * Override ->i_ino with the unique and dmabuffs specific
+ * value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
@@ -544,10 +658,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
@@ -555,6 +665,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ ret = dma_buf_stats_setup(dmabuf);
+ if (ret)
+ goto err_sysfs;
+
return dmabuf;
err_sysfs:
@@ -1351,7 +1465,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
return ret;
seq_puts(s, "\nDma-buf Objects:\n");
- seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
+ seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
@@ -1368,7 +1482,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
file_count(buf_obj->file),
buf_obj->exp_name,
file_inode(buf_obj->file)->i_ino,
- buf_obj->name ?: "");
+ buf_obj->name ?: "<none>");
spin_unlock(&buf_obj->name_lock);
dma_resv_describe(buf_obj->resv, s);
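The two new ioctls give userspace explicit/implicit sync interop: EXPORT snapshots the buffer's current fences into a sync_file, IMPORT attaches a sync_file's fence to the buffer's reservation object. A minimal userspace sketch, assuming the struct dma_buf_export_sync_file / dma_buf_import_sync_file UAPI added alongside this code (error handling trimmed):

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Snapshot the current read fences of a dma-buf as a poll()able
 * sync_file fd; returns -1 on error. */
static int export_read_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = { .flags = DMA_BUF_SYNC_READ };

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) < 0)
		return -1;
	return arg.fd;
}

/* Attach a sync_file's fence to the dma-buf as a write fence. */
static int import_write_fence(int dmabuf_fd, int sync_file_fd)
{
	struct dma_buf_import_sync_file arg = {
		.flags = DMA_BUF_SYNC_WRITE,
		.fd = sync_file_fd,
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
}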
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 06f8ef97c6e8..a0d920576ba6 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -62,8 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
replacement = NULL;
}
- tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
- prev, replacement);
+ tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
+ RCU_INITIALIZER(replacement)));
if (tmp == prev)
dma_fence_put(tmp);
else
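The change above is purely an annotation fix: chain->prev is an __rcu pointer, and the old code needed a __force cast to feed it to cmpxchg(). RCU_INITIALIZER() and unrcu_pointer() express the same swap without the cast. The pattern in isolation, as a sketch (helper name hypothetical):

#include <linux/rcupdate.h>

/* Atomically replace *slot (an __rcu pointer) if it still holds @old,
 * returning the value actually seen, with no __force casts needed. */
static struct dma_fence *replace_rcu_fence(struct dma_fence __rcu **slot,
					   struct dma_fence *old,
					   struct dma_fence *new)
{
	return unrcu_pointer(cmpxchg(slot, RCU_INITIALIZER(old),
				     RCU_INITIALIZER(new)));
}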
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
new file mode 100644
index 000000000000..502a65ea6d44
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dma-fence-util: misc functions for dma_fence objects
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-unwrap.h>
+#include <linux/slab.h>
+
+/* Internal helper to start new array iteration, don't use directly */
+static struct dma_fence *
+__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
+{
+ cursor->array = dma_fence_chain_contained(cursor->chain);
+ cursor->index = 0;
+ return dma_fence_array_first(cursor->array);
+}
+
+/**
+ * dma_fence_unwrap_first - return the first fence from fence containers
+ * @head: the entrypoint into the containers
+ * @cursor: current position inside the containers
+ *
+ * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
+ * first fence.
+ */
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor)
+{
+ cursor->chain = dma_fence_get(head);
+ return __dma_fence_unwrap_array(cursor);
+}
+EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);
+
+/**
+ * dma_fence_unwrap_next - return the next fence from the fence containers
+ * @cursor: current position inside the containers
+ *
+ * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
+ * the next fence from them.
+ */
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+{
+ struct dma_fence *tmp;
+
+ ++cursor->index;
+ tmp = dma_fence_array_next(cursor->array, cursor->index);
+ if (tmp)
+ return tmp;
+
+ cursor->chain = dma_fence_chain_walk(cursor->chain);
+ return __dma_fence_unwrap_array(cursor);
+}
+EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+/* Implementation for the dma_fence_unwrap_merge() macro, don't use directly */
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *iter)
+{
+ struct dma_fence_array *result;
+ struct dma_fence *tmp, **array;
+ unsigned int i;
+ size_t count;
+
+ count = 0;
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
+ ++count;
+ }
+
+ if (count == 0)
+ return dma_fence_get_stub();
+
+ array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ /*
+ * This trashes the input fence array and uses it as position for the
+ * following merge loop. This works because the dma_fence_unwrap_merge()
+ * wrapper macro is creating this temporary array on the stack together
+ * with the iterators.
+ */
+ for (i = 0; i < num_fences; ++i)
+ fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
+
+ count = 0;
+ do {
+ unsigned int sel;
+
+restart:
+ tmp = NULL;
+ for (i = 0; i < num_fences; ++i) {
+ struct dma_fence *next;
+
+ while (fences[i] && dma_fence_is_signaled(fences[i]))
+ fences[i] = dma_fence_unwrap_next(&iter[i]);
+
+ next = fences[i];
+ if (!next)
+ continue;
+
+ /*
+ * We can't guarantee that input fences are ordered by
+ * context, but it is still quite likely when this
+ * function is used multiple times. So attempt to order
+ * the fences by context as we pass over them and merge
+ * fences with the same context.
+ */
+ if (!tmp || tmp->context > next->context) {
+ tmp = next;
+ sel = i;
+
+ } else if (tmp->context < next->context) {
+ continue;
+
+ } else if (dma_fence_is_later(tmp, next)) {
+ fences[i] = dma_fence_unwrap_next(&iter[i]);
+ goto restart;
+ } else {
+ fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+ goto restart;
+ }
+ }
+
+ if (tmp) {
+ array[count++] = dma_fence_get(tmp);
+ fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+ }
+ } while (tmp);
+
+ if (count == 0) {
+ tmp = dma_fence_get_stub();
+ goto return_tmp;
+ }
+
+ if (count == 1) {
+ tmp = array[0];
+ goto return_tmp;
+ }
+
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
+
+return_tmp:
+ kfree(array);
+ return tmp;
+}
+EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
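For callers, the cursor API makes container-agnostic iteration a one-liner. A hypothetical helper (not part of this series) that counts the leaf fences behind a possibly wrapped fence, mirroring the counting pass at the top of __dma_fence_unwrap_merge() above:

#include <linux/dma-fence-unwrap.h>

static unsigned int count_leaf_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *f;
	unsigned int count = 0;

	/* Visits every leaf fence, looking through chains and arrays. */
	dma_fence_unwrap_for_each(f, &cursor, head)
		++count;

	return count;
}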
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index 039f016b57be..4105d5ea8dde 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -4,27 +4,19 @@
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*/
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
-#if 0
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/random.h>
-#endif
#include "selftest.h"
#define CHAIN_SZ (4 << 10)
-static inline struct mock_fence {
+struct mock_fence {
struct dma_fence base;
spinlock_t lock;
-} *to_mock_fence(struct dma_fence *f) {
- return container_of(f, struct mock_fence, base);
-}
+};
static const char *mock_name(struct dma_fence *f)
{
@@ -45,7 +37,8 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+ dma_fence_init(&f->base, &mock_ops, &f->lock,
+ dma_fence_context_alloc(1), 1);
return &f->base;
}
@@ -59,7 +52,7 @@ static struct dma_fence *mock_array(unsigned int num_fences, ...)
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences)
- return NULL;
+ goto error_put;
va_start(valist, num_fences);
for (i = 0; i < num_fences; ++i)
@@ -70,13 +63,17 @@ static struct dma_fence *mock_array(unsigned int num_fences, ...)
dma_fence_context_alloc(1),
1, false);
if (!array)
- goto cleanup;
+ goto error_free;
return &array->base;
-cleanup:
- for (i = 0; i < num_fences; ++i)
- dma_fence_put(fences[i]);
+error_free:
kfree(fences);
+
+error_put:
+ va_start(valist, num_fences);
+ for (i = 0; i < num_fences; ++i)
+ dma_fence_put(va_arg(valist, typeof(*fences)));
+ va_end(valist);
return NULL;
}
@@ -113,7 +110,6 @@ static int sanitycheck(void *arg)
if (!chain)
return -ENOMEM;
- dma_fence_signal(f);
dma_fence_put(chain);
return err;
}
@@ -154,10 +150,8 @@ static int unwrap_array(void *arg)
err = -EINVAL;
}
- dma_fence_signal(f1);
- dma_fence_signal(f2);
dma_fence_put(array);
- return 0;
+ return err;
}
static int unwrap_chain(void *arg)
@@ -196,10 +190,8 @@ static int unwrap_chain(void *arg)
err = -EINVAL;
}
- dma_fence_signal(f1);
- dma_fence_signal(f2);
dma_fence_put(chain);
- return 0;
+ return err;
}
static int unwrap_chain_array(void *arg)
@@ -242,10 +234,115 @@ static int unwrap_chain_array(void *arg)
err = -EINVAL;
}
- dma_fence_signal(f1);
- dma_fence_signal(f2);
dma_fence_put(chain);
- return 0;
+ return err;
+}
+
+static int unwrap_merge(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ f3 = dma_fence_unwrap_merge(f1, f2);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f3) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f2) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_complex(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2)
+ goto error_put_f1;
+
+ f3 = dma_fence_unwrap_merge(f1, f2);
+ if (!f3)
+ goto error_put_f2;
+
+ /* The resulting array has the fences in reverse */
+ f4 = dma_fence_unwrap_merge(f2, f1);
+ if (!f4)
+ goto error_put_f3;
+
+ /* Signaled fences should be filtered, the two arrays merged. */
+ f5 = dma_fence_unwrap_merge(f3, f4, dma_fence_get_stub());
+ if (!f5)
+ goto error_put_f4;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f5) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f2) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
}
int dma_fence_unwrap(void)
@@ -255,6 +352,8 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_array),
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
+ SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_complex),
};
return subtests(tests, NULL);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 0fe564539166..3ebec19a8e02 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -146,50 +146,6 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
return buf;
}
-static int sync_file_set_fence(struct sync_file *sync_file,
- struct dma_fence **fences, int num_fences)
-{
- struct dma_fence_array *array;
-
- /*
- * The reference for the fences in the new sync_file and held
- * in add_fence() during the merge procedure, so for num_fences == 1
- * we already own a new reference to the fence. For num_fence > 1
- * we own the reference of the dma_fence_array creation.
- */
-
- if (num_fences == 0) {
- sync_file->fence = dma_fence_get_stub();
- kfree(fences);
-
- } else if (num_fences == 1) {
- sync_file->fence = fences[0];
- kfree(fences);
-
- } else {
- array = dma_fence_array_create(num_fences, fences,
- dma_fence_context_alloc(1),
- 1, false);
- if (!array)
- return -ENOMEM;
-
- sync_file->fence = &array->base;
- }
-
- return 0;
-}
-
-static void add_fence(struct dma_fence **fences,
- int *i, struct dma_fence *fence)
-{
- fences[*i] = fence;
-
- if (!dma_fence_is_signaled(fence)) {
- dma_fence_get(fence);
- (*i)++;
- }
-}
-
/**
* sync_file_merge() - merge two sync_files
* @name: name of new fence
@@ -203,84 +159,21 @@ static void add_fence(struct dma_fence **fences,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
- struct dma_fence *a_fence, *b_fence, **fences;
- struct dma_fence_unwrap a_iter, b_iter;
- unsigned int index, num_fences;
struct sync_file *sync_file;
+ struct dma_fence *fence;
sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- num_fences = 0;
- dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
- ++num_fences;
- dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
- ++num_fences;
-
- if (num_fences > INT_MAX)
- goto err_free_sync_file;
-
- fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
- if (!fences)
- goto err_free_sync_file;
-
- /*
- * We can't guarantee that fences in both a and b are ordered, but it is
- * still quite likely.
- *
- * So attempt to order the fences as we pass over them and merge fences
- * with the same context.
- */
-
- index = 0;
- for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
- b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
- a_fence || b_fence; ) {
-
- if (!b_fence) {
- add_fence(fences, &index, a_fence);
- a_fence = dma_fence_unwrap_next(&a_iter);
-
- } else if (!a_fence) {
- add_fence(fences, &index, b_fence);
- b_fence = dma_fence_unwrap_next(&b_iter);
-
- } else if (a_fence->context < b_fence->context) {
- add_fence(fences, &index, a_fence);
- a_fence = dma_fence_unwrap_next(&a_iter);
-
- } else if (b_fence->context < a_fence->context) {
- add_fence(fences, &index, b_fence);
- b_fence = dma_fence_unwrap_next(&b_iter);
-
- } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
- a_fence->ops)) {
- add_fence(fences, &index, a_fence);
- a_fence = dma_fence_unwrap_next(&a_iter);
- b_fence = dma_fence_unwrap_next(&b_iter);
-
- } else {
- add_fence(fences, &index, b_fence);
- a_fence = dma_fence_unwrap_next(&a_iter);
- b_fence = dma_fence_unwrap_next(&b_iter);
- }
+ fence = dma_fence_unwrap_merge(a->fence, b->fence);
+ if (!fence) {
+ fput(sync_file->file);
+ return NULL;
}
-
- if (sync_file_set_fence(sync_file, fences, index) < 0)
- goto err_put_fences;
-
+ sync_file->fence = fence;
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
-
-err_put_fences:
- while (index)
- dma_fence_put(fences[--index]);
- kfree(fences);
-
-err_free_sync_file:
- fput(sync_file->file);
- return NULL;
}
static int sync_file_release(struct inode *inode, struct file *file)
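Unlike the removed open-coded two-way merge, dma_fence_unwrap_merge() is variadic, so callers can combine any number of fences in one call. A hedged sketch with hypothetical fence names:

static struct dma_fence *combine(struct dma_fence *render_done,
				 struct dma_fence *blit_done,
				 struct dma_fence *scanout_done)
{
	/* Returns one fence (possibly a dma_fence_array) that signals once
	 * all unsignaled inputs have signaled; NULL on allocation failure. */
	return dma_fence_unwrap_merge(render_done, blit_done, scanout_done);
}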
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e7330684d3b8..38e8767ec371 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
+ pgoff_t pgoff = vmf->pgoff;
- vmf->page = ubuf->pages[vmf->pgoff];
+ if (pgoff >= ubuf->pagecount)
+ return VM_FAULT_SIGBUS;
+ vmf->page = ubuf->pages[pgoff];
get_page(vmf->page);
return 0;
}
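The bounds check matters because userspace controls both the buffer size and the mapping: a fault past pagecount must raise SIGBUS rather than index past the pages array. For context, a minimal userspace sketch of creating a udmabuf from a sealed memfd (UAPI from <linux/udmabuf.h>; error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/udmabuf.h>

static int create_udmabuf(size_t size)	/* size must be page-aligned */
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buf_fd;

	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	/* udmabuf requires the backing memfd be sealed against shrinking */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.offset = 0;
	create.size   = size;
	buf_fd = ioctl(devfd, UDMABUF_CREATE, &create);

	close(devfd);
	close(memfd);
	return buf_fd;	/* a dma-buf fd backed by the memfd's pages */
}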
@@ -365,7 +368,23 @@ static struct miscdevice udmabuf_misc = {
static int __init udmabuf_dev_init(void)
{
- return misc_register(&udmabuf_misc);
+ int ret;
+
+ ret = misc_register(&udmabuf_misc);
+ if (ret < 0) {
+ pr_err("Could not initialize udmabuf device\n");
+ return ret;
+ }
+
+ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+ DMA_BIT_MASK(64));
+ if (ret < 0) {
+ pr_err("Could not setup DMA mask for udmabuf device\n");
+ misc_deregister(&udmabuf_misc);
+ return ret;
+ }
+
+ return 0;
}
static void __exit udmabuf_dev_exit(void)