author     Joerg Roedel <joerg.roedel@amd.com>    2009-01-09 12:34:49 +0100
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-03-05 20:35:04 +0100
commit     30dfa90cc8c4c9621d8d5aa9499f3a5df3376307 (patch)
tree       4276579cf6c9dd516412007447c5672e0697061a /lib
parent     f2f45e5f3c921c73c913e9a9c00f21ec01c86b4d (diff)
dma-debug: add hash functions for dma_debug_entries
Impact: implement necessary functions for the core hash

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'lib')
-rw-r--r--   lib/dma-debug.c   101
1 files changed, 101 insertions, 0 deletions
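Before the patch body, a quick note on the hashing scheme it introduces: the bucket index is simply a shifted-and-masked slice of the DMA address, so every lookup for a given address lands in the same one of the 1024 buckets. Below is a minimal standalone sketch of that index computation; the constants and bit selection are copied from the patch, while hash_idx(), the sample address and the userspace main() are only for illustration.

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

/* Same bit selection as hash_fn() in the patch: bits 13-22 of the dma address. */
static int hash_idx(uint64_t dev_addr)
{
	return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

int main(void)
{
	uint64_t addr = 0x12345000ULL;	/* arbitrary example address */

	printf("bucket index for %#llx: %d\n",
	       (unsigned long long)addr, hash_idx(addr));
	return 0;
}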
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 31099712328..5ff7d2e2b60 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -18,9 +18,14 @@
*/

#include <linux/dma-debug.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/list.h>

+#define HASH_SIZE 1024ULL
+#define HASH_FN_SHIFT 13
+#define HASH_FN_MASK (HASH_SIZE - 1)
+
enum {
dma_debug_single,
dma_debug_page,
@@ -40,3 +45,99 @@ struct dma_debug_entry {
int sg_mapped_ents;
};

+struct hash_bucket {
+ struct list_head list;
+ spinlock_t lock;
+} __cacheline_aligned_in_smp;
+
+/* Hash list to save the allocated dma addresses */
+static struct hash_bucket dma_entry_hash[HASH_SIZE];
+
+/*
+ * Hash related functions
+ *
+ * Every DMA-API request is saved into a struct dma_debug_entry. To
+ * have quick access to these structs, they are stored in a hash table.
+ */
+static int hash_fn(struct dma_debug_entry *entry)
+{
+ /*
+ * Hash function is based on the dma address.
+ * We use bits 13-22 here as the index into the hash
+ */
+ return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
+}
+
+/*
+ * Request exclusive access to a hash bucket for a given dma_debug_entry.
+ */
+static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
+ unsigned long *flags)
+{
+ int idx = hash_fn(entry);
+ unsigned long __flags;
+
+ spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
+ *flags = __flags;
+ return &dma_entry_hash[idx];
+}
+
+/*
+ * Give up exclusive access to the hash bucket
+ */
+static void put_hash_bucket(struct hash_bucket *bucket,
+ unsigned long *flags)
+{
+ unsigned long __flags = *flags;
+
+ spin_unlock_irqrestore(&bucket->lock, __flags);
+}
+
+/*
+ * Search for a given entry in the hash bucket list
+ */
+static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
+ struct dma_debug_entry *ref)
+{
+ struct dma_debug_entry *entry;
+
+ list_for_each_entry(entry, &bucket->list, list) {
+ if ((entry->dev_addr == ref->dev_addr) &&
+ (entry->dev == ref->dev))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Add an entry to a hash bucket
+ */
+static void hash_bucket_add(struct hash_bucket *bucket,
+ struct dma_debug_entry *entry)
+{
+ list_add_tail(&entry->list, &bucket->list);
+}
+
+/*
+ * Remove entry from a hash bucket list
+ */
+static void hash_bucket_del(struct dma_debug_entry *entry)
+{
+ list_del(&entry->list);
+}
+
+/*
+ * Wrapper function for adding an entry to the hash.
+ * This function takes care of locking itself.
+ */
+static void add_dma_entry(struct dma_debug_entry *entry)
+{
+ struct hash_bucket *bucket;
+ unsigned long flags;
+
+ bucket = get_hash_bucket(entry, &flags);
+ hash_bucket_add(bucket, entry);
+ put_hash_bucket(bucket, &flags);
+}
+
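The patch itself only adds the insertion wrapper; the lookup/removal side that later dma-debug work builds on these helpers follows the same pattern as add_dma_entry(): take the bucket lock via get_hash_bucket(), operate on the bucket list, then drop it via put_hash_bucket(). A hypothetical sketch of such a user is shown below; remove_dma_entry() is not part of this patch, and its name and return convention are invented for illustration.

/*
 * Hypothetical user of the helpers above (not part of this patch):
 * find the entry matching @ref and unlink it from its hash bucket.
 * Returns true if an entry was found and removed.
 */
static bool remove_dma_entry(struct dma_debug_entry *ref)
{
	struct hash_bucket *bucket;
	struct dma_debug_entry *entry;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry  = hash_bucket_find(bucket, ref);
	if (entry)
		hash_bucket_del(entry);
	put_hash_bucket(bucket, &flags);

	return entry != NULL;
}

Note also that the statically allocated dma_entry_hash[] buckets still need their list heads and spinlocks initialized (INIT_LIST_HEAD()/spin_lock_init()) before first use; that setup is not visible in this patch and presumably arrives with the dma-debug core initialization in a follow-up.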