 arch/arm/mach-ux500/dcache.c      |  70
 arch/arm/mach-ux500/hwmem-int.c   | 165
 drivers/misc/hwmem/Makefile       |   2
 drivers/misc/hwmem/contig_alloc.c | 468
 drivers/misc/hwmem/hwmem-main.c   | 462
 include/linux/hwmem.h             |  22
 6 files changed, 810 insertions(+), 379 deletions(-)
diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c
index b1c3942c181..b117d4e8283 100644
--- a/arch/arm/mach-ux500/dcache.c
+++ b/arch/arm/mach-ux500/dcache.c
@@ -9,7 +9,6 @@
  * License terms: GNU General Public License (GPL), version 2.
  */
 
-#include <linux/hwmem.h>
 #include <linux/dma-mapping.h>
 
 #include <asm/pgtable.h>
@@ -91,75 +90,6 @@ static void flush_inner_dcache_all(void);
 
 static bool is_cache_exclusive(void);
 
-enum hwmem_alloc_flags cachi_get_cache_settings(
-	enum hwmem_alloc_flags requested_cache_settings)
-{
-	static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
-		HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
-		HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
-		HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
-		HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
-
-	enum hwmem_alloc_flags cache_settings;
-
-	if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) &&
-	    requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
-	    HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE))
-		/*
-		 * We never use uncached as it's extremely slow and there is
-		 * no scenario where it would be better than buffered memory.
-		 */
-		return HWMEM_ALLOC_HINT_WRITE_COMBINE;
-
-	/*
-	 * The user has specified cached or nothing at all, both are treated as
-	 * cached.
-	 */
-	cache_settings = (requested_cache_settings &
-		~(HWMEM_ALLOC_HINT_UNCACHED |
-		HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
-		HWMEM_ALLOC_HINT_INNER_CACHE_ONLY |
-		HWMEM_ALLOC_HINT_CACHE_NAOW)) |
-		HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED |
-		HWMEM_ALLOC_HINT_CACHE_AOW |
-		HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
-	if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB |
-	    HWMEM_ALLOC_HINT_CACHE_WT)))
-		cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB;
-
-	/*
-	 * On ARMv7 "alloc on write" is just a hint so we need to assume the
-	 * worst case ie "alloc on write". We would however like to remember
-	 * the requested "alloc on write" setting so that we can pass it on to
-	 * the hardware, we use the reserved bit in the alloc flags to do that.
-	 */
-	if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
-		cache_settings |= HWMEM_ALLOC_RESERVED_CHI;
-	else
-		cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI;
-
-	return cache_settings;
-}
-
-void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
-	pgprot_t *pgprot)
-{
-	if (cache_settings & HWMEM_ALLOC_HINT_CACHED) {
-		if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT)
-			*pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
-						L_PTE_MT_WRITETHROUGH);
-		else {
-			if (cache_settings & HWMEM_ALLOC_RESERVED_CHI)
-				*pgprot = __pgprot_modify(*pgprot,
-					L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC);
-			else
-				*pgprot = __pgprot_modify(*pgprot,
-					L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
-		}
-	} else {
-		*pgprot = pgprot_writecombine(*pgprot);
-	}
-}
-
 void drain_cpu_write_buf(void)
 {
 	dsb();
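
The two cache-policy helpers deleted above are not dropped; they reappear verbatim in the new arch/arm/mach-ux500/hwmem-int.c below. The policy they encode: a request naming only uncached or write-combine hints is served as write-combine (truly uncached mappings are never worth it), and everything else is normalized to cached, write-back, inner-and-outer cache. A standalone userspace sketch of that decision tree, with invented HINT_* bit values (the real flags live in <linux/hwmem.h> and are not shown in this patch):

	/*
	 * Toy model of the cachi_get_cache_settings() policy.  The HINT_*
	 * values are invented for the demo; only the branching mirrors the
	 * kernel code above.  Build: gcc -std=c99 -o cache-policy demo.c
	 */
	#include <stdio.h>

	#define HINT_UNCACHED      (1u << 0)
	#define HINT_WRITE_COMBINE (1u << 1)
	#define HINT_CACHED        (1u << 2)
	#define HINT_CACHE_WB      (1u << 3)
	#define HINT_CACHE_WT      (1u << 4)

	static unsigned int get_cache_settings(unsigned int req)
	{
		const unsigned int cache_on = HINT_CACHED | HINT_CACHE_WB |
				HINT_CACHE_WT;

		/* Only uncached/write-combine asked for: downgrade to
		 * write-combine, never hand out truly uncached memory. */
		if (!(req & cache_on) &&
		    (req & (HINT_UNCACHED | HINT_WRITE_COMBINE)))
			return HINT_WRITE_COMBINE;

		/* Cached, or no hints at all: treat as cached and default
		 * to write-back unless WB/WT was chosen explicitly. */
		req = (req & ~HINT_UNCACHED) | HINT_CACHED;
		if (!(req & (HINT_CACHE_WB | HINT_CACHE_WT)))
			req |= HINT_CACHE_WB;

		return req;
	}

	int main(void)
	{
		printf("uncached -> %#x (write-combine)\n",
				get_cache_settings(HINT_UNCACHED));
		printf("no hints -> %#x (cached + write-back)\n",
				get_cache_settings(0));
		return 0;
	}
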
diff --git a/arch/arm/mach-ux500/hwmem-int.c b/arch/arm/mach-ux500/hwmem-int.c
new file mode 100644
index 00000000000..c23049df4a6
--- /dev/null
+++ b/arch/arm/mach-ux500/hwmem-int.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Hardware memory driver integration
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* CONA API */
+void *cona_create(const char *name, phys_addr_t region_paddr,
+		size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+struct hwmem_mem_type_struct *hwmem_mem_types;
+unsigned int hwmem_num_mem_types;
+
+static phys_addr_t hwmem_paddr;
+static size_t hwmem_size;
+
+static int __init parse_hwmem_param(char *p)
+{
+	hwmem_size = memparse(p, &p);
+
+	if (*p != '@')
+		goto no_at;
+
+	hwmem_paddr = memparse(p + 1, &p);
+
+	return 0;
+
+no_at:
+	hwmem_size = 0;
+
+	return -EINVAL;
+}
+early_param("hwmem", parse_hwmem_param);
+
+static int __init setup_hwmem(void)
+{
+	static const unsigned int NUM_MEM_TYPES = 2;
+
+	int ret;
+
+	if (hwmem_paddr != PAGE_ALIGN(hwmem_paddr) ||
+	    hwmem_size != PAGE_ALIGN(hwmem_size) || hwmem_size == 0) {
+		printk(KERN_WARNING "HWMEM: hwmem_paddr !="
+			" PAGE_ALIGN(hwmem_paddr) || hwmem_size !="
+			" PAGE_ALIGN(hwmem_size) || hwmem_size == 0\n");
+		return -ENOMSG;
+	}
+
+	hwmem_mem_types = kzalloc(sizeof(struct hwmem_mem_type_struct) *
+			NUM_MEM_TYPES, GFP_KERNEL);
+	if (hwmem_mem_types == NULL)
+		return -ENOMEM;
+
+	hwmem_mem_types[0].id = HWMEM_MEM_SCATTERED_SYS;
+	hwmem_mem_types[0].allocator_api.alloc = cona_alloc;
+	hwmem_mem_types[0].allocator_api.free = cona_free;
+	hwmem_mem_types[0].allocator_api.get_alloc_paddr =
+			cona_get_alloc_paddr;
+	hwmem_mem_types[0].allocator_api.get_alloc_kaddr =
+			cona_get_alloc_kaddr;
+	hwmem_mem_types[0].allocator_api.get_alloc_size = cona_get_alloc_size;
+	hwmem_mem_types[0].allocator_instance = cona_create("hwmem",
+			hwmem_paddr, hwmem_size);
+	if (IS_ERR(hwmem_mem_types[0].allocator_instance)) {
+		ret = PTR_ERR(hwmem_mem_types[0].allocator_instance);
+		goto hwmem_ima_init_failed;
+	}
+
+	hwmem_mem_types[1] = hwmem_mem_types[0];
+	hwmem_mem_types[1].id = HWMEM_MEM_CONTIGUOUS_SYS;
+
+	hwmem_num_mem_types = NUM_MEM_TYPES;
+
+	return 0;
+
+hwmem_ima_init_failed:
+	kfree(hwmem_mem_types);
+
+	return ret;
+}
+arch_initcall_sync(setup_hwmem);
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+	enum hwmem_alloc_flags requested_cache_settings)
+{
+	static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+		HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+		HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+		HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+		HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+
+	enum hwmem_alloc_flags cache_settings;
+
+	if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) &&
+	    requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+	    HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE))
+		/*
+		 * We never use uncached as it's extremely slow and there is
+		 * no scenario where it would be better than buffered memory.
+		 */
+		return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+
+	/*
+	 * The user has specified cached or nothing at all, both are treated as
+	 * cached.
+	 */
+	cache_settings = (requested_cache_settings &
+		~(HWMEM_ALLOC_HINT_UNCACHED |
+		HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+		HWMEM_ALLOC_HINT_INNER_CACHE_ONLY |
+		HWMEM_ALLOC_HINT_CACHE_NAOW)) |
+		HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED |
+		HWMEM_ALLOC_HINT_CACHE_AOW |
+		HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+	if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB |
+	    HWMEM_ALLOC_HINT_CACHE_WT)))
+		cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB;
+
+	/*
+	 * On ARMv7 "alloc on write" is just a hint so we need to assume the
+	 * worst case ie "alloc on write". We would however like to remember
+	 * the requested "alloc on write" setting so that we can pass it on to
+	 * the hardware, we use the reserved bit in the alloc flags to do that.
+	 */
+	if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+		cache_settings |= HWMEM_ALLOC_RESERVED_CHI;
+	else
+		cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI;
+
+	return cache_settings;
+}
+
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+	pgprot_t *pgprot)
+{
+	if (cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+		if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT)
+			*pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
+						L_PTE_MT_WRITETHROUGH);
+		else {
+			if (cache_settings & HWMEM_ALLOC_RESERVED_CHI)
+				*pgprot = __pgprot_modify(*pgprot,
+					L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC);
+			else
+				*pgprot = __pgprot_modify(*pgprot,
+					L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
+		}
+	} else {
+		*pgprot = pgprot_writecombine(*pgprot);
+	}
+}
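
With the early_param above, the hwmem carve-out comes straight from the kernel command line in the usual <size>[KMG]@<address> form handled by memparse(); setup_hwmem() then refuses anything that is not page aligned. A boot argument such as hwmem=32M@0x1C000000 (illustrative values, not taken from this patch) would reserve 32 MiB at that physical address. A userspace sketch of the same parse:

	/*
	 * Sketch of the "hwmem=<size>[KMG]@<paddr>" parsing done by
	 * parse_hwmem_param() via memparse().  Values are made up.
	 * Build: gcc -std=c99 -o parse parse.c
	 */
	#include <stdio.h>
	#include <stdlib.h>

	static unsigned long long memparse_demo(const char *s, char **retp)
	{
		unsigned long long v = strtoull(s, retp, 0);

		switch (**retp) {
		case 'G': v <<= 10; /* fall through */
		case 'M': v <<= 10; /* fall through */
		case 'K': v <<= 10; (*retp)++; break;
		default: break;
		}
		return v;
	}

	int main(void)
	{
		const char *arg = "32M@0x1C000000"; /* example only */
		char *p;
		unsigned long long size = memparse_demo(arg, &p);
		unsigned long long paddr = 0;

		if (*p == '@')
			paddr = memparse_demo(p + 1, &p);

		printf("size=%llu bytes, paddr=%#llx\n", size, paddr);
		return 0;
	}
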
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
index 18da2ad7817..c307616a181 100644
--- a/drivers/misc/hwmem/Makefile
+++ b/drivers/misc/hwmem/Makefile
@@ -1,3 +1,3 @@
-hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o
+hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o
 
 obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c
new file mode 100644
index 00000000000..bc71ca08f0f
--- /dev/null
+++ b/drivers/misc/hwmem/contig_alloc.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Contiguous memory allocator
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/sizes.h>
+
+#define MAX_INSTANCE_NAME_LENGTH 31
+
+struct alloc {
+	struct list_head list;
+
+	bool in_use;
+	phys_addr_t paddr;
+	size_t size;
+};
+
+struct instance {
+	struct list_head list;
+
+	char name[MAX_INSTANCE_NAME_LENGTH + 1];
+
+	phys_addr_t region_paddr;
+	void *region_kaddr;
+	size_t region_size;
+
+	struct list_head alloc_list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct inode *debugfs_inode;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static LIST_HEAD(instance_list);
+
+static DEFINE_MUTEX(lock);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+		size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+static int init_alloc_list(struct instance *instance);
+static void clean_alloc_list(struct instance *instance);
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+		size_t size);
+static struct alloc *split_allocation(struct alloc *alloc,
+		size_t new_alloc_size);
+static phys_addr_t get_alloc_offset(struct instance *instance,
+		struct alloc *alloc);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+		size_t region_size)
+{
+	int ret;
+	struct instance *instance;
+	struct vm_struct *vm_area;
+
+	if (region_size == 0)
+		return ERR_PTR(-EINVAL);
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (instance == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH + 1);
+	/* Truncate name if necessary */
+	instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0';
+	instance->region_paddr = region_paddr;
+	instance->region_size = region_size;
+
+	vm_area = get_vm_area(region_size, VM_IOREMAP);
+	if (vm_area == NULL) {
+		printk(KERN_WARNING "CONA: Failed to allocate %u bytes"
+				" kernel virtual memory", region_size);
+		ret = -ENOMSG;
+		goto vmem_alloc_failed;
+	}
+	instance->region_kaddr = vm_area->addr;
+
+	INIT_LIST_HEAD(&instance->alloc_list);
+	ret = init_alloc_list(instance);
+	if (ret < 0)
+		goto init_alloc_list_failed;
+
+	mutex_lock(&lock);
+	list_add_tail(&instance->list, &instance_list);
+	mutex_unlock(&lock);
+
+	return instance;
+
+init_alloc_list_failed:
+	vm_area = remove_vm_area(instance->region_kaddr);
+	if (vm_area == NULL)
+		printk(KERN_ERR "CONA: Failed to free kernel virtual memory,"
+				" resource leak!\n");
+
+	kfree(vm_area);
+vmem_alloc_failed:
+	kfree(instance);
+
+	return ERR_PTR(ret);
+}
+
+void *cona_alloc(void *instance, size_t size)
+{
+	struct instance *instance_l = (struct instance *)instance;
+	struct alloc *alloc;
+
+	if (size == 0)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&lock);
+
+	alloc = find_free_alloc_bestfit(instance_l, size);
+	if (IS_ERR(alloc))
+		goto out;
+	if (size < alloc->size) {
+		alloc = split_allocation(alloc, size);
+		if (IS_ERR(alloc))
+			goto out;
+	} else {
+		alloc->in_use = true;
+	}
+
+out:
+	mutex_unlock(&lock);
+
+	return alloc;
+}
+
+void cona_free(void *instance, void *alloc)
+{
+	struct instance *instance_l = (struct instance *)instance;
+	struct alloc *alloc_l = (struct alloc *)alloc;
+	struct alloc *other;
+
+	mutex_lock(&lock);
+
+	alloc_l->in_use = false;
+
+	other = list_entry(alloc_l->list.prev, struct alloc, list);
+	if ((alloc_l->list.prev != &instance_l->alloc_list) &&
+	    !other->in_use) {
+		other->size += alloc_l->size;
+		list_del(&alloc_l->list);
+		kfree(alloc_l);
+		alloc_l = other;
+	}
+	other = list_entry(alloc_l->list.next, struct alloc, list);
+	if ((alloc_l->list.next != &instance_l->alloc_list) &&
+	    !other->in_use) {
+		alloc_l->size += other->size;
+		list_del(&other->list);
+		kfree(other);
+	}
+
+	mutex_unlock(&lock);
+}
+
+phys_addr_t cona_get_alloc_paddr(void *alloc)
+{
+	return ((struct alloc *)alloc)->paddr;
+}
+
+void *cona_get_alloc_kaddr(void *instance, void *alloc)
+{
+	struct instance *instance_l = (struct instance *)instance;
+
+	return instance_l->region_kaddr + get_alloc_offset(instance_l,
+			(struct alloc *)alloc);
+}
+
+size_t cona_get_alloc_size(void *alloc)
+{
+	return ((struct alloc *)alloc)->size;
+}
+
+static int init_alloc_list(struct instance *instance)
+{
+	/*
+	 * Hack to not get any allocs that cross a 64MiB boundary as B2R2
+	 * can't handle that.
+	 */
+	int ret;
+	u32 curr_pos = instance->region_paddr;
+	u32 region_end = instance->region_paddr + instance->region_size;
+	u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
+	struct alloc *alloc;
+
+	if (PAGE_SIZE >= SZ_64M) {
+		printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n");
+		return -ENOMSG;
+	}
+
+	while (next_64mib_boundary < region_end) {
+		if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
+			alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+			if (alloc == NULL) {
+				ret = -ENOMEM;
+				goto error;
+			}
+			alloc->paddr = curr_pos;
+			alloc->size = next_64mib_boundary - curr_pos -
+					PAGE_SIZE;
+			alloc->in_use = false;
+			list_add_tail(&alloc->list, &instance->alloc_list);
+			curr_pos = alloc->paddr + alloc->size;
+		}
+
+		alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+		if (alloc == NULL) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		alloc->paddr = curr_pos;
+		alloc->size = PAGE_SIZE;
+		alloc->in_use = true;
+		list_add_tail(&alloc->list, &instance->alloc_list);
+		curr_pos = alloc->paddr + alloc->size;
+
+		next_64mib_boundary += SZ_64M;
+	}
+
+	alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+	if (alloc == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	alloc->paddr = curr_pos;
+	alloc->size = region_end - curr_pos;
+	alloc->in_use = false;
+	list_add_tail(&alloc->list, &instance->alloc_list);
+
+	return 0;
+
+error:
+	clean_alloc_list(instance);
+
+	return ret;
+}
+
+static void clean_alloc_list(struct instance *instance)
+{
+	while (list_empty(&instance->alloc_list) == 0) {
+		struct alloc *i = list_first_entry(&instance->alloc_list,
+				struct alloc, list);
+
+		list_del(&i->list);
+
+		kfree(i);
+	}
+}
+
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+		size_t size)
+{
+	size_t best_diff = ~(size_t)0;
+	struct alloc *alloc = NULL, *i;
+
+	list_for_each_entry(i, &instance->alloc_list, list) {
+		size_t diff = i->size - size;
+		if (i->in_use || i->size < size)
+			continue;
+		if (diff < best_diff) {
+			alloc = i;
+			best_diff = diff;
+		}
+	}
+
+	return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
+static struct alloc *split_allocation(struct alloc *alloc,
+		size_t new_alloc_size)
+{
+	struct alloc *new_alloc;
+
+	new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+	if (new_alloc == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	new_alloc->in_use = true;
+	new_alloc->paddr = alloc->paddr;
+	new_alloc->size = new_alloc_size;
+	alloc->size -= new_alloc_size;
+	alloc->paddr += new_alloc_size;
+
+	list_add_tail(&new_alloc->list, &alloc->list);
+
+	return new_alloc;
+}
+
+static phys_addr_t get_alloc_offset(struct instance *instance,
+		struct alloc *alloc)
+{
+	return alloc->paddr - instance->region_paddr;
+}
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size);
+static struct instance *get_instance_from_file(struct file *file);
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+	.owner = THIS_MODULE,
+	.read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		size_t buf_size_l;
+		if (i == 0)
+			buf_size_l = 0;
+		else
+			buf_size_l = buf_size;
+
+		ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t"
+				"in use: %1u\n", alloc->paddr, alloc->size,
+				alloc->in_use);
+		if (ret < 0)
+			return -ENOMSG;
+		else if (ret + 1 > buf_size)
+			return -EINVAL;
+	}
+
+	*buf += ret;
+
+	return 0;
+}
+
+static struct instance *get_instance_from_file(struct file *file)
+{
+	struct instance *curr_instance;
+
+	list_for_each_entry(curr_instance, &instance_list, list) {
+		if (file->f_dentry->d_inode == curr_instance->debugfs_inode)
+			return curr_instance;
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	/*
+	 * We assume the supplied buffer and PAGE_SIZE is large enough to hold
+	 * information about at least one alloc, if not no data will be
+	 * returned.
+	 */
+
+	int ret;
+	struct instance *instance;
+	struct alloc *curr_alloc;
+	char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char *local_buf_pos = local_buf;
+	size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data is initialized to NULL in open which I assume is 0. */
+	void **curr_pos = &file->private_data;
+	size_t bytes_read;
+
+	if (local_buf == NULL)
+		return -ENOMEM;
+
+	mutex_lock(&lock);
+
+	instance = get_instance_from_file(file);
+	if (IS_ERR(instance)) {
+		ret = PTR_ERR(instance);
+		goto out;
+	}
+
+	list_for_each_entry(curr_alloc, &instance->alloc_list, list) {
+		phys_addr_t alloc_offset = get_alloc_offset(instance,
+				curr_alloc);
+		if (alloc_offset < (phys_addr_t)*curr_pos)
+			continue;
+
+		ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
+				(size_t)(local_buf_pos - local_buf));
+		if (ret == -EINVAL) /* No more room */
+			break;
+		else if (ret < 0)
+			goto out;
+
+		/*
+		 * There could be an overflow issue here in the unlikely case
+		 * where the region is placed at the end of the address range
+		 * and the last alloc is 1 byte large. Since this is debug code
+		 * and that case most likely never will happen I've chosen to
+		 * defer fixing it till it happens.
+		 */
+		*curr_pos = (void *)(alloc_offset + 1);
+	}
+
+	bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	ret = copy_to_user(buf, local_buf, bytes_read);
+	if (ret < 0)
+		goto out;
+
+	ret = bytes_read;
+
+out:
+	kfree(local_buf);
+
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
+static int __init init_debugfs(void)
+{
+	struct instance *curr_instance;
+	struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL);
+
+	mutex_lock(&lock);
+
+	list_for_each_entry(curr_instance, &instance_list, list) {
+		struct dentry *file_dentry;
+		char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1];
+		tmp_str[0] = '\0';
+		strcat(tmp_str, curr_instance->name);
+		strcat(tmp_str, "_allocs");
+		file_dentry = debugfs_create_file(tmp_str, 0444,
+				debugfs_root_dir, 0, &debugfs_allocs_fops);
+		if (file_dentry != NULL)
+			curr_instance->debugfs_inode = file_dentry->d_inode;
+	}
+
+	mutex_unlock(&lock);
+
+	return 0;
+}
+/*
+ * Must be executed after all instances have been created, hence the
+ * late_initcall.
+ */
+late_initcall(init_debugfs);
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
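
Taken together, this file is a classic address-ordered best-fit allocator over one physically contiguous region: cona_create() seeds the free list (pre-fragmented so that no block spans a 64 MiB boundary, with a one-page in_use guard block at each boundary for B2R2's benefit), cona_alloc() splits the best-fitting free block from its front, and cona_free() coalesces with free neighbours. A hedged sketch of driving the API the way hwmem-int.c does — region base/size and allocation sizes are illustrative only, and error unwinding is trimmed:

	/* Sketch only: exercises the CONA API declared above with made-up
	 * region values.  Such code would live in board/integration code. */
	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/sizes.h>

	void *cona_create(const char *name, phys_addr_t region_paddr,
			size_t region_size);
	void *cona_alloc(void *instance, size_t size);
	void cona_free(void *instance, void *alloc);
	phys_addr_t cona_get_alloc_paddr(void *alloc);
	size_t cona_get_alloc_size(void *alloc);

	static int __init cona_demo(void)
	{
		/* Example region: 16 MiB at 0x20000000 (invented values). */
		void *inst = cona_create("demo", 0x20000000, SZ_16M);
		void *a, *b;

		if (IS_ERR(inst))
			return PTR_ERR(inst);

		a = cona_alloc(inst, SZ_1M);	/* splits the big free block */
		b = cona_alloc(inst, SZ_4M);	/* best fit among what's left */
		if (IS_ERR(a) || IS_ERR(b))
			return -ENOMEM;

		pr_info("a: paddr %#x, size %u\n",
				(unsigned int)cona_get_alloc_paddr(a),
				(unsigned int)cona_get_alloc_size(a));

		cona_free(inst, a);	/* merges back into the free list */
		cona_free(inst, b);

		return 0;
	}
	device_initcall(cona_demo);
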
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
index 96cca3735dd..9162ff4fc91 100644
--- a/drivers/misc/hwmem/hwmem-main.c
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -25,11 +25,13 @@
 #include <linux/hwmem.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
 #include <linux/io.h>
-#include <asm/sizes.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
 
 #include "cache_handler.h"
 
+#define S32_MAX 2147483647
+
 struct hwmem_alloc_threadg_info {
 	struct list_head list;
 
@@ -42,10 +44,14 @@ struct hwmem_alloc {
 	struct list_head list;
 
 	atomic_t ref_cnt;
+
 	enum hwmem_alloc_flags flags;
-	u32 paddr;
+	struct hwmem_mem_type_struct *mem_type;
+
+	void *allocator_hndl;
+	phys_addr_t paddr;
 	void *kaddr;
-	u32 size;
+	size_t size;
 	s32 name;
 
 	/* Access control */
@@ -54,14 +60,16 @@ struct hwmem_alloc {
 
 	/* Cache handling */
 	struct cach_buf cach_buf;
+
+#ifdef CONFIG_DEBUG_FS
+	/* Debug */
+	void *creator;
+	pid_t creator_tgid;
+#endif /* #ifdef CONFIG_DEBUG_FS */
 };
 
 static struct platform_device *hwdev;
 
-static u32 hwmem_paddr;
-static void *hwmem_kaddr;
-static u32 hwmem_size;
-
 static LIST_HEAD(alloc_list);
 static DEFINE_IDR(global_idr);
 static DEFINE_MUTEX(lock);
@@ -73,28 +81,11 @@ static struct vm_operations_struct vm_ops = {
 	.close = vm_close,
 };
 
-#ifdef CONFIG_DEBUG_FS
-
-static int debugfs_allocs_read(struct file *filp, char __user *buf,
-		size_t count, loff_t *f_pos);
-
-static const struct file_operations debugfs_allocs_fops = {
-	.owner = THIS_MODULE,
-	.read = debugfs_allocs_read,
-};
-
-#endif /* #ifdef CONFIG_DEBUG_FS */
-
-static void clean_alloc_list(void);
 static void kunmap_alloc(struct hwmem_alloc *alloc);
 
 /* Helpers */
 
-static u32 get_alloc_offset(struct hwmem_alloc *alloc)
-{
-	return alloc->paddr - hwmem_paddr;
-}
-
-static void destroy_hwmem_alloc_threadg_info(
+static void destroy_alloc_threadg_info(
 	struct hwmem_alloc_threadg_info *info)
 {
 	if (info->threadg_pid)
@@ -103,14 +94,15 @@ static void destroy_hwmem_alloc_threadg_info(
 	kfree(info);
 }
 
-static void clean_hwmem_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
 {
 	struct hwmem_alloc_threadg_info *info;
 	struct hwmem_alloc_threadg_info *tmp;
 
-	list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list), list) {
+	list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list),
+			list) {
 		list_del(&info->list);
-		destroy_hwmem_alloc_threadg_info(info);
+		destroy_alloc_threadg_info(info);
 	}
 }
@@ -147,213 +139,45 @@ static void clear_alloc_mem(struct hwmem_alloc *alloc)
 	memset(alloc->kaddr, 0, alloc->size);
 }
 
-static void clean_alloc(struct hwmem_alloc *alloc)
+static void destroy_alloc(struct hwmem_alloc *alloc)
 {
-	if (alloc->name) {
+	list_del(&alloc->list);
+
+	if (alloc->name != 0) {
 		idr_remove(&global_idr, alloc->name);
 		alloc->name = 0;
 	}
 
-	alloc->flags = 0;
-	atomic_set(&alloc->ref_cnt, 0);
-
-	clean_hwmem_alloc_threadg_info_list(alloc);
+	clean_alloc_threadg_info_list(alloc);
 
 	kunmap_alloc(alloc);
-}
 
-static void destroy_alloc(struct hwmem_alloc *alloc)
-{
-	clean_alloc(alloc);
+	if (!IS_ERR_OR_NULL(alloc->allocator_hndl))
+		alloc->mem_type->allocator_api.free(
+				alloc->mem_type->allocator_instance,
+				alloc->allocator_hndl);
 
 	kfree(alloc);
 }
 
-static void __hwmem_release(struct hwmem_alloc *alloc)
-{
-	struct hwmem_alloc *other;
-
-	clean_alloc(alloc);
-
-	other = list_entry(alloc->list.prev, struct hwmem_alloc, list);
-	if ((alloc->list.prev != &alloc_list) &&
-			atomic_read(&other->ref_cnt) == 0) {
-		other->size += alloc->size;
-		list_del(&alloc->list);
-		destroy_alloc(alloc);
-		alloc = other;
-	}
-	other = list_entry(alloc->list.next, struct hwmem_alloc, list);
-	if ((alloc->list.next != &alloc_list) &&
-			atomic_read(&other->ref_cnt) == 0) {
-		alloc->size += other->size;
-		list_del(&other->list);
-		destroy_alloc(other);
-	}
-}
-
-static struct hwmem_alloc *find_free_alloc_bestfit(u32 size)
-{
-	u32 best_diff = ~0;
-	struct hwmem_alloc *alloc = NULL, *i;
-
-	list_for_each_entry(i, &alloc_list, list) {
-		u32 diff = i->size - size;
-		if (atomic_read(&i->ref_cnt) > 0 || i->size < size)
-			continue;
-		if (diff < best_diff) {
-			alloc = i;
-			best_diff = diff;
-		}
-	}
-
-	return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
-}
-
-static struct hwmem_alloc *split_allocation(struct hwmem_alloc *alloc,
-		u32 new_alloc_size)
-{
-	struct hwmem_alloc *new_alloc;
-
-	new_alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
-	if (new_alloc == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	atomic_inc(&new_alloc->ref_cnt);
-	INIT_LIST_HEAD(&new_alloc->threadg_info_list);
-	new_alloc->paddr = alloc->paddr;
-	new_alloc->size = new_alloc_size;
-	alloc->size -= new_alloc_size;
-	alloc->paddr += new_alloc_size;
-
-	list_add_tail(&new_alloc->list, &alloc->list);
-
-	return new_alloc;
-}
-
-static int init_alloc_list(void)
-{
-	/*
-	 * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't
-	 * handle that.
-	 */
-	int ret;
-	u32 curr_pos = hwmem_paddr;
-	u32 hwmem_end = hwmem_paddr + hwmem_size;
-	u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
-	struct hwmem_alloc *alloc;
-
-	if (PAGE_SIZE >= SZ_64M) {
-		dev_err(&hwdev->dev, "PAGE_SIZE >= SZ_64M\n");
-		return -ENOMSG;
-	}
-
-	while (next_64mib_boundary < hwmem_end) {
-		if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
-			alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
-			if (alloc == NULL) {
-				ret = -ENOMEM;
-				goto error;
-			}
-			alloc->paddr = curr_pos;
-			alloc->size = next_64mib_boundary - curr_pos -
-					PAGE_SIZE;
-			INIT_LIST_HEAD(&alloc->threadg_info_list);
-			list_add_tail(&alloc->list, &alloc_list);
-			curr_pos = alloc->paddr + alloc->size;
-		}
-
-		alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
-		if (alloc == NULL) {
-			ret = -ENOMEM;
-			goto error;
-		}
-		alloc->paddr = curr_pos;
-		alloc->size = PAGE_SIZE;
-		atomic_inc(&alloc->ref_cnt);
-		INIT_LIST_HEAD(&alloc->threadg_info_list);
-		list_add_tail(&alloc->list, &alloc_list);
-		curr_pos = alloc->paddr + alloc->size;
-
-		next_64mib_boundary += SZ_64M;
-	}
-
-	alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
-	if (alloc == NULL) {
-		ret = -ENOMEM;
-		goto error;
-	}
-	alloc->paddr = curr_pos;
-	alloc->size = hwmem_end - curr_pos;
-	INIT_LIST_HEAD(&alloc->threadg_info_list);
-	list_add_tail(&alloc->list, &alloc_list);
-
-	return 0;
-
-error:
-	clean_alloc_list();
-
-	return ret;
-}
-
-static void clean_alloc_list(void)
-{
-	while (list_empty(&alloc_list) == 0) {
-		struct hwmem_alloc *i = list_first_entry(&alloc_list,
-				struct hwmem_alloc, list);
-
-		list_del(&i->list);
-
-		destroy_alloc(i);
-	}
-}
-
-static int alloc_kaddrs(void)
-{
-	struct vm_struct *area = get_vm_area(hwmem_size, VM_IOREMAP);
-	if (area == NULL) {
-		dev_info(&hwdev->dev, "Failed to allocate %u bytes kernel"
-				" virtual memory", hwmem_size);
-		return -ENOMSG;
-	}
-
-	hwmem_kaddr = area->addr;
-
-	return 0;
-}
-
-static void free_kaddrs(void)
-{
-	struct vm_struct *area;
-
-	if (hwmem_kaddr == NULL)
-		return;
-
-	area = remove_vm_area(hwmem_kaddr);
-	if (area == NULL)
-		dev_err(&hwdev->dev,
-			"Failed to free kernel virtual memory,"
-			" resource leak!\n");
-
-	kfree(area);
-
-	hwmem_kaddr = NULL;
-}
-
 static int kmap_alloc(struct hwmem_alloc *alloc)
 {
 	int ret;
 	pgprot_t pgprot;
-	void *alloc_kaddr = hwmem_kaddr + get_alloc_offset(alloc);
+	void *alloc_kaddr;
+
+	alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr(
+			alloc->mem_type->allocator_instance, alloc->allocator_hndl);
+	if (IS_ERR(alloc_kaddr))
+		return PTR_ERR(alloc_kaddr);
 
 	pgprot = PAGE_KERNEL;
 	cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot);
 
 	ret = ioremap_page_range((unsigned long)alloc_kaddr,
-		(unsigned long)alloc_kaddr + alloc->size, alloc->paddr,
-		pgprot);
+		(unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot);
 	if (ret < 0) {
-		dev_info(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
+		dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
 				alloc->paddr + alloc->size);
 		return ret;
 	}
@@ -369,20 +193,33 @@ static void kunmap_alloc(struct hwmem_alloc *alloc)
 		return;
 
 	unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size);
+	alloc->kaddr = NULL;
 }
 
+static struct hwmem_mem_type_struct *resolve_mem_type(
+		enum hwmem_mem_type mem_type)
+{
+	unsigned int i;
+	for (i = 0; i < hwmem_num_mem_types; i++) {
+		if (hwmem_mem_types[i].id == mem_type)
+			return &hwmem_mem_types[i];
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
 /* HWMEM API */
 
-struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
 		enum hwmem_access def_access, enum hwmem_mem_type mem_type)
 {
-	struct hwmem_alloc *alloc;
 	int ret;
+	struct hwmem_alloc *alloc;
 
-	if (!hwdev) {
-		printk(KERN_ERR "hwmem: Badly configured\n");
-		return ERR_PTR(-EINVAL);
+	if (hwdev == NULL) {
+		printk(KERN_ERR "HWMEM: Badly configured\n");
+		return ERR_PTR(-ENOMSG);
 	}
 
 	if (size == 0)
@@ -392,38 +229,56 @@ struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
 
 	size = PAGE_ALIGN(size);
 
-	alloc = find_free_alloc_bestfit(size);
-	if (IS_ERR(alloc)) {
-		dev_info(&hwdev->dev, "Could not find slot for %u bytes"
-				" allocation\n", size);
-		goto no_slot;
-	}
-
-	if (size < alloc->size) {
-		alloc = split_allocation(alloc, size);
-		if (IS_ERR(alloc))
-			goto split_alloc_failed;
-	} else {
-		atomic_inc(&alloc->ref_cnt);
+	alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+	if (alloc == NULL) {
+		ret = -ENOMEM;
+		goto alloc_alloc_failed;
 	}
 
+	INIT_LIST_HEAD(&alloc->list);
+	atomic_inc(&alloc->ref_cnt);
 	alloc->flags = flags;
 	alloc->default_access = def_access;
+	INIT_LIST_HEAD(&alloc->threadg_info_list);
+	alloc->creator = __builtin_return_address(0);
+	alloc->creator_tgid = task_tgid_nr(current);
+
+	alloc->mem_type = resolve_mem_type(mem_type);
+	if (IS_ERR(alloc->mem_type)) {
+		ret = PTR_ERR(alloc->mem_type);
+		goto resolve_mem_type_failed;
+	}
+
+	alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc(
+			alloc->mem_type->allocator_instance, size);
+	if (IS_ERR(alloc->allocator_hndl)) {
+		ret = PTR_ERR(alloc->allocator_hndl);
+		goto allocator_failed;
+	}
+
+	alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr(
+			alloc->allocator_hndl);
+	alloc->size = alloc->mem_type->allocator_api.get_alloc_size(
+			alloc->allocator_hndl);
+
 	cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size);
 	ret = kmap_alloc(alloc);
 	if (ret < 0)
 		goto kmap_alloc_failed;
 	cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr);
 
+	list_add_tail(&alloc->list, &alloc_list);
+
 	clear_alloc_mem(alloc);
 
 	goto out;
 
 kmap_alloc_failed:
-	__hwmem_release(alloc);
+allocator_failed:
+resolve_mem_type_failed:
+	destroy_alloc(alloc);
+alloc_alloc_failed:
 	alloc = ERR_PTR(ret);
-split_alloc_failed:
-no_slot:
 
 out:
 	mutex_unlock(&lock);
@@ -437,7 +292,7 @@ void hwmem_release(struct hwmem_alloc *alloc)
 	mutex_lock(&lock);
 
 	if (atomic_dec_and_test(&alloc->ref_cnt))
-		__hwmem_release(alloc);
+		destroy_alloc(alloc);
 
 	mutex_unlock(&lock);
 }
@@ -457,7 +312,7 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
 EXPORT_SYMBOL(hwmem_set_domain);
 
 int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
-		u32 *mem_chunks_length)
+	u32 *mem_chunks_length)
 {
 	if (*mem_chunks_length < 1) {
 		*mem_chunks_length = 1;
@@ -615,7 +470,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
 	if (size != NULL)
 		*size = alloc->size;
 	if (mem_type != NULL)
-		*mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
+		*mem_type = alloc->mem_type->id;
 	if (access != NULL)
 		*access = get_access(alloc);
@@ -623,7 +478,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
 }
 EXPORT_SYMBOL(hwmem_get_info);
 
-int hwmem_get_name(struct hwmem_alloc *alloc)
+s32 hwmem_get_name(struct hwmem_alloc *alloc)
 {
 	int ret = 0, name;
 
@@ -647,11 +502,18 @@ int hwmem_get_name(struct hwmem_alloc *alloc)
 		goto get_id_failed;
 	}
 
+	if (name > S32_MAX) {
+		ret = -ENOMSG;
+		goto overflow;
+	}
+
 	alloc->name = name;
 
 	ret = name;
 	goto out;
 
+overflow:
+	idr_remove(&global_idr, name);
 get_id_failed:
 pre_get_id_failed:
@@ -688,29 +550,62 @@ EXPORT_SYMBOL(hwmem_resolve_by_name);
 
 /* Debug */
 
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+	.owner = THIS_MODULE,
+	.read = debugfs_allocs_read,
+};
+
 static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size)
 {
 	int ret;
-
-	if (buf_size < 134)
-		return -EINVAL;
-
-	ret = sprintf(*buf, "paddr: %#10x\tsize: %10u\tref cnt: %2i\t"
-			"name: %#10x\tflags: %#4x\t$ settings: %#4x\t"
-			"def acc: %#3x\n", alloc->paddr, alloc->size,
-			atomic_read(&alloc->ref_cnt), alloc->name,
-			alloc->flags, alloc->cach_buf.cache_settings,
-			alloc->default_access);
-	if (ret < 0)
-		return -ENOMSG;
+	char creator[KSYM_SYMBOL_LEN];
+	int i;
+
+	if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0)
+		creator[0] = '\0';
+
+	for (i = 0; i < 2; i++) {
+		size_t buf_size_l;
+		if (i == 0)
+			buf_size_l = 0;
+		else
+			buf_size_l = buf_size;
+
+		ret = snprintf(*buf, buf_size_l,
+			"%#x\n"
+			"\tSize: %u\n"
+			"\tMemory type: %u\n"
+			"\tName: %#x\n"
+			"\tReference count: %i\n"
+			"\tAllocation flags: %#x\n"
+			"\t$ settings: %#x\n"
+			"\tDefault access: %#x\n"
+			"\tPhysical address: %#x\n"
+			"\tKernel virtual address: %#x\n"
+			"\tCreator: %s\n"
+			"\tCreator thread group id: %u\n",
+			(unsigned int)alloc, alloc->size, alloc->mem_type->id,
+			alloc->name, atomic_read(&alloc->ref_cnt),
+			alloc->flags, alloc->cach_buf.cache_settings,
+			alloc->default_access, alloc->paddr,
+			(unsigned int)alloc->kaddr, creator,
+			alloc->creator_tgid);
+		if (ret < 0)
+			return -ENOMSG;
+		else if (ret + 1 > buf_size)
+			return -EINVAL;
+	}
 
 	*buf += ret;
 
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-
 static int debugfs_allocs_read(struct file *file, char __user *buf,
 		size_t count, loff_t *f_pos)
 {
@@ -721,12 +616,13 @@ static int debugfs_allocs_read(struct file *file, char __user *buf,
 	 */
 
 	int ret;
+	size_t i = 0;
 	struct hwmem_alloc *curr_alloc;
 	char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	char *local_buf_pos = local_buf;
 	size_t available_space = min((size_t)PAGE_SIZE, count);
 	/* private_data is initialized to NULL in open which I assume is 0.
	 */
-	u32 *curr_pos = (u32 *)&file->private_data;
+	void **curr_pos = &file->private_data;
 	size_t bytes_read;
 
 	if (local_buf == NULL)
@@ -735,9 +631,7 @@ static int debugfs_allocs_read(struct file *file, char __user *buf,
 	mutex_lock(&lock);
 
 	list_for_each_entry(curr_alloc, &alloc_list, list) {
-		u32 alloc_offset = get_alloc_offset(curr_alloc);
-
-		if (alloc_offset < *curr_pos)
+		if (i++ < (size_t)*curr_pos)
 			continue;
 
 		ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
@@ -747,7 +641,7 @@ static int debugfs_allocs_read(struct file *file, char __user *buf,
 		else if (ret < 0)
 			goto out;
 
-		*curr_pos = alloc_offset + 1;
+		*curr_pos = (void *)i;
 	}
 
 	bytes_read = (size_t)(local_buf_pos - local_buf);
@@ -779,39 +673,17 @@ static void init_debugfs(void)
 
 /* Module */
 
 extern int hwmem_ioctl_init(void);
-extern void hwmem_ioctl_exit(void);
 
 static int __devinit hwmem_probe(struct platform_device *pdev)
 {
-	int ret = 0;
-	struct hwmem_platform_data *platform_data = pdev->dev.platform_data;
-
-	if (sizeof(int) != 4 || sizeof(phys_addr_t) < 4 ||
-		sizeof(void *) < 4 || sizeof(size_t) != 4) {
-		dev_err(&pdev->dev, "sizeof(int) != 4 || sizeof(phys_addr_t)"
-			" < 4 || sizeof(void *) < 4 || sizeof(size_t) !="
-			" 4\n");
-		return -ENOMSG;
-	}
+	int ret;
 
-	if (hwdev || platform_data->size == 0 ||
-		platform_data->start != PAGE_ALIGN(platform_data->start) ||
-		platform_data->size != PAGE_ALIGN(platform_data->size)) {
-		dev_err(&pdev->dev, "hwdev || platform_data->size == 0 ||"
-			"platform_data->start !="
-			" PAGE_ALIGN(platform_data->start) ||"
-			"platform_data->size !="
-			" PAGE_ALIGN(platform_data->size)\n");
+	if (hwdev) {
+		dev_err(&pdev->dev, "Probed multiple times\n");
 		return -EINVAL;
 	}
 
 	hwdev = pdev;
-	hwmem_paddr = platform_data->start;
-	hwmem_size = platform_data->size;
-
-	ret = alloc_kaddrs();
-	if (ret < 0)
-		goto alloc_kaddrs_failed;
 
 	/*
 	 * No need to flush the caches here. If we can keep track of the cache
 	 *
@@ -820,32 +692,18 @@ static int __devinit hwmem_probe(struct platform_device *pdev)
 	 * in the caches.
 	 */
 
-	ret = init_alloc_list();
-	if (ret < 0)
-		goto init_alloc_list_failed;
-
 	ret = hwmem_ioctl_init();
-	if (ret)
-		goto ioctl_init_failed;
+	if (ret < 0)
+		dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing"
+				" anyway\n");
 
 #ifdef CONFIG_DEBUG_FS
 	init_debugfs();
 #endif
 
-	dev_info(&pdev->dev, "Hwmem probed, device contains %#x bytes\n",
-			hwmem_size);
-
-	goto out;
-
-ioctl_init_failed:
-	clean_alloc_list();
-init_alloc_list_failed:
-	free_kaddrs();
-alloc_kaddrs_failed:
-	hwdev = NULL;
+	dev_info(&pdev->dev, "Probed OK\n");
 
-out:
-	return ret;
+	return 0;
 }
 
 static struct platform_driver hwmem_driver = {
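
After this rework a kernel-side client is unchanged apart from the size type: it still allocates by size, hint flags, default access and memory type, but the buffer now comes from whichever allocator instance backs that type. A hedged sketch of typical usage (the HWMEM_ACCESS_* values and the hwmem_get_info() pointer types are taken to match the existing hwmem API, which this patch only partially shows):

	/* Sketch: client-side view of the API; error handling minimal. */
	#include <linux/hwmem.h>
	#include <linux/err.h>
	#include <asm/sizes.h>

	static int hwmem_client_demo(void)
	{
		struct hwmem_alloc *alloc;
		u32 size;
		enum hwmem_mem_type mem_type;
		enum hwmem_access access;

		/* 1 MiB, write-combined; values are illustrative. */
		alloc = hwmem_alloc(SZ_1M, HWMEM_ALLOC_HINT_WRITE_COMBINE,
				HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
				HWMEM_MEM_CONTIGUOUS_SYS);
		if (IS_ERR(alloc))
			return PTR_ERR(alloc);

		hwmem_get_info(alloc, &size, &mem_type, &access);
		/* ... pin, map and use the buffer ... */

		hwmem_release(alloc);

		return 0;
	}
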
diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h
index a2eb91d9d9a..ba4c116f4b9 100644
--- a/include/linux/hwmem.h
+++ b/include/linux/hwmem.h
@@ -569,15 +569,25 @@ s32 hwmem_get_name(struct hwmem_alloc *alloc);
  */
 struct hwmem_alloc *hwmem_resolve_by_name(s32 name);
 
-/* Internal */
+/* Integration */
+
+struct hwmem_allocator_api {
+	void *(*alloc)(void *instance, size_t size);
+	void (*free)(void *instance, void *alloc);
+	phys_addr_t (*get_alloc_paddr)(void *alloc);
+	void *(*get_alloc_kaddr)(void *instance, void *alloc);
+	size_t (*get_alloc_size)(void *alloc);
+};
 
-struct hwmem_platform_data {
-	/* Physical address of memory region */
-	u32 start;
-	/* Size of memory region */
-	u32 size;
+struct hwmem_mem_type_struct {
+	enum hwmem_mem_type id;
+	struct hwmem_allocator_api allocator_api;
+	void *allocator_instance;
 };
 
+extern struct hwmem_mem_type_struct *hwmem_mem_types;
+extern unsigned int hwmem_num_mem_types;
+
 #endif
 
 #endif /* _HWMEM_H_ */
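
The net effect of the header change: integration code no longer hands hwmem a hwmem_platform_data region; instead it publishes an allocator table before hwmem probes, exactly as setup_hwmem() in hwmem-int.c does for CONA. A hedged sketch of wiring up a hypothetical alternative backend (every myalloc_* name below is invented):

	/* Sketch: registering a hypothetical allocator with hwmem via the
	 * new integration API.  The myalloc_* symbols are placeholders for
	 * a real implementation. */
	#include <linux/hwmem.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	extern void *myalloc_instance;
	void *myalloc_alloc(void *instance, size_t size);
	void myalloc_free(void *instance, void *alloc);
	phys_addr_t myalloc_paddr(void *alloc);
	void *myalloc_kaddr(void *instance, void *alloc);
	size_t myalloc_size(void *alloc);

	static int __init register_myalloc_with_hwmem(void)
	{
		struct hwmem_mem_type_struct *types;

		types = kzalloc(sizeof(*types), GFP_KERNEL);
		if (types == NULL)
			return -ENOMEM;

		types[0].id = HWMEM_MEM_CONTIGUOUS_SYS;
		types[0].allocator_api.alloc = myalloc_alloc;
		types[0].allocator_api.free = myalloc_free;
		types[0].allocator_api.get_alloc_paddr = myalloc_paddr;
		types[0].allocator_api.get_alloc_kaddr = myalloc_kaddr;
		types[0].allocator_api.get_alloc_size = myalloc_size;
		types[0].allocator_instance = myalloc_instance;

		hwmem_mem_types = types;
		hwmem_num_mem_types = 1;

		return 0;
	}
	/* Must run before hwmem_probe() and any hwmem_alloc() call, hence
	 * the same initcall level the patch uses for setup_hwmem(). */
	arch_initcall_sync(register_myalloc_with_hwmem);
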