From e929ec194c1ebec41672965492f4e5de857f3909 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 20 Oct 2011 10:37:40 +0200 Subject: hwmem: Add hardware memory driver to kernel This driver provides a way to allocate contiguous system memory which can be used by hardware. Signed-off-by: Robert Marklund --- drivers/misc/hwmem/Makefile | 3 + drivers/misc/hwmem/cache_handler.c | 507 ++++++++++++++++++++++ drivers/misc/hwmem/cache_handler.h | 61 +++ drivers/misc/hwmem/hwmem-ioctl.c | 477 +++++++++++++++++++++ drivers/misc/hwmem/hwmem-main.c | 833 +++++++++++++++++++++++++++++++++++++ include/linux/hwmem.h | 519 +++++++++++++++++++++++ 6 files changed, 2400 insertions(+) create mode 100644 drivers/misc/hwmem/Makefile create mode 100644 drivers/misc/hwmem/cache_handler.c create mode 100644 drivers/misc/hwmem/cache_handler.h create mode 100644 drivers/misc/hwmem/hwmem-ioctl.c create mode 100644 drivers/misc/hwmem/hwmem-main.c create mode 100644 include/linux/hwmem.h diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile new file mode 100644 index 00000000000..18da2ad7817 --- /dev/null +++ b/drivers/misc/hwmem/Makefile @@ -0,0 +1,3 @@ +hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o + +obj-$(CONFIG_HWMEM) += hwmem.o diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c new file mode 100644 index 00000000000..b313da36aa4 --- /dev/null +++ b/drivers/misc/hwmem/cache_handler.c @@ -0,0 +1,507 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include + +#include + +#include + +#include "cache_handler.h" + +#define U32_MAX (~(u32)0) + +enum hwmem_alloc_flags cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings); +void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, + pgprot_t *pgprot); + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region); +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region); + +static void invalidate_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void clean_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void flush_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); + +static void null_range(struct cach_range *range); +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add); +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. 
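+ * A worked example is given at the function definition further below.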
+ */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range); +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove); +static bool is_non_empty_range(struct cach_range *range); +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection); +/* Align_up restrictions apply here to */ +static void align_range_up(struct cach_range *range, u32 alignment); +static u32 range_length(struct cach_range *range); +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range); + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset); +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset); + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment); +static u32 align_down(u32 value, u32 alignment); + +static bool is_wb(enum hwmem_alloc_flags cache_settings); +static bool is_inner_only(enum hwmem_alloc_flags cache_settings); + +/* + * Exported functions + */ + +void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings, + u32 size) +{ + buf->vstart = NULL; + buf->pstart = 0; + buf->size = size; + + buf->cache_settings = cachi_get_cache_settings(cache_settings); +} + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr) +{ + bool tmp; + + buf->vstart = vaddr; + buf->pstart = paddr; + + if (buf->cache_settings & HWMEM_ALLOC_CACHED) { + /* + * Keep whatever is in the cache. This way we avoid an + * unnecessary synch if CPU is the first user. + */ + buf->range_in_cpu_cache.start = 0; + buf->range_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_in_cpu_cache, + get_dcache_granularity()); + buf->range_dirty_in_cpu_cache.start = 0; + buf->range_dirty_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_dirty_in_cpu_cache, + get_dcache_granularity()); + } else { + flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false, + &tmp); + drain_cpu_write_buf(); + + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } + null_range(&buf->range_invalid_in_cpu_cache); +} + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot) +{ + cachi_set_pgprot_cache_options(buf->cache_settings, pgprot); +} + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + struct hwmem_region *__region; + struct hwmem_region full_region; + + if (region != NULL) + __region = region; + else { + full_region.offset = 0; + full_region.count = 1; + full_region.start = 0; + full_region.end = buf->size; + full_region.size = buf->size; + + __region = &full_region; + } + + switch (domain) { + case HWMEM_DOMAIN_SYNC: + sync_buf_post_cpu(buf, access, __region); + + break; + + case HWMEM_DOMAIN_CPU: + sync_buf_pre_cpu(buf, access, __region); + + break; + } +} + +/* + * Local functions + */ + +enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings) +{ + enum hwmem_alloc_flags cache_settings = + requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK; + + if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { + /* + * If the alloc is cached we'll use the default setting. We + * don't know what this setting is so we have to assume the + * worst case, ie write back inner and outer. 
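+ * Write back is the conservative choice: dirty lines must be
+ * cleaned before hardware reads the buffer, on top of the
+ * invalidates needed after hardware writes it.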
+ */ + cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB; + } + + return cache_settings; +} + +void __attribute__((weak)) cachi_set_pgprot_cache_options( + enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot) +{ + if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) + *pgprot = *pgprot; /* To silence compiler and checkpatch */ + else if (cache_settings & HWMEM_ALLOC_BUFFERED) + *pgprot = pgprot_writecombine(*pgprot); + else + *pgprot = pgprot_noncached(*pgprot); +} + +bool __attribute__((weak)) speculative_data_prefetch(void) +{ + /* We don't know so we go with the safe alternative */ + return true; +} + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region) +{ + bool write = access & HWMEM_ACCESS_WRITE; + bool read = access & HWMEM_ACCESS_READ; + + if (!write && !read) + return; + + if ((buf->cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { + struct cach_range region_range; + + region_2_range(region, buf->size, ®ion_range); + + if (read || (write && is_wb(buf->cache_settings))) + /* Perform defered invalidates */ + invalidate_cpu_cache(buf, ®ion_range); + if (read) + expand_range(&buf->range_in_cpu_cache, ®ion_range); + if (write && is_wb(buf->cache_settings)) { + expand_range(&buf->range_in_cpu_cache, ®ion_range); + expand_range(&buf->range_dirty_in_cpu_cache, + ®ion_range); + } + } + if (buf->cache_settings & HWMEM_ALLOC_BUFFERED) { + if (write) + buf->in_cpu_write_buf = true; + } +} + +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region) +{ + bool write = next_access & HWMEM_ACCESS_WRITE; + bool read = next_access & HWMEM_ACCESS_READ; + struct cach_range region_range; + + if (!write && !read) + return; + + region_2_range(next_region, buf->size, ®ion_range); + + if (write) { + if (speculative_data_prefetch()) { + /* Defer invalidate */ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &intersection); + + expand_range(&buf->range_invalid_in_cpu_cache, + &intersection); + + clean_cpu_cache(buf, ®ion_range); + } else + flush_cpu_cache(buf, ®ion_range); + } + if (read) + clean_cpu_cache(buf, ®ion_range); + + if (buf->in_cpu_write_buf) { + drain_cpu_write_buf(); + + buf->in_cpu_write_buf = false; + } +} + +static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_invalid_in_cpu_cache, range, + &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, + &buf->range_invalid_in_cpu_cache); + + /* + * Cache handler never uses invalidate to discard data in the + * cache so we can use flush instead which is considerably + * faster for large buffers. + */ + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + is_inner_only(buf->cache_settings), + &flushed_everything); + + if (flushed_everything) { + null_range(&buf->range_invalid_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } else + /* + * No need to shrink range_in_cpu_cache as invalidate + * is only used when we can't keep track of what's in + * the CPU cache. 
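+ * That is, when speculative_data_prefetch() returns true
+ * range_in_cpu_cache permanently covers the whole buffer,
+ * so there is nothing to shrink.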
+ */ + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } +} + +static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool cleaned_everything; + + expand_range_2_edge(&intersection, + &buf->range_dirty_in_cpu_cache); + + clean_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + is_inner_only(buf->cache_settings), + &cleaned_everything); + + if (cleaned_everything) + null_range(&buf->range_dirty_in_cpu_cache); + else + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + } +} + +static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, &buf->range_in_cpu_cache); + + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + is_inner_only(buf->cache_settings), + &flushed_everything); + + if (flushed_everything) { + if (!speculative_data_prefetch()) + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + null_range(&buf->range_invalid_in_cpu_cache); + } else { + if (!speculative_data_prefetch()) + shrink_range(&buf->range_in_cpu_cache, + &intersection); + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } + } +} + +static void null_range(struct cach_range *range) +{ + range->start = U32_MAX; + range->end = 0; +} + +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add) +{ + range->start = min(range->start, range_2_add->start); + range->end = max(range->end, range_2_add->end); +} + +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. 
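+ * For example (illustrative): expanding [4, 6) inside [0, 16) yields
+ * [0, 6), since the low side (4 bytes away) is closer than the high
+ * side (10 bytes away).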
+ */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range) +{ + u32 space_on_low_side = range->start - enclosing_range->start; + u32 space_on_high_side = enclosing_range->end - range->end; + + if (space_on_low_side < space_on_high_side) + range->start = enclosing_range->start; + else + range->end = enclosing_range->end; +} + +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove) +{ + if (range_2_remove->start > range->start) + range->end = min(range->end, range_2_remove->start); + else + range->start = max(range->start, range_2_remove->end); + + if (range->start >= range->end) + null_range(range); +} + +static bool is_non_empty_range(struct cach_range *range) +{ + return range->end > range->start; +} + +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection) +{ + intersection->start = max(range_1->start, range_2->start); + intersection->end = min(range_1->end, range_2->end); + + if (intersection->start >= intersection->end) + null_range(intersection); +} + +/* Align_up restrictions apply here to */ +static void align_range_up(struct cach_range *range, u32 alignment) +{ + if (!is_non_empty_range(range)) + return; + + range->start = align_down(range->start, alignment); + range->end = align_up(range->end, alignment); +} + +static u32 range_length(struct cach_range *range) +{ + if (is_non_empty_range(range)) + return range->end - range->start; + else + return 0; +} + +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range) +{ + /* + * We don't care about invalid regions, instead we limit the region's + * range to the buffer's range. This should work good enough, worst + * case we synch the entire buffer when we get an invalid region which + * is acceptable. 
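+ * For example, the region {offset = 60, count = 4, start = 2, end = 10,
+ * size = 20} pictured in hwmem.h maps to the byte range
+ * [62, min(130, buffer_size)) before cache-line alignment.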
+ */ + range->start = region->offset + region->start; + range->end = min(region->offset + (region->count * region->size) - + (region->size - region->end), buffer_size); + if (range->start >= range->end) { + null_range(range); + return; + } + + align_range_up(range, get_dcache_granularity()); +} + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset) +{ + return (void *)((u32)buf->vstart + offset); +} + +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset) +{ + return buf->pstart + offset; +} + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + u32 value_2_add; + + if (remainder == 0) + return value; + + value_2_add = alignment - remainder; + + if (value_2_add > U32_MAX - value) /* Will overflow */ + return U32_MAX; + + return value + value_2_add; +} + +static u32 align_down(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + if (remainder == 0) + return value; + + return value - remainder; +} + +static bool is_wb(enum hwmem_alloc_flags cache_settings) +{ + u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; + if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WB || + cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER) + return true; + else + return false; +} + +static bool is_inner_only(enum hwmem_alloc_flags cache_settings) +{ + u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; + if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER || + cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER) + return true; + else + return false; +} diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h new file mode 100644 index 00000000000..792105196fa --- /dev/null +++ b/drivers/misc/hwmem/cache_handler.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +/* + * Cache handler can not handle simultaneous execution! The caller has to + * ensure such a situation does not occur. + */ + +#ifndef _CACHE_HANDLER_H_ +#define _CACHE_HANDLER_H_ + +#include +#include + +/* + * To not have to double all datatypes we've used hwmem datatypes. If someone + * want's to use cache handler but not hwmem then we'll have to define our own + * datatypes. + */ + +struct cach_range { + u32 start; /* Inclusive */ + u32 end; /* Exclusive */ +}; + +/* + * Internal, do not touch! 
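+ *
+ * The three ranges below conservatively track, as buffer offsets, which
+ * bytes may sit in the CPU caches, which of those may be dirty and
+ * which have a deferred invalidate pending.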
+ */ +struct cach_buf { + void *vstart; + u32 pstart; + u32 size; + + /* Remaining hints are active */ + enum hwmem_alloc_flags cache_settings; + + bool in_cpu_write_buf; + struct cach_range range_in_cpu_cache; + struct cach_range range_dirty_in_cpu_cache; + struct cach_range range_invalid_in_cpu_cache; +}; + +void cach_init_buf(struct cach_buf *buf, + enum hwmem_alloc_flags cache_settings, u32 size); + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr); + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot); + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region); + +#endif /* _CACHE_HANDLER_H_ */ diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c new file mode 100644 index 00000000000..8759c395147 --- /dev/null +++ b/drivers/misc/hwmem/hwmem-ioctl.c @@ -0,0 +1,477 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * TODO: + * Count pin unpin at this level to ensure applications can't interfer + * with each other. + */ + +static int hwmem_open(struct inode *inode, struct file *file); +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma); +static int hwmem_release_fop(struct inode *inode, struct file *file); +static long hwmem_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags); + +static const struct file_operations hwmem_fops = { + .open = hwmem_open, + .mmap = hwmem_ioctl_mmap, + .unlocked_ioctl = hwmem_ioctl, + .release = hwmem_release_fop, + .get_unmapped_area = hwmem_get_unmapped_area, +}; + +static struct miscdevice hwmem_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hwmem", + .fops = &hwmem_fops, +}; + +struct hwmem_file { + struct mutex lock; + struct idr idr; /* id -> struct hwmem_alloc*, ref counted */ + struct hwmem_alloc *fd_alloc; /* Ref counted */ +}; + +static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) +{ + int id, ret; + + while (true) { + if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0) + return -ENOMEM; + + ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id); + if (ret == 0) + break; + else if (ret != -EAGAIN) + return -ENOMEM; + } + + /* + * IDR always returns the lowest free id so the only way we can fail + * here is if hwfile has 2^19 - 1 (524287) allocations. + */ + if (id >= 1 << (31 - PAGE_SHIFT)) { + dev_err(hwmem_device.this_device, "Out of IDs!\n"); + idr_remove(&hwfile->idr, id); + return -ENOMSG; + } + + return id << PAGE_SHIFT; +} + +static void remove_id(struct hwmem_file *hwfile, int id) +{ + idr_remove(&hwfile->idr, id >> PAGE_SHIFT); +} + +static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, int id) +{ + struct hwmem_alloc *alloc; + + alloc = id ? 
idr_find(&hwfile->idr, id >> PAGE_SHIFT) : + hwfile->fd_alloc; + if (alloc == NULL) + alloc = ERR_PTR(-EINVAL); + + return alloc; +} + +static int alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + int ret = 0; + struct hwmem_alloc *alloc; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + ret = create_id(hwfile, alloc); + if (ret < 0) + hwmem_release(alloc); + + return ret; +} + +static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + struct hwmem_alloc *alloc; + + if (hwfile->fd_alloc) + return -EBUSY; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwfile->fd_alloc = alloc; + + return 0; +} + +static int release(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + remove_id(hwfile, id); + hwmem_release(alloc); + + return 0; +} + +static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, req->domain, &req->region); +} + +static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_pin(alloc, &req->phys_addr, req->scattered_addrs); +} + +static int unpin(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_unpin(alloc); + + return 0; +} + +static int set_access(struct hwmem_file *hwfile, + struct hwmem_set_access_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_access(alloc, req->access, req->pid); +} + +static int get_info(struct hwmem_file *hwfile, + struct hwmem_get_info_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access); + + return 0; +} + +static int export(struct hwmem_file *hwfile, s32 id) +{ + int ret; + struct hwmem_alloc *alloc; + + uint32_t size; + enum hwmem_mem_type mem_type; + enum hwmem_access access; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* + * The user could be about to send the buffer to a driver but + * there is a chance the current thread group don't have import rights + * if it gained access to the buffer via a inter-process fd transfer + * (fork, Android binder), if this is the case the driver will not be + * able to resolve the buffer name. To avoid this situation we give the + * current thread group import rights. This will not breach the + * security as the process already has access to the buffer (otherwise + * it would not be able to get here). 
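+ *
+ * Typical flow (illustrative): process A exports with HWMEM_EXPORT_IOC
+ * and hands the returned global name to process B, which turns it back
+ * into a local id with HWMEM_IMPORT_IOC.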
+ */ + hwmem_get_info(alloc, &size, &mem_type, &access); + + ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT), + task_tgid_nr(current)); + if (ret < 0) + goto error; + + return hwmem_get_name(alloc); + +error: + return ret; +} + +static int import(struct hwmem_file *hwfile, s32 name) +{ + int ret = 0; + struct hwmem_alloc *alloc; + + uint32_t size; + enum hwmem_mem_type mem_type; + enum hwmem_access access; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* Check access permissions for process */ + hwmem_get_info(alloc, &size, &mem_type, &access); + + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + + ret = create_id(hwfile, alloc); + if (ret < 0) + hwmem_release(alloc); + +error: + return ret; +} + +static int import_fd(struct hwmem_file *hwfile, s32 name) +{ + struct hwmem_alloc *alloc; + + if (hwfile->fd_alloc) + return -EBUSY; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwfile->fd_alloc = alloc; + + return 0; +} + +static int hwmem_open(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile; + + hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL); + if (hwfile == NULL) + return -ENOMEM; + + idr_init(&hwfile->idr); + mutex_init(&hwfile->lock); + file->private_data = hwfile; + + return 0; +} + +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma) +{ + int ret; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + struct hwmem_alloc *alloc; + + mutex_lock(&hwfile->lock); + + alloc = resolve_id(hwfile, vma->vm_pgoff << PAGE_SHIFT); + if (IS_ERR(alloc)) { + ret = PTR_ERR(alloc); + goto out; + } + + ret = hwmem_mmap(alloc, vma); + +out: + mutex_unlock(&hwfile->lock); + + return ret; +} + +static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data) +{ + hwmem_release((struct hwmem_alloc *)ptr); + + return 0; +} + +static int hwmem_release_fop(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL); + idr_remove_all(&hwfile->idr); + idr_destroy(&hwfile->idr); + + if (hwfile->fd_alloc) + hwmem_release(hwfile->fd_alloc); + + mutex_destroy(&hwfile->lock); + + kfree(hwfile); + + return 0; +} + +static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -ENOSYS; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + mutex_lock(&hwfile->lock); + + switch (cmd) { + case HWMEM_ALLOC_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc(hwfile, &req); + } + break; + case HWMEM_ALLOC_FD_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc_fd(hwfile, &req); + } + break; + case HWMEM_RELEASE_IOC: + ret = release(hwfile, (s32)arg); + break; + case HWMEM_SET_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + else + ret = hwmem_ioctl_set_domain(hwfile, &req); + } + break; + case HWMEM_PIN_IOC: + { + struct hwmem_pin_request req; + /* + * TODO: Validate and copy scattered_addrs. Not a + * problem right now as it's never used. 
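+ * Note that the copy_from_user() below only captures the
+ * pointer value; the array it points to would have to be
+ * validated and copied separately if it were ever used.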
+ */ + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + else + ret = pin(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + } + break; + case HWMEM_UNPIN_IOC: + ret = unpin(hwfile, (s32)arg); + break; + case HWMEM_SET_ACCESS_IOC: + { + struct hwmem_set_access_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_access_request))) + ret = -EFAULT; + else + ret = set_access(hwfile, &req); + } + break; + case HWMEM_GET_INFO_IOC: + { + struct hwmem_get_info_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + else + ret = get_info(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + } + break; + case HWMEM_EXPORT_IOC: + ret = export(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_IOC: + ret = import(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_FD_IOC: + ret = import_fd(hwfile, (s32)arg); + break; + } + + mutex_unlock(&hwfile->lock); + + return ret; +} + +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + /* + * pgoff will not be valid as it contains a buffer id (right shifted + * PAGE_SHIFT bits). To not confuse get_unmapped_area we'll not pass + * on file or pgoff. + */ + return current->mm->get_unmapped_area(NULL, addr, len, 0, flags); +} + +int __init hwmem_ioctl_init(void) +{ + return misc_register(&hwmem_device); +} + +void __exit hwmem_ioctl_exit(void) +{ + misc_deregister(&hwmem_device); +} diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c new file mode 100644 index 00000000000..0010e45ff52 --- /dev/null +++ b/drivers/misc/hwmem/hwmem-main.c @@ -0,0 +1,833 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
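+ *
+ * The driver carves allocations out of a single physically contiguous
+ * region supplied through platform data, using a best-fit allocator
+ * whose free neighbours are coalesced on release.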
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cache_handler.h" + +struct hwmem_alloc_threadg_info { + struct list_head list; + + struct pid *threadg_pid; /* Ref counted */ + + enum hwmem_access access; +}; + +struct hwmem_alloc { + struct list_head list; + + atomic_t ref_cnt; + enum hwmem_alloc_flags flags; + u32 paddr; + void *kaddr; + u32 size; + u32 name; + + /* Access control */ + enum hwmem_access default_access; + struct list_head threadg_info_list; + + /* Cache handling */ + struct cach_buf cach_buf; +}; + +static struct platform_device *hwdev; + +static u32 hwmem_paddr; +static u32 hwmem_size; + +static LIST_HEAD(alloc_list); +static DEFINE_IDR(global_idr); +static DEFINE_MUTEX(lock); + +static void vm_open(struct vm_area_struct *vma); +static void vm_close(struct vm_area_struct *vma); +static struct vm_operations_struct vm_ops = { + .open = vm_open, + .close = vm_close, +}; + +#ifdef CONFIG_DEBUG_FS + +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +#endif /* #ifdef CONFIG_DEBUG_FS */ + +static void clean_alloc_list(void); +static void kunmap_alloc(struct hwmem_alloc *alloc); + +/* Helpers */ + +static u32 get_alloc_offset(struct hwmem_alloc *alloc) +{ + return alloc->paddr - hwmem_paddr; +} + +static void destroy_hwmem_alloc_threadg_info( + struct hwmem_alloc_threadg_info *info) +{ + if (info->threadg_pid) + put_pid(info->threadg_pid); + + kfree(info); +} + +static void clean_hwmem_alloc_threadg_info_list(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct hwmem_alloc_threadg_info *tmp; + + list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list), list) { + list_del(&info->list); + destroy_hwmem_alloc_threadg_info(info); + } +} + +static enum hwmem_access get_access(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct pid *my_pid; + bool found = false; + + my_pid = find_get_pid(task_tgid_nr(current)); + if (!my_pid) + return 0; + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == my_pid) { + found = true; + break; + } + } + + put_pid(my_pid); + + if (found) + return info->access; + else + return alloc->default_access; +} + +static void clear_alloc_mem(struct hwmem_alloc *alloc) +{ + cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE, + HWMEM_DOMAIN_CPU, NULL); + + memset(alloc->kaddr, 0, alloc->size); +} + +static void clean_alloc(struct hwmem_alloc *alloc) +{ + if (alloc->name) { + idr_remove(&global_idr, alloc->name); + alloc->name = 0; + } + + alloc->flags = 0; + + clean_hwmem_alloc_threadg_info_list(alloc); + + kunmap_alloc(alloc); +} + +static void destroy_alloc(struct hwmem_alloc *alloc) +{ + clean_alloc(alloc); + + kfree(alloc); +} + +static void __hwmem_release(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc *other; + + clean_alloc(alloc); + + other = list_entry(alloc->list.prev, struct hwmem_alloc, list); + if ((alloc->list.prev != &alloc_list) && + atomic_read(&other->ref_cnt) == 0) { + other->size += alloc->size; + list_del(&alloc->list); + destroy_alloc(alloc); + alloc = other; + } + other = list_entry(alloc->list.next, struct hwmem_alloc, list); + if ((alloc->list.next != &alloc_list) && + atomic_read(&other->ref_cnt) == 0) { + 
alloc->size += other->size; + list_del(&other->list); + destroy_alloc(other); + } +} + +static struct hwmem_alloc *find_free_alloc_bestfit(u32 size) +{ + u32 best_diff = ~0; + struct hwmem_alloc *alloc = NULL, *i; + + list_for_each_entry(i, &alloc_list, list) { + u32 diff = i->size - size; + if (atomic_read(&i->ref_cnt) > 0 || i->size < size) + continue; + if (diff < best_diff) { + alloc = i; + best_diff = diff; + } + } + + return alloc != NULL ? alloc : ERR_PTR(-ENOMEM); +} + +static struct hwmem_alloc *split_allocation(struct hwmem_alloc *alloc, + u32 new_alloc_size) +{ + struct hwmem_alloc *new_alloc; + + new_alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (new_alloc == NULL) + return ERR_PTR(-ENOMEM); + + atomic_inc(&new_alloc->ref_cnt); + INIT_LIST_HEAD(&new_alloc->threadg_info_list); + new_alloc->paddr = alloc->paddr; + new_alloc->size = new_alloc_size; + alloc->size -= new_alloc_size; + alloc->paddr += new_alloc_size; + + list_add_tail(&new_alloc->list, &alloc->list); + + return new_alloc; +} + +static int init_alloc_list(void) +{ + /* + * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't + * handle that. + */ + int ret; + u32 curr_pos = hwmem_paddr; + u32 hwmem_end = hwmem_paddr + hwmem_size; + u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1); + struct hwmem_alloc *alloc; + + if (PAGE_SIZE >= SZ_64M) { + dev_err(&hwdev->dev, "PAGE_SIZE >= SZ_64M\n"); + return -ENOMSG; + } + + while (next_64mib_boundary < hwmem_end) { + if (next_64mib_boundary - curr_pos > PAGE_SIZE) { + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = next_64mib_boundary - curr_pos - + PAGE_SIZE; + INIT_LIST_HEAD(&alloc->threadg_info_list); + list_add_tail(&alloc->list, &alloc_list); + curr_pos = alloc->paddr + alloc->size; + } + + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = PAGE_SIZE; + atomic_inc(&alloc->ref_cnt); + INIT_LIST_HEAD(&alloc->threadg_info_list); + list_add_tail(&alloc->list, &alloc_list); + curr_pos = alloc->paddr + alloc->size; + + next_64mib_boundary += SZ_64M; + } + + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = hwmem_end - curr_pos; + INIT_LIST_HEAD(&alloc->threadg_info_list); + list_add_tail(&alloc->list, &alloc_list); + + return 0; + +error: + clean_alloc_list(); + + return ret; +} + +static void clean_alloc_list(void) +{ + while (list_empty(&alloc_list) == 0) { + struct hwmem_alloc *i = list_first_entry(&alloc_list, + struct hwmem_alloc, list); + + list_del(&i->list); + + destroy_alloc(i); + } +} + +static int kmap_alloc(struct hwmem_alloc *alloc) +{ + int ret; + pgprot_t pgprot; + + struct vm_struct *area = get_vm_area(alloc->size, VM_IOREMAP); + if (area == NULL) { + dev_info(&hwdev->dev, "Failed to allocate %u bytes virtual" + " memory", alloc->size); + return -ENOMSG; + } + + pgprot = PAGE_KERNEL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); + + ret = ioremap_page_range((unsigned long)area->addr, + (unsigned long)area->addr + alloc->size, alloc->paddr, pgprot); + if (ret < 0) { + dev_info(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, + alloc->paddr + alloc->size); + goto failed_to_map; + } + + alloc->kaddr = area->addr; + + return 0; + +failed_to_map: + area = remove_vm_area(area->addr); + if 
(area == NULL) + dev_err(&hwdev->dev, + "Failed to unmap alloc, resource leak!\n"); + + kfree(area); + + return ret; +} + +static void kunmap_alloc(struct hwmem_alloc *alloc) +{ + struct vm_struct *area; + + if (alloc->kaddr == NULL) + return; + + area = remove_vm_area(alloc->kaddr); + if (area == NULL) { + dev_err(&hwdev->dev, + "Failed to unmap alloc, resource leak!\n"); + return; + } + + kfree(area); + + alloc->kaddr = NULL; +} + +/* HWMEM API */ + +struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags, + enum hwmem_access def_access, enum hwmem_mem_type mem_type) +{ + struct hwmem_alloc *alloc; + int ret; + + if (!hwdev) { + printk(KERN_ERR "hwmem: Badly configured\n"); + return ERR_PTR(-EINVAL); + } + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + size = PAGE_ALIGN(size); + + alloc = find_free_alloc_bestfit(size); + if (IS_ERR(alloc)) { + dev_info(&hwdev->dev, "Allocation failed, no free slot\n"); + goto no_slot; + } + + if (size < alloc->size) { + alloc = split_allocation(alloc, size); + if (IS_ERR(alloc)) + goto split_alloc_failed; + } else { + atomic_inc(&alloc->ref_cnt); + } + + alloc->flags = flags; + alloc->default_access = def_access; + cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size); + ret = kmap_alloc(alloc); + if (ret < 0) + goto kmap_alloc_failed; + cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr); + + clear_alloc_mem(alloc); + + goto out; + +kmap_alloc_failed: + __hwmem_release(alloc); + alloc = ERR_PTR(ret); +split_alloc_failed: +no_slot: + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_alloc); + +void hwmem_release(struct hwmem_alloc *alloc) +{ + mutex_lock(&lock); + + if (atomic_dec_and_test(&alloc->ref_cnt)) + __hwmem_release(alloc); + + mutex_unlock(&lock); +} +EXPORT_SYMBOL(hwmem_release); + +int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + mutex_lock(&lock); + + cach_set_domain(&alloc->cach_buf, access, domain, region); + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_set_domain); + +int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr, + uint32_t *scattered_phys_addrs) +{ + mutex_lock(&lock); + + *phys_addr = alloc->paddr; + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_pin); + +void hwmem_unpin(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_unpin); + +static void vm_open(struct vm_area_struct *vma) +{ + atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt); +} + +static void vm_close(struct vm_area_struct *vma) +{ + hwmem_release((struct hwmem_alloc *)vma->vm_private_data); +} + +int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma) +{ + int ret = 0; + unsigned long vma_size = vma->vm_end - vma->vm_start; + enum hwmem_access access; + mutex_lock(&lock); + + access = get_access(alloc); + + /* Check permissions */ + if ((!(access & HWMEM_ACCESS_WRITE) && + (vma->vm_flags & VM_WRITE)) || + (!(access & HWMEM_ACCESS_READ) && + (vma->vm_flags & VM_READ))) { + ret = -EPERM; + goto illegal_access; + } + + if (vma_size > (unsigned long)alloc->size) { + ret = -EINVAL; + goto illegal_size; + } + + /* + * We don't want Linux to do anything (merging etc) with our VMAs as + * the offset is not necessarily valid + */ + vma->vm_flags |= VM_SPECIAL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot); + vma->vm_private_data = (void *)alloc; + atomic_inc(&alloc->ref_cnt); + vma->vm_ops = &vm_ops; + + ret = 
remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT, + min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot); + if (ret < 0) + goto map_failed; + + goto out; + +map_failed: + atomic_dec(&alloc->ref_cnt); +illegal_size: +illegal_access: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_mmap); + +void *hwmem_kmap(struct hwmem_alloc *alloc) +{ + void *ret; + + mutex_lock(&lock); + + ret = alloc->kaddr; + + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_kmap); + +void hwmem_kunmap(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_kunmap); + +int hwmem_set_access(struct hwmem_alloc *alloc, + enum hwmem_access access, pid_t pid_nr) +{ + int ret; + struct hwmem_alloc_threadg_info *info; + struct pid *pid; + bool found = false; + + pid = find_get_pid(pid_nr); + if (!pid) { + ret = -EINVAL; + goto error_get_pid; + } + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == pid) { + found = true; + break; + } + } + + if (!found) { + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto error_alloc_info; + } + + info->threadg_pid = pid; + info->access = access; + + list_add_tail(&(info->list), &(alloc->threadg_info_list)); + } else { + info->access = access; + } + + return 0; + +error_alloc_info: + put_pid(pid); +error_get_pid: + return ret; +} +EXPORT_SYMBOL(hwmem_set_access); + +void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size, + enum hwmem_mem_type *mem_type, enum hwmem_access *access) +{ + mutex_lock(&lock); + + *size = alloc->size; + *mem_type = HWMEM_MEM_CONTIGUOUS_SYS; + *access = get_access(alloc); + + mutex_unlock(&lock); +} +EXPORT_SYMBOL(hwmem_get_info); + +int hwmem_get_name(struct hwmem_alloc *alloc) +{ + int ret = 0, name; + + mutex_lock(&lock); + + if (alloc->name != 0) { + ret = alloc->name; + goto out; + } + + while (true) { + if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) { + ret = -ENOMEM; + goto pre_get_id_failed; + } + + ret = idr_get_new_above(&global_idr, alloc, 1, &name); + if (ret == 0) + break; + else if (ret != -EAGAIN) + goto get_id_failed; + } + + alloc->name = name; + + ret = name; + goto out; + +get_id_failed: +pre_get_id_failed: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_get_name); + +struct hwmem_alloc *hwmem_resolve_by_name(s32 name) +{ + struct hwmem_alloc *alloc; + + mutex_lock(&lock); + + alloc = idr_find(&global_idr, name); + if (alloc == NULL) { + alloc = ERR_PTR(-EINVAL); + goto find_failed; + } + atomic_inc(&alloc->ref_cnt); + + goto out; + +find_failed: + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_resolve_by_name); + +/* Debug */ + +static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size) +{ + int ret; + + if (buf_size < 134) + return -EINVAL; + + ret = sprintf(*buf, "paddr: %#10x\tsize: %10u\tref cnt: %2i\t" + "name: %#10x\tflags: %#4x\t$ settings: %#4x\t" + "def acc: %#3x\n", alloc->paddr, alloc->size, + atomic_read(&alloc->ref_cnt), alloc->name, + alloc->flags, alloc->cach_buf.cache_settings, + alloc->default_access); + if (ret < 0) + return -ENOMSG; + + *buf += ret; + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. 
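+ * Each line printed by print_alloc() needs at most 134 bytes,
+ * so one page comfortably holds several entries.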
+ */ + + int ret; + struct hwmem_alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is intialized to NULL in open which I assume is 0. */ + u32 *curr_pos = (u32 *)&file->private_data; + size_t bytes_read; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + + list_for_each_entry(curr_alloc, &alloc_list, list) { + u32 alloc_offset = get_alloc_offset(curr_alloc); + + if (alloc_offset < *curr_pos) + continue; + + ret = print_alloc(curr_alloc, &local_buf_pos, available_space - + (size_t)(local_buf_pos - local_buf)); + if (ret == -EINVAL) /* No more room */ + break; + else if (ret < 0) + goto out; + + *curr_pos = alloc_offset + 1; + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + + mutex_unlock(&lock); + + return ret; +} + +static void init_debugfs(void) +{ + /* Hwmem is never unloaded so dropping the dentrys is ok. */ + struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL); + (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0, + &debugfs_allocs_fops); +} + +#endif /* #ifdef CONFIG_DEBUG_FS */ + +/* Module */ + +extern int hwmem_ioctl_init(void); +extern void hwmem_ioctl_exit(void); + +static int __devinit hwmem_probe(struct platform_device *pdev) +{ + int ret = 0; + struct hwmem_platform_data *platform_data = pdev->dev.platform_data; + + if (hwdev || platform_data->size == 0 || + platform_data->start != PAGE_ALIGN(platform_data->start) || + platform_data->size != PAGE_ALIGN(platform_data->size)) { + dev_err(&pdev->dev, "hwdev || platform_data->size == 0 ||" + "platform_data->start !=" + " PAGE_ALIGN(platform_data->start) ||" + "platform_data->size !=" + " PAGE_ALIGN(platform_data->size)\n"); + return -EINVAL; + } + + hwdev = pdev; + hwmem_paddr = platform_data->start; + hwmem_size = platform_data->size; + + /* + * No need to flush the caches here. If we can keep track of the cache + * content then none of our memory will be in the caches, if we can't + * keep track of the cache content we always assume all our memory is + * in the caches. + */ + + ret = init_alloc_list(); + if (ret < 0) + goto init_alloc_list_failed; + + ret = hwmem_ioctl_init(); + if (ret) + goto ioctl_init_failed; + +#ifdef CONFIG_DEBUG_FS + init_debugfs(); +#endif + + dev_info(&pdev->dev, "Hwmem probed, device contains %#x bytes\n", + hwmem_size); + + goto out; + +ioctl_init_failed: + clean_alloc_list(); +init_alloc_list_failed: + hwdev = NULL; + +out: + return ret; +} + +static struct platform_driver hwmem_driver = { + .probe = hwmem_probe, + .driver = { + .name = "hwmem", + }, +}; + +static int __init hwmem_init(void) +{ + return platform_driver_register(&hwmem_driver); +} +subsys_initcall(hwmem_init); + +MODULE_AUTHOR("Marcus Lorentzon "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hardware memory driver"); + diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h new file mode 100644 index 00000000000..bc0a26a30a6 --- /dev/null +++ b/include/linux/hwmem.h @@ -0,0 +1,519 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson HW memory driver + * + * Author: Marcus Lorentzon + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
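+ *
+ * This header doubles as the user space ioctl API and the in-kernel
+ * API; the kernel-only part is guarded by __KERNEL__ below.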
+ */ + +#ifndef _HWMEM_H_ +#define _HWMEM_H_ + +#if !defined(__KERNEL__) && !defined(_KERNEL) +#include +#include +#else +#include +#include +#endif + +#define HWMEM_DEFAULT_DEVICE_NAME "hwmem" + +/** + * @brief Flags defining behavior of allocation + */ +enum hwmem_alloc_flags { + /** + * @brief Buffer will not be cached and not buffered + */ + HWMEM_ALLOC_UNCACHED = (0 << 0), + /** + * @brief Buffer will be buffered, but not cached + */ + HWMEM_ALLOC_BUFFERED = (1 << 0), + /** + * @brief Buffer will be cached and buffered, use cache hints to be + * more specific + */ + HWMEM_ALLOC_CACHED = (3 << 0), + /** + * @brief Buffer should be cached write-back in both level 1 and 2 cache + */ + HWMEM_ALLOC_CACHE_HINT_WB = (1 << 2), + /** + * @brief Buffer should be cached write-through in both level 1 and + * 2 cache + */ + HWMEM_ALLOC_CACHE_HINT_WT = (2 << 2), + /** + * @brief Buffer should be cached write-back in level 1 cache + */ + HWMEM_ALLOC_CACHE_HINT_WB_INNER = (3 << 2), + /** + * @brief Buffer should be cached write-through in level 1 cache + */ + HWMEM_ALLOC_CACHE_HINT_WT_INNER = (4 << 2), + HWMEM_ALLOC_CACHE_HINT_MASK = 0x1C, +}; + +/** + * @brief Flags defining buffer access mode. + */ +enum hwmem_access { + /** + * @brief Buffer will be read from. + */ + HWMEM_ACCESS_READ = (1 << 0), + /** + * @brief Buffer will be written to. + */ + HWMEM_ACCESS_WRITE = (1 << 1), + /** + * @brief Buffer will be imported. + */ + HWMEM_ACCESS_IMPORT = (1 << 2), +}; + +/** + * @brief Flags defining memory type. + */ +enum hwmem_mem_type { + /** + * @brief Scattered system memory. Currently not supported! + */ + HWMEM_MEM_SCATTERED_SYS = (1 << 0), + /** + * @brief Contiguous system memory. + */ + HWMEM_MEM_CONTIGUOUS_SYS = (1 << 1), +}; + +/** + * @brief Values defining memory domain. + */ +enum hwmem_domain { + /** + * @brief This value specifies the neutral memory domain. Setting this + * domain will syncronize all supported memory domains (currently CPU). + */ + HWMEM_DOMAIN_SYNC = 0, + /** + * @brief This value specifies the CPU memory domain. + */ + HWMEM_DOMAIN_CPU = 1, +}; + +/** + * @brief Structure defining a region of a memory buffer. + * + * A buffer is defined to contain a number of equally sized blocks. Each block + * has a part of it included in the region [-). That is + * - bytes. Each block is bytes long. Total number of bytes + * in the region is ( - ) * . First byte of the region is + * + bytes into the buffer. + * + * Here's an example of a region in a graphics buffer (X = buffer, R = region): + * + * XXXXXXXXXXXXXXXXXXXX \ + * XXXXXXXXXXXXXXXXXXXX |-- offset = 60 + * XXXXXXXXXXXXXXXXXXXX / + * XXRRRRRRRRXXXXXXXXXX \ + * XXRRRRRRRRXXXXXXXXXX |-- count = 4 + * XXRRRRRRRRXXXXXXXXXX | + * XXRRRRRRRRXXXXXXXXXX / + * XXXXXXXXXXXXXXXXXXXX + * --| start = 2 + * ----------| end = 10 + * --------------------| size = 20 + */ +struct hwmem_region { + /** + * @brief The first block's offset from beginning of buffer. + */ + uint32_t offset; + /** + * @brief The number of blocks included in this region. + */ + uint32_t count; + /** + * @brief The index of the first byte included in this block. + */ + uint32_t start; + /** + * @brief The index of the last byte included in this block plus one. + */ + uint32_t end; + /** + * @brief The size in bytes of each block. + */ + uint32_t size; +}; + +/* User space API */ + +/** + * @brief Alloc request data. + */ +struct hwmem_alloc_request { + /** + * @brief [in] Size of requested allocation in bytes. Size will be + * aligned to PAGE_SIZE bytes. 
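+ * A request for a single byte therefore consumes a whole page.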
+ */ + uint32_t size; + /** + * @brief [in] Flags describing requested allocation options. + */ + uint32_t flags; /* enum hwmem_alloc_flags */ + /** + * @brief [in] Default access rights for buffer. + */ + uint32_t default_access; /* enum hwmem_access */ + /** + * @brief [in] Memory type of the buffer. + */ + uint32_t mem_type; /* enum hwmem_mem_type */ +}; + +/** + * @brief Set domain request data. + */ +struct hwmem_set_domain_request { + /** + * @brief [in] Identifier of buffer to be prepared. If 0 is specified + * the buffer associated with the current file instance will be used. + */ + int32_t id; + /** + * @brief [in] Value specifying the new memory domain. + */ + uint32_t domain; /* enum hwmem_domain */ + /** + * @brief [in] Flags specifying access mode of the operation. + * + * One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required. + * For details, @see enum hwmem_access. + */ + uint32_t access; /* enum hwmem_access */ + /** + * @brief [in] The region of bytes to be prepared. + * + * For details, @see struct hwmem_region. + */ + struct hwmem_region region; +}; + +/** + * @brief Pin request data. + */ +struct hwmem_pin_request { + /** + * @brief [in] Identifier of buffer to be pinned. If 0 is specified, + * the buffer associated with the current file instance will be used. + */ + int32_t id; + /** + * @brief [out] Physical address of first word in buffer. + */ + uint32_t phys_addr; + /** + * @brief [in] Pointer to buffer for physical addresses of pinned + * scattered buffer. Buffer must be (buffer_size / page_size) * + * sizeof(uint32_t) bytes. + * This field can be NULL for physically contiguos buffers. + */ + uint32_t *scattered_addrs; +}; + +/** + * @brief Set access rights request data. + */ +struct hwmem_set_access_request { + /** + * @brief [in] Identifier of buffer to be pinned. If 0 is specified, + * the buffer associated with the current file instance will be used. + */ + int32_t id; + /** + * @param access Access value indicating what is allowed. + */ + uint32_t access; /* enum hwmem_access */ + /** + * @param pid Process ID to set rights for. + */ + pid_t pid; +}; + +/** + * @brief Get info request data. + */ +struct hwmem_get_info_request { + /** + * @brief [in] Identifier of buffer to get info about. If 0 is specified, + * the buffer associated with the current file instance will be used. + */ + int32_t id; + /** + * @brief [out] Size in bytes of buffer. + */ + uint32_t size; + /** + * @brief [out] Memory type of buffer. + */ + uint32_t mem_type; /* enum hwmem_mem_type */ + /** + * @brief [out] Access rights for buffer. + */ + uint32_t access; /* enum hwmem_access */ +}; + +/** + * @brief Allocates number of bytes and returns a buffer identifier. + * + * Input is a pointer to a hwmem_alloc_request struct. + * + * @return A buffer identifier on success, or a negative error code. + */ +#define HWMEM_ALLOC_IOC _IOW('W', 1, struct hwmem_alloc_request) + +/** + * @brief Allocates number of bytes and associates the created buffer + * with the current file instance. + * + * If the current file instance is already associated with a buffer the call + * will fail. Buffers referenced through files instances shall not be released + * with HWMEM_RELEASE_IOC, instead the file instance shall be closed. + * + * Input is a pointer to a hwmem_alloc_request struct. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_ALLOC_FD_IOC _IOW('W', 2, struct hwmem_alloc_request) + +/** + * @brief Releases buffer. 
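+ * Identifiers obtained through HWMEM_IMPORT_IOC are released the
+ * same way.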
+ * + * Buffers are reference counted and will not be destroyed until the last + * reference is released. Bufferes allocated with ALLOC_FD_IOC not allowed. + * + * Input is the buffer identifier. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_RELEASE_IOC _IO('W', 3) + +/** + * @brief Set the buffer's memory domain and prepares it for access. + * + * Input is a pointer to a hwmem_set_domain_request struct. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_SET_DOMAIN_IOC _IOR('W', 4, struct hwmem_set_domain_request) + +/** + * @brief Pins the buffer and returns the physical address of the buffer. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_PIN_IOC _IOWR('W', 5, struct hwmem_pin_request) + +/** + * @brief Unpins the buffer. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_UNPIN_IOC _IO('W', 6) + +/** + * @brief Set access rights for buffer. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_SET_ACCESS_IOC _IOW('W', 7, struct hwmem_set_access_request) + +/** + * @brief Get buffer information. + * + * Input is the buffer identifier. If 0 is specified the buffer associated + * with the current file instance will be used. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_GET_INFO_IOC _IOWR('W', 8, struct hwmem_get_info_request) + +/** + * @brief Export the buffer identifier for use in another process. + * + * The global name will not increase the buffers reference count and will + * therefore not keep the buffer alive. + * + * Input is the buffer identifier. If 0 is specified the buffer associated with + * the current file instance will be exported. + * + * @return A global buffer name on success, or a negative error code. + */ +#define HWMEM_EXPORT_IOC _IO('W', 9) + +/** + * @brief Import a buffer to allow local access to the buffer. + * + * Input is the buffer's global name. + * + * @return The imported buffer's identifier on success, or a negative error code. + */ +#define HWMEM_IMPORT_IOC _IO('W', 10) + +/** + * @brief Import a buffer to allow local access to the buffer using fd. + * + * Input is the buffer's global name. + * + * @return Zero on success, or a negative error code. + */ +#define HWMEM_IMPORT_FD_IOC _IO('W', 11) + +#ifdef __KERNEL__ + +/* Kernel API */ + +struct hwmem_alloc; + +/** + * @brief Allocates number of bytes. + * + * @param size Number of bytes to allocate. All allocations are page aligned. + * @param flags Allocation options. + * @param def_access Default buffer access rights. + * @param mem_type Memory type. + * + * @return Pointer to allocation, or a negative error code. + */ +struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags, + enum hwmem_access def_access, enum hwmem_mem_type mem_type); + +/** + * @brief Release a previously allocated buffer. + * When last reference is released, the buffer will be freed. + * + * @param alloc Buffer to be released. + */ +void hwmem_release(struct hwmem_alloc *alloc); + +/** + * @brief Set the buffer domain and prepare it for access. + * + * @param alloc Buffer to be prepared. + * @param access Flags defining memory access mode of the call. + * @param domain Value specifying the memory domain. + * @param region Structure defining the minimum area of the buffer to be + * prepared. + * + * @return Zero on success, or a negative error code. 
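+ *
+ * Typical sequence (illustrative): switch to HWMEM_DOMAIN_CPU before
+ * touching the buffer from the CPU, then to HWMEM_DOMAIN_SYNC before
+ * handing it over to hardware.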
+ */ +int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region); + +/** + * @brief Pins the buffer. + * + * @param alloc Buffer to be pinned. + * @param phys_addr Reference to variable to receive physical address. + * @param scattered_phys_addrs Pointer to buffer to receive physical addresses + * of all pages in the scattered buffer. Can be NULL if buffer is contigous. + * Buffer size must be (buffer_size / page_size) * sizeof(uint32_t) bytes. + */ +int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr, + uint32_t *scattered_phys_addrs); + +/** + * @brief Unpins the buffer. + * + * @param alloc Buffer to be unpinned. + */ +void hwmem_unpin(struct hwmem_alloc *alloc); + +/** + * @brief Map the buffer to user space. + * + * @param alloc Buffer to be unpinned. + */ +int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma); + +/** + * @brief Map the buffer for use in the kernel. + * + * This function implicitly pins the buffer. + * + * @param alloc Buffer to be mapped. + * + * @return Pointer to buffer, or a negative error code. + */ +void *hwmem_kmap(struct hwmem_alloc *alloc); + +/** + * @brief Un-map a buffer previously mapped with hwmem_kmap. + * + * This function implicitly unpins the buffer. + * + * @param alloc Buffer to be un-mapped. + */ +void hwmem_kunmap(struct hwmem_alloc *alloc); + +/** + * @brief Set access rights for buffer. + * + * @param alloc Buffer to set rights for. + * @param access Access value indicating what is allowed. + * @param pid Process ID to set rights for. + */ +int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access, + pid_t pid); + +/** + * @brief Get buffer information. + * + * @param alloc Buffer to get information about. + * @param size Pointer to size output variable. + * @param size Pointer to memory type output variable. + * @param size Pointer to access rights output variable. + */ +void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size, + enum hwmem_mem_type *mem_type, enum hwmem_access *access); + +/** + * @brief Allocate a global buffer name. + * Generated buffer name is valid in all processes. Consecutive calls will get + * the same name for the same buffer. + * + * @param alloc Buffer to be made public. + * + * @return Positive global name on success, or a negative error code. + */ +int hwmem_get_name(struct hwmem_alloc *alloc); + +/** + * @brief Import the global buffer name to allow local access to the buffer. + * This call will add a buffer reference. Resulting buffer should be + * released with a call to hwmem_release. + * + * @param name A valid global buffer name. + * + * @return Pointer to allocation, or a negative error code. 
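+ *
+ * Minimal kernel-side sketch (illustrative; assumes a local
+ * uint32_t phys_addr):
+ *
+ *	alloc = hwmem_resolve_by_name(name);
+ *	if (IS_ERR(alloc))
+ *		return PTR_ERR(alloc);
+ *	hwmem_pin(alloc, &phys_addr, NULL);
+ *	... let hardware use phys_addr ...
+ *	hwmem_unpin(alloc);
+ *	hwmem_release(alloc);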
+ */ +struct hwmem_alloc *hwmem_resolve_by_name(s32 name); + +/* Internal */ + +struct hwmem_platform_data { + /* Starting physical address of memory region */ + unsigned long start; + /* Size of memory region */ + unsigned long size; +}; + +#endif + +#endif /* _HWMEM_H_ */ -- cgit v1.2.3 From 2e48c687b44dc3c062ed54ca333508dcc32df339 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 20 Oct 2011 10:47:00 +0200 Subject: mach-ux500: add dcache necessary for hwmem Signed-off-by: Philippe Langlais --- arch/arm/mach-ux500/dcache.c | 294 ++++++++++++++++++++++++++++++ arch/arm/mach-ux500/include/mach/dcache.h | 26 +++ 2 files changed, 320 insertions(+) create mode 100644 arch/arm/mach-ux500/dcache.c create mode 100644 arch/arm/mach-ux500/include/mach/dcache.h diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c new file mode 100644 index 00000000000..cb4f8329f32 --- /dev/null +++ b/arch/arm/mach-ux500/dcache.c @@ -0,0 +1,294 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Cache handler integration and data cache helpers. + * + * Author: Johan Mossberg + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include +#include + +#include +#include +#include +#include + +/* + * Values are derived from measurements on HREFP_1.1_V32_OM_S10 running + * u8500-android-2.2_r1.1_v0.21. + * + * A lot of time can be spent trying to figure out the perfect breakpoints but + * for now I've chosen the following simple way. + * + * breakpoint = best_case + (worst_case - best_case) * 0.666 + * The breakpoint is moved slightly towards the worst case because a full + * clean/flush affects the entire system so we should be a bit careful. + * + * BEST CASE: + * Best case is that the cache is empty and the system is idling. The case + * where the cache contains only targeted data could be better in some cases + * but it's hard to do measurements and calculate on that case so I choose the + * easier alternative. + * + * inner_clean_breakpoint = time_2_range_clean_on_empty_cache( + * complete_clean_on_empty_cache_time) + * inner_flush_breakpoint = time_2_range_flush_on_empty_cache( + * complete_flush_on_empty_cache_time) + * + * outer_clean_breakpoint = time_2_range_clean_on_empty_cache( + * complete_clean_on_empty_cache_time) + * outer_flush_breakpoint = time_2_range_flush_on_empty_cache( + * complete_flush_on_empty_cache_time) + * + * WORST CASE: + * Worst case is that the cache is filled with dirty non targeted data that + * will be used after the synchronization and the system is under heavy load. + * + * inner_clean_breakpoint = time_2_range_clean_on_empty_cache( + * complete_clean_on_full_cache_time * 1.5) + * Times 1.5 because it runs on both cores half the time. + * inner_flush_breakpoint = time_2_range_flush_on_empty_cache( + * complete_flush_on_full_cache_time * 1.5 + + * complete_flush_on_full_cache_time / 2) + * Plus "complete_flush_on_full_cache_time / 2" because all data has to be read + * back, here we assume that both cores can fill their cache simultaneously + * (seems to be the case as operations on full and empty inner cache takes + * roughly the same amount of time ie the bus to outer is not the bottle neck). 
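+ *
+ * To make the formula concrete, a worked example with the measured inner
+ * clean values used below (illustrative only):
+ * inner_clean_breakpoint = 21324 + (32744 - 21324) * 0.666 ~= 28930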
+ * + * outer_clean_breakpoint = time_2_range_clean_on_empty_cache( + * complete_clean_on_full_cache_time + + * (complete_clean_on_full_cache_time - + * complete_clean_on_empty_cache_time)) + * Plus "(complete_flush_on_full_cache_time - + * complete_flush_on_empty_cache_time)" because no one else can work when we + * hog the bus with our unecessary transfer. + * outer_flush_breakpoint = time_2_range_flush_on_empty_cache( + * complete_flush_on_full_cache_time * 2 + + * (complete_flush_on_full_cache_time - + * complete_flush_on_empty_cache_time) * 2) + * + * These values might have to be updated if changes are made to the CPU, L2$, + * memory bus or memory. + */ +/* 28930 */ +static const u32 inner_clean_breakpoint = 21324 + (32744 - 21324) * 0.666; +/* 36224 */ +static const u32 inner_flush_breakpoint = 21324 + (43697 - 21324) * 0.666; +/* 254069 */ +static const u32 outer_clean_breakpoint = 68041 + (347363 - 68041) * 0.666; +/* 485414 */ +static const u32 outer_flush_breakpoint = 68041 + (694727 - 68041) * 0.666; + +static bool is_wt(enum hwmem_alloc_flags cache_settings); + +static void __clean_inner_dcache_all(void *param); +static void clean_inner_dcache_all(void); + +static void __flush_inner_dcache_all(void *param); +static void flush_inner_dcache_all(void); + +static bool is_cache_exclusive(void); + +enum hwmem_alloc_flags cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings) +{ + enum hwmem_alloc_flags cache_settings = + requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK; + + if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { + if (is_wt(requested_cache_settings)) + cache_settings |= HWMEM_ALLOC_CACHE_HINT_WT; + else + cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB; + } + + return cache_settings; +} + +void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, + pgprot_t *pgprot) +{ + if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { + if (is_wt(cache_settings)) + *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, + L_PTE_MT_WRITETHROUGH); + else + *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, + L_PTE_MT_WRITEBACK); + } else if (cache_settings & HWMEM_ALLOC_BUFFERED) + *pgprot = pgprot_writecombine(*pgprot); + else + *pgprot = pgprot_noncached(*pgprot); +} + +void drain_cpu_write_buf(void) +{ + dsb(); + outer_cache.sync(); +} + +void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, + bool *cleaned_everything) +{ + /* + * There is no problem with exclusive caches here as the Cortex-A9 + * documentation (8.1.4. Exclusive L2 cache) says that when a dirty + * line is moved from L2 to L1 it is first written to mem. Because + * of this there is no way a line can avoid the clean by jumping + * between the cache levels. + */ + *cleaned_everything = true; + + if (length < inner_clean_breakpoint) { + /* Inner clean range */ + dmac_map_area(vaddr, length, DMA_TO_DEVICE); + *cleaned_everything = false; + } else + clean_inner_dcache_all(); + + if (!inner_only) { + /* + * There is currently no outer_cache.clean_all() so we use + * flush instead, which is ok as clean is a subset of flush. + * Clean range and flush range take the same amount of time + * so we can use outer_flush_breakpoint here. 
+		 */
+		if (length < outer_flush_breakpoint) {
+			outer_cache.clean_range(paddr, paddr + length);
+			*cleaned_everything = false;
+		} else
+			outer_cache.flush_all();
+	}
+}
+
+void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+						bool *flushed_everything)
+{
+	/*
+	 * There might still be stale data in the caches after this call if the
+	 * cache levels are exclusive. The following can happen.
+	 * 1. Clean L1 moves the data to L2.
+	 * 2. Speculative prefetch, preemption or loads on the other core moves
+	 * all the data back to L1, any dirty data will be written to mem as a
+	 * result of this.
+	 * 3. Flush L2 does nothing as there is no targeted data in L2.
+	 * 4. Flush L1 moves the data to L2. Notice that this does not happen
+	 * when the cache levels are non-exclusive as clean pages are not
+	 * written to L2 in that case.
+	 * 5. Stale data is still present in L2!
+	 * I see two possible solutions, don't use exclusive caches or
+	 * (temporarily) disable prefetching to L1, preemption and the other
+	 * core.
+	 *
+	 * A situation can occur where the operation does not seem atomic from
+	 * the other core's point of view, even on a non-exclusive cache setup.
+	 * Replace step 2 in the previous scenario with a write from the other
+	 * core. The other core will write on top of the old data but the
+	 * result will not be written to memory. One would expect either that
+	 * the write was performed on top of the old data and was written to
+	 * memory (the write occurred before the flush) or that the write was
+	 * performed on top of the new data and was not written to memory (the
+	 * write occurred after the flush). The same problem can occur with one
+	 * core if kernel preemption is enabled. The solution is to
+	 * (temporarily) disable the other core and preemption. I can't think
+	 * of any situation where this would be a problem and disabling the
+	 * other core for the duration of this call is mighty expensive so for
+	 * now I just ignore the problem.
+ */ + + *flushed_everything = true; + + if (!inner_only) { + /* + * Beautiful solution for the exclusive problems :) + */ + if (is_cache_exclusive()) + panic("%s can't handle exclusive CPU caches\n", + __func__); + + if (length < inner_clean_breakpoint) { + /* Inner clean range */ + dmac_map_area(vaddr, length, DMA_TO_DEVICE); + *flushed_everything = false; + } else + clean_inner_dcache_all(); + + if (length < outer_flush_breakpoint) { + outer_cache.flush_range(paddr, paddr + length); + *flushed_everything = false; + } else + outer_cache.flush_all(); + } + + if (length < inner_flush_breakpoint) { + /* Inner flush range */ + dmac_flush_range(vaddr, (void *)((u32)vaddr + length)); + *flushed_everything = false; + } else + flush_inner_dcache_all(); +} + +bool speculative_data_prefetch(void) +{ + return true; +} + +u32 get_dcache_granularity(void) +{ + return 32; +} + +/* + * Local functions + */ + +static bool is_wt(enum hwmem_alloc_flags cache_settings) +{ + u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; + if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT || + cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER) + return true; + else + return false; +} + +static void __clean_inner_dcache_all(void *param) +{ + __cpuc_clean_dcache_all(); +} + +static void clean_inner_dcache_all(void) +{ + on_each_cpu(__clean_inner_dcache_all, NULL, 1); +} + +static void __flush_inner_dcache_all(void *param) +{ + __cpuc_flush_dcache_all(); +} + +static void flush_inner_dcache_all(void) +{ + on_each_cpu(__flush_inner_dcache_all, NULL, 1); +} + +static bool is_cache_exclusive(void) +{ + static const u32 CA9_ACTLR_EXCL = 0x80; + + u32 armv7_actlr; + + asm ( + "mrc p15, 0, %0, c1, c0, 1" + : "=r" (armv7_actlr) + ); + + if (armv7_actlr & CA9_ACTLR_EXCL) + return true; + else + return false; +} diff --git a/arch/arm/mach-ux500/include/mach/dcache.h b/arch/arm/mach-ux500/include/mach/dcache.h new file mode 100644 index 00000000000..83fe618b04f --- /dev/null +++ b/arch/arm/mach-ux500/include/mach/dcache.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Data cache helpers + * + * Author: Johan Mossberg + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _MACH_UX500_DCACHE_H_ +#define _MACH_UX500_DCACHE_H_ + +#include + +void drain_cpu_write_buf(void); +void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, + bool *cleaned_everything); +void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, + bool *flushed_everything); +bool speculative_data_prefetch(void); +/* Returns 1 if no cache is present */ +u32 get_dcache_granularity(void); + +#endif /* _MACH_UX500_DCACHE_H_ */ -- cgit v1.2.3 From 9d15824abc358f8a609cdfdaefcc8a591015e45c Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Tue, 11 Oct 2011 15:55:36 +0200 Subject: HWMEM: Update API Perform queued hwmem API changes. One commit to ease dependency handling. 
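To illustrate the reworked pin interface, here is a sketch of a client
side call sequence (illustrative only, not part of the patch;
start_transfer() stands in for whatever DMA kick-off the client driver
uses):

	struct hwmem_mem_chunk chunk;
	size_t chunk_cnt = 1;

	/* Contiguous buffers always fit in a single mem chunk. */
	if (hwmem_pin(alloc, &chunk, &chunk_cnt) == 0) {
		start_transfer(chunk.paddr, chunk.size);
		/* ... wait for the transfer to complete ... */
		hwmem_unpin(alloc);
	}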
Depends-On: I13f249cf5f51f9f138171e8d6f59e1d5d2f72de1, I31030bcfda7cf76d15402c2137576da4f3fb2761, I2dc7e6aa5686492550b5164e50c06ed750ac9e16, Ia12bbb9f378c331cfb9b1376dedb3b7b65f56429, Ibc3404df4876971d8b69272c63120e2fe3bb2787 ST-Ericsson ID: AP 327001 ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068 Change-Id: I9a45ad54a0cc8a5cdb1e3b9038ad50aeacb3f9c3 Signed-off-by: Johan Mossberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/17518 Reviewed-by: Robert FEKETE Conflicts: drivers/misc/dispdev/dispdev.c Conflicts: drivers/video/b2r2/b2r2_blt_main.c drivers/video/mcde/mcde_fb.c --- arch/arm/mach-ux500/dcache.c | 98 +++++++---- drivers/misc/hwmem/cache_handler.c | 109 ++++++------ drivers/misc/hwmem/hwmem-ioctl.c | 155 +++++++++++------ drivers/misc/hwmem/hwmem-main.c | 42 +++-- include/linux/hwmem.h | 346 ++++++++++++++++++++++--------------- 5 files changed, 460 insertions(+), 290 deletions(-) diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c index cb4f8329f32..b1c3942c181 100644 --- a/arch/arm/mach-ux500/dcache.c +++ b/arch/arm/mach-ux500/dcache.c @@ -83,8 +83,6 @@ static const u32 outer_clean_breakpoint = 68041 + (347363 - 68041) * 0.666; /* 485414 */ static const u32 outer_flush_breakpoint = 68041 + (694727 - 68041) * 0.666; -static bool is_wt(enum hwmem_alloc_flags cache_settings); - static void __clean_inner_dcache_all(void *param); static void clean_inner_dcache_all(void); @@ -96,15 +94,48 @@ static bool is_cache_exclusive(void); enum hwmem_alloc_flags cachi_get_cache_settings( enum hwmem_alloc_flags requested_cache_settings) { - enum hwmem_alloc_flags cache_settings = - requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK; - - if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { - if (is_wt(requested_cache_settings)) - cache_settings |= HWMEM_ALLOC_CACHE_HINT_WT; - else - cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB; - } + static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | + HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; + + enum hwmem_alloc_flags cache_settings; + + if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) && + requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE)) + /* + * We never use uncached as it's extremely slow and there is + * no scenario where it would be better than buffered memory. + */ + return HWMEM_ALLOC_HINT_WRITE_COMBINE; + + /* + * The user has specified cached or nothing at all, both are treated as + * cached. + */ + cache_settings = (requested_cache_settings & + ~(HWMEM_ALLOC_HINT_UNCACHED | + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY | + HWMEM_ALLOC_HINT_CACHE_NAOW)) | + HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; + if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_WT))) + cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB; + /* + * On ARMv7 "alloc on write" is just a hint so we need to assume the + * worst case ie "alloc on write". We would however like to remember + * the requested "alloc on write" setting so that we can pass it on to + * the hardware, we use the reserved bit in the alloc flags to do that. 
+ */ + if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) + cache_settings |= HWMEM_ALLOC_RESERVED_CHI; + else + cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI; return cache_settings; } @@ -112,17 +143,21 @@ enum hwmem_alloc_flags cachi_get_cache_settings( void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot) { - if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { - if (is_wt(cache_settings)) + if (cache_settings & HWMEM_ALLOC_HINT_CACHED) { + if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT) *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH); - else - *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, - L_PTE_MT_WRITEBACK); - } else if (cache_settings & HWMEM_ALLOC_BUFFERED) + else { + if (cache_settings & HWMEM_ALLOC_RESERVED_CHI) + *pgprot = __pgprot_modify(*pgprot, + L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC); + else + *pgprot = __pgprot_modify(*pgprot, + L_PTE_MT_MASK, L_PTE_MT_WRITEBACK); + } + } else { *pgprot = pgprot_writecombine(*pgprot); - else - *pgprot = pgprot_noncached(*pgprot); + } } void drain_cpu_write_buf(void) @@ -147,8 +182,9 @@ void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, /* Inner clean range */ dmac_map_area(vaddr, length, DMA_TO_DEVICE); *cleaned_everything = false; - } else + } else { clean_inner_dcache_all(); + } if (!inner_only) { /* @@ -160,8 +196,9 @@ void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, if (length < outer_flush_breakpoint) { outer_cache.clean_range(paddr, paddr + length); *cleaned_everything = false; - } else + } else { outer_cache.flush_all(); + } } } @@ -214,22 +251,25 @@ void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only, /* Inner clean range */ dmac_map_area(vaddr, length, DMA_TO_DEVICE); *flushed_everything = false; - } else + } else { clean_inner_dcache_all(); + } if (length < outer_flush_breakpoint) { outer_cache.flush_range(paddr, paddr + length); *flushed_everything = false; - } else + } else { outer_cache.flush_all(); + } } if (length < inner_flush_breakpoint) { /* Inner flush range */ dmac_flush_range(vaddr, (void *)((u32)vaddr + length)); *flushed_everything = false; - } else + } else { flush_inner_dcache_all(); + } } bool speculative_data_prefetch(void) @@ -246,16 +286,6 @@ u32 get_dcache_granularity(void) * Local functions */ -static bool is_wt(enum hwmem_alloc_flags cache_settings) -{ - u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; - if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT || - cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER) - return true; - else - return false; -} - static void __clean_inner_dcache_all(void *param) { __cpuc_clean_dcache_all(); diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c index b313da36aa4..e0ab4ee6cf8 100644 --- a/drivers/misc/hwmem/cache_handler.c +++ b/drivers/misc/hwmem/cache_handler.c @@ -65,9 +65,6 @@ static u32 offset_2_paddr(struct cach_buf *buf, u32 offset); static u32 align_up(u32 value, u32 alignment); static u32 align_down(u32 value, u32 alignment); -static bool is_wb(enum hwmem_alloc_flags cache_settings); -static bool is_inner_only(enum hwmem_alloc_flags cache_settings); - /* * Exported functions */ @@ -89,7 +86,7 @@ void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr) buf->vstart = vaddr; buf->pstart = paddr; - if (buf->cache_settings & HWMEM_ALLOC_CACHED) { + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { /* * Keep whatever is in the cache. 
This way we avoid an * unnecessary synch if CPU is the first user. @@ -124,9 +121,9 @@ void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, struct hwmem_region *__region; struct hwmem_region full_region; - if (region != NULL) + if (region != NULL) { __region = region; - else { + } else { full_region.offset = 0; full_region.count = 1; full_region.start = 0; @@ -156,27 +153,39 @@ void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings( enum hwmem_alloc_flags requested_cache_settings) { - enum hwmem_alloc_flags cache_settings = - requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK; - - if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { - /* - * If the alloc is cached we'll use the default setting. We - * don't know what this setting is so we have to assume the - * worst case, ie write back inner and outer. - */ - cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB; - } - - return cache_settings; + static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | + HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; + /* We don't know the cache setting so we assume worst case. */ + static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; + + if (requested_cache_settings & CACHE_ON_FLAGS_MASK) + return CACHE_SETTING; + else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE || + (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED && + !(requested_cache_settings & + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE))) + return HWMEM_ALLOC_HINT_WRITE_COMBINE; + else if (requested_cache_settings & + (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED)) + return 0; + else + /* Nothing specified, use cached */ + return CACHE_SETTING; } void __attribute__((weak)) cachi_set_pgprot_cache_options( enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot) { - if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) + if (cache_settings & HWMEM_ALLOC_HINT_CACHED) *pgprot = *pgprot; /* To silence compiler and checkpatch */ - else if (cache_settings & HWMEM_ALLOC_BUFFERED) + else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) *pgprot = pgprot_writecombine(*pgprot); else *pgprot = pgprot_noncached(*pgprot); @@ -197,23 +206,32 @@ static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, if (!write && !read) return; - if ((buf->cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) { + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { struct cach_range region_range; region_2_range(region, buf->size, ®ion_range); - if (read || (write && is_wb(buf->cache_settings))) + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_WB)) /* Perform defered invalidates */ invalidate_cpu_cache(buf, ®ion_range); - if (read) - expand_range(&buf->range_in_cpu_cache, ®ion_range); - if (write && is_wb(buf->cache_settings)) { + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_AOW)) expand_range(&buf->range_in_cpu_cache, ®ion_range); + if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) { + struct cach_range dirty_range_addition; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) + dirty_range_addition = region_range; + else + 
intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &dirty_range_addition); + expand_range(&buf->range_dirty_in_cpu_cache, - ®ion_range); + &dirty_range_addition); } } - if (buf->cache_settings & HWMEM_ALLOC_BUFFERED) { + if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) { if (write) buf->in_cpu_write_buf = true; } @@ -243,8 +261,9 @@ static void sync_buf_post_cpu(struct cach_buf *buf, &intersection); clean_cpu_cache(buf, ®ion_range); - } else + } else { flush_cpu_cache(buf, ®ion_range); + } } if (read) clean_cpu_cache(buf, ®ion_range); @@ -277,13 +296,14 @@ static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range) offset_2_vaddr(buf, intersection.start), offset_2_paddr(buf, intersection.start), range_length(&intersection), - is_inner_only(buf->cache_settings), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, &flushed_everything); if (flushed_everything) { null_range(&buf->range_invalid_in_cpu_cache); null_range(&buf->range_dirty_in_cpu_cache); - } else + } else { /* * No need to shrink range_in_cpu_cache as invalidate * is only used when we can't keep track of what's in @@ -291,6 +311,7 @@ static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range) */ shrink_range(&buf->range_invalid_in_cpu_cache, &intersection); + } } } @@ -309,7 +330,8 @@ static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range) offset_2_vaddr(buf, intersection.start), offset_2_paddr(buf, intersection.start), range_length(&intersection), - is_inner_only(buf->cache_settings), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, &cleaned_everything); if (cleaned_everything) @@ -334,7 +356,8 @@ static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range) offset_2_vaddr(buf, intersection.start), offset_2_paddr(buf, intersection.start), range_length(&intersection), - is_inner_only(buf->cache_settings), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, &flushed_everything); if (flushed_everything) { @@ -485,23 +508,3 @@ static u32 align_down(u32 value, u32 alignment) return value - remainder; } - -static bool is_wb(enum hwmem_alloc_flags cache_settings) -{ - u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; - if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WB || - cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER) - return true; - else - return false; -} - -static bool is_inner_only(enum hwmem_alloc_flags cache_settings) -{ - u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK; - if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER || - cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER) - return true; - else - return false; -} diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c index 8759c395147..e9e50de78bd 100644 --- a/drivers/misc/hwmem/hwmem-ioctl.c +++ b/drivers/misc/hwmem/hwmem-ioctl.c @@ -1,5 +1,5 @@ /* - * Copyright (C) ST-Ericsson AB 2010 + * Copyright (C) ST-Ericsson SA 2010 * * Hardware memory driver, hwmem * @@ -21,12 +21,6 @@ #include #include -/* - * TODO: - * Count pin unpin at this level to ensure applications can't interfer - * with each other. 
- */ - static int hwmem_open(struct inode *inode, struct file *file); static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma); static int hwmem_release_fop(struct inode *inode, struct file *file); @@ -56,7 +50,7 @@ struct hwmem_file { struct hwmem_alloc *fd_alloc; /* Ref counted */ }; -static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) +static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) { int id, ret; @@ -72,42 +66,42 @@ static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) } /* - * IDR always returns the lowest free id so the only way we can fail - * here is if hwfile has 2^19 - 1 (524287) allocations. + * IDR always returns the lowest free id so there is no wrapping issue + * because of this. */ - if (id >= 1 << (31 - PAGE_SHIFT)) { + if (id >= (s32)1 << (31 - PAGE_SHIFT)) { dev_err(hwmem_device.this_device, "Out of IDs!\n"); idr_remove(&hwfile->idr, id); return -ENOMSG; } - return id << PAGE_SHIFT; + return (s32)id << PAGE_SHIFT; } -static void remove_id(struct hwmem_file *hwfile, int id) +static void remove_id(struct hwmem_file *hwfile, s32 id) { idr_remove(&hwfile->idr, id >> PAGE_SHIFT); } -static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, int id) +static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id) { struct hwmem_alloc *alloc; alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) : - hwfile->fd_alloc; + hwfile->fd_alloc; if (alloc == NULL) alloc = ERR_PTR(-EINVAL); return alloc; } -static int alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) { - int ret = 0; + s32 ret = 0; struct hwmem_alloc *alloc; alloc = hwmem_alloc(req->size, req->flags, req->default_access, - req->mem_type); + req->mem_type); if (IS_ERR(alloc)) return PTR_ERR(alloc); @@ -123,10 +117,10 @@ static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) struct hwmem_alloc *alloc; if (hwfile->fd_alloc) - return -EBUSY; + return -EINVAL; alloc = hwmem_alloc(req->size, req->flags, req->default_access, - req->mem_type); + req->mem_type); if (IS_ERR(alloc)) return PTR_ERR(alloc); @@ -139,6 +133,9 @@ static int release(struct hwmem_file *hwfile, s32 id) { struct hwmem_alloc *alloc; + if (id == 0) + return -EINVAL; + alloc = resolve_id(hwfile, id); if (IS_ERR(alloc)) return PTR_ERR(alloc); @@ -149,7 +146,20 @@ static int release(struct hwmem_file *hwfile, s32 id) return 0; } -static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile, +static int set_cpu_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU, + (struct hwmem_region *)&req->region); +} + +static int set_sync_domain(struct hwmem_file *hwfile, struct hwmem_set_domain_request *req) { struct hwmem_alloc *alloc; @@ -158,18 +168,33 @@ static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile, if (IS_ERR(alloc)) return PTR_ERR(alloc); - return hwmem_set_domain(alloc, req->access, req->domain, &req->region); + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC, + (struct hwmem_region *)&req->region); } static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req) { + int ret; struct hwmem_alloc *alloc; + enum hwmem_mem_type mem_type; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; alloc = 
resolve_id(hwfile, req->id); if (IS_ERR(alloc)) return PTR_ERR(alloc); - return hwmem_pin(alloc, &req->phys_addr, req->scattered_addrs); + hwmem_get_info(alloc, NULL, &mem_type, NULL); + if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) + return -EINVAL; + + ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); + if (ret < 0) + return ret; + + req->phys_addr = mem_chunk.paddr; + + return 0; } static int unpin(struct hwmem_file *hwfile, s32 id) @@ -211,13 +236,10 @@ static int get_info(struct hwmem_file *hwfile, return 0; } -static int export(struct hwmem_file *hwfile, s32 id) +static s32 export(struct hwmem_file *hwfile, s32 id) { - int ret; + s32 ret; struct hwmem_alloc *alloc; - - uint32_t size; - enum hwmem_mem_type mem_type; enum hwmem_access access; alloc = resolve_id(hwfile, id); @@ -234,26 +256,20 @@ static int export(struct hwmem_file *hwfile, s32 id) * security as the process already has access to the buffer (otherwise * it would not be able to get here). */ - hwmem_get_info(alloc, &size, &mem_type, &access); + hwmem_get_info(alloc, NULL, NULL, &access); ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT), - task_tgid_nr(current)); + task_tgid_nr(current)); if (ret < 0) - goto error; + return ret; return hwmem_get_name(alloc); - -error: - return ret; } -static int import(struct hwmem_file *hwfile, s32 name) +static s32 import(struct hwmem_file *hwfile, s32 name) { - int ret = 0; + s32 ret = 0; struct hwmem_alloc *alloc; - - uint32_t size; - enum hwmem_mem_type mem_type; enum hwmem_access access; alloc = hwmem_resolve_by_name(name); @@ -261,8 +277,7 @@ static int import(struct hwmem_file *hwfile, s32 name) return PTR_ERR(alloc); /* Check access permissions for process */ - hwmem_get_info(alloc, &size, &mem_type, &access); - + hwmem_get_info(alloc, NULL, NULL, &access); if (!(access & HWMEM_ACCESS_IMPORT)) { ret = -EPERM; goto error; @@ -270,26 +285,44 @@ static int import(struct hwmem_file *hwfile, s32 name) ret = create_id(hwfile, alloc); if (ret < 0) - hwmem_release(alloc); + goto error; + + return ret; error: + hwmem_release(alloc); + return ret; } static int import_fd(struct hwmem_file *hwfile, s32 name) { + int ret; struct hwmem_alloc *alloc; + enum hwmem_access access; if (hwfile->fd_alloc) - return -EBUSY; + return -EINVAL; alloc = hwmem_resolve_by_name(name); if (IS_ERR(alloc)) return PTR_ERR(alloc); + /* Check access permissions for process */ + hwmem_get_info(alloc, NULL, NULL, &access); + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + hwfile->fd_alloc = alloc; return 0; + +error: + hwmem_release(alloc); + + return ret; } static int hwmem_open(struct inode *inode, struct file *file) @@ -315,7 +348,7 @@ static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma) mutex_lock(&hwfile->lock); - alloc = resolve_id(hwfile, vma->vm_pgoff << PAGE_SHIFT); + alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT); if (IS_ERR(alloc)) { ret = PTR_ERR(alloc); goto out; @@ -385,23 +418,29 @@ static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case HWMEM_RELEASE_IOC: ret = release(hwfile, (s32)arg); break; - case HWMEM_SET_DOMAIN_IOC: + case HWMEM_SET_CPU_DOMAIN_IOC: { struct hwmem_set_domain_request req; if (copy_from_user(&req, (void __user *)arg, sizeof(struct hwmem_set_domain_request))) ret = -EFAULT; else - ret = hwmem_ioctl_set_domain(hwfile, &req); + ret = set_cpu_domain(hwfile, &req); + } + break; + case HWMEM_SET_SYNC_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, 
(void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + else + ret = set_sync_domain(hwfile, &req); } break; case HWMEM_PIN_IOC: { struct hwmem_pin_request req; - /* - * TODO: Validate and copy scattered_addrs. Not a - * problem right now as it's never used. - */ if (copy_from_user(&req, (void __user *)arg, sizeof(struct hwmem_pin_request))) ret = -EFAULT; @@ -468,6 +507,22 @@ static unsigned long hwmem_get_unmapped_area(struct file *file, int __init hwmem_ioctl_init(void) { + if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 || + sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 || + sizeof(enum hwmem_access) != 4 || + sizeof(enum hwmem_mem_type) != 4) { + dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT" + " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||" + " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum" + " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)" + " != 4\n"); + return -ENOMSG; + } + if (PAGE_SHIFT > 15) + dev_warn(hwmem_device.this_device, "Due to the page size only" + " %u id:s per file instance are available\n", + ((u32)1 << (31 - PAGE_SHIFT)) - 1); + return misc_register(&hwmem_device); } diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c index 0010e45ff52..fbfd8502a1d 100644 --- a/drivers/misc/hwmem/hwmem-main.c +++ b/drivers/misc/hwmem/hwmem-main.c @@ -1,10 +1,10 @@ /* - * Copyright (C) ST-Ericsson AB 2010 + * Copyright (C) ST-Ericsson SA 2010 * * Hardware memory driver, hwmem * - * Author: Marcus Lorentzon - * for ST-Ericsson. + * Author: Marcus Lorentzon , + * Johan Mossberg for ST-Ericsson. * * License terms: GNU General Public License (GPL), version 2. */ @@ -46,7 +46,7 @@ struct hwmem_alloc { u32 paddr; void *kaddr; u32 size; - u32 name; + s32 name; /* Access control */ enum hwmem_access default_access; @@ -446,12 +446,19 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, } EXPORT_SYMBOL(hwmem_set_domain); -int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr, - uint32_t *scattered_phys_addrs) +int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks, + u32 *mem_chunks_length) { + if (*mem_chunks_length < 1) { + *mem_chunks_length = 1; + return -ENOSPC; + } + mutex_lock(&lock); - *phys_addr = alloc->paddr; + mem_chunks[0].paddr = alloc->paddr; + mem_chunks[0].size = alloc->size; + *mem_chunks_length = 1; mutex_unlock(&lock); @@ -492,7 +499,7 @@ int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma) goto illegal_access; } - if (vma_size > (unsigned long)alloc->size) { + if (vma_size > alloc->size) { ret = -EINVAL; goto illegal_size; } @@ -590,14 +597,17 @@ error_get_pid: } EXPORT_SYMBOL(hwmem_set_access); -void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size, +void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size, enum hwmem_mem_type *mem_type, enum hwmem_access *access) { mutex_lock(&lock); - *size = alloc->size; - *mem_type = HWMEM_MEM_CONTIGUOUS_SYS; - *access = get_access(alloc); + if (size != NULL) + *size = alloc->size; + if (mem_type != NULL) + *mem_type = HWMEM_MEM_CONTIGUOUS_SYS; + if (access != NULL) + *access = get_access(alloc); mutex_unlock(&lock); } @@ -766,6 +776,14 @@ static int __devinit hwmem_probe(struct platform_device *pdev) int ret = 0; struct hwmem_platform_data *platform_data = pdev->dev.platform_data; + if (sizeof(int) != 4 || sizeof(phys_addr_t) < 4 || + sizeof(void *) < 4 || sizeof(size_t) != 4) { + dev_err(&pdev->dev, "sizeof(int) != 4 || sizeof(phys_addr_t)" + " < 4 
|| sizeof(void *) < 4 || sizeof(size_t) !=" + " 4\n"); + return -ENOMSG; + } + if (hwdev || platform_data->size == 0 || platform_data->start != PAGE_ALIGN(platform_data->start) || platform_data->size != PAGE_ALIGN(platform_data->size)) { diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h index bc0a26a30a6..6756085f72a 100644 --- a/include/linux/hwmem.h +++ b/include/linux/hwmem.h @@ -1,7 +1,7 @@ /* - * Copyright (C) ST-Ericsson AB 2010 + * Copyright (C) ST-Ericsson SA 2010 * - * ST-Ericsson HW memory driver + * Hardware memory driver, hwmem * * Author: Marcus Lorentzon * for ST-Ericsson. @@ -27,36 +27,49 @@ */ enum hwmem_alloc_flags { /** - * @brief Buffer will not be cached and not buffered + * @brief Buffered */ - HWMEM_ALLOC_UNCACHED = (0 << 0), + HWMEM_ALLOC_HINT_WRITE_COMBINE = (1 << 0), /** - * @brief Buffer will be buffered, but not cached + * @brief Non-buffered */ - HWMEM_ALLOC_BUFFERED = (1 << 0), + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE = (1 << 1), /** - * @brief Buffer will be cached and buffered, use cache hints to be - * more specific + * @brief Cached */ - HWMEM_ALLOC_CACHED = (3 << 0), + HWMEM_ALLOC_HINT_CACHED = (1 << 2), /** - * @brief Buffer should be cached write-back in both level 1 and 2 cache + * @brief Uncached */ - HWMEM_ALLOC_CACHE_HINT_WB = (1 << 2), + HWMEM_ALLOC_HINT_UNCACHED = (1 << 3), /** - * @brief Buffer should be cached write-through in both level 1 and - * 2 cache + * @brief Write back */ - HWMEM_ALLOC_CACHE_HINT_WT = (2 << 2), + HWMEM_ALLOC_HINT_CACHE_WB = (1 << 4), /** - * @brief Buffer should be cached write-back in level 1 cache + * @brief Write through */ - HWMEM_ALLOC_CACHE_HINT_WB_INNER = (3 << 2), + HWMEM_ALLOC_HINT_CACHE_WT = (1 << 5), /** - * @brief Buffer should be cached write-through in level 1 cache + * @brief No alloc on write */ - HWMEM_ALLOC_CACHE_HINT_WT_INNER = (4 << 2), - HWMEM_ALLOC_CACHE_HINT_MASK = 0x1C, + HWMEM_ALLOC_HINT_CACHE_NAOW = (1 << 6), + /** + * @brief Alloc on write + */ + HWMEM_ALLOC_HINT_CACHE_AOW = (1 << 7), + /** + * @brief Inner and outer cache + */ + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE = (1 << 8), + /** + * @brief Inner cache only + */ + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY = (1 << 9), + /** + * @brief Reserved for use by the cache handler integration + */ + HWMEM_ALLOC_RESERVED_CHI = (1 << 31), }; /** @@ -78,82 +91,32 @@ enum hwmem_access { }; /** - * @brief Flags defining memory type. + * @brief Values defining memory types. */ enum hwmem_mem_type { /** - * @brief Scattered system memory. Currently not supported! + * @brief Scattered system memory. */ - HWMEM_MEM_SCATTERED_SYS = (1 << 0), + HWMEM_MEM_SCATTERED_SYS, /** * @brief Contiguous system memory. */ - HWMEM_MEM_CONTIGUOUS_SYS = (1 << 1), + HWMEM_MEM_CONTIGUOUS_SYS, }; -/** - * @brief Values defining memory domain. - */ -enum hwmem_domain { - /** - * @brief This value specifies the neutral memory domain. Setting this - * domain will syncronize all supported memory domains (currently CPU). - */ - HWMEM_DOMAIN_SYNC = 0, - /** - * @brief This value specifies the CPU memory domain. - */ - HWMEM_DOMAIN_CPU = 1, -}; +/* User space API */ /** - * @brief Structure defining a region of a memory buffer. - * - * A buffer is defined to contain a number of equally sized blocks. Each block - * has a part of it included in the region [-). That is - * - bytes. Each block is bytes long. Total number of bytes - * in the region is ( - ) * . First byte of the region is - * + bytes into the buffer. 
- * - * Here's an example of a region in a graphics buffer (X = buffer, R = region): - * - * XXXXXXXXXXXXXXXXXXXX \ - * XXXXXXXXXXXXXXXXXXXX |-- offset = 60 - * XXXXXXXXXXXXXXXXXXXX / - * XXRRRRRRRRXXXXXXXXXX \ - * XXRRRRRRRRXXXXXXXXXX |-- count = 4 - * XXRRRRRRRRXXXXXXXXXX | - * XXRRRRRRRRXXXXXXXXXX / - * XXXXXXXXXXXXXXXXXXXX - * --| start = 2 - * ----------| end = 10 - * --------------------| size = 20 + * @see struct hwmem_region. */ -struct hwmem_region { - /** - * @brief The first block's offset from beginning of buffer. - */ - uint32_t offset; - /** - * @brief The number of blocks included in this region. - */ - uint32_t count; - /** - * @brief The index of the first byte included in this block. - */ - uint32_t start; - /** - * @brief The index of the last byte included in this block plus one. - */ - uint32_t end; - /** - * @brief The size in bytes of each block. - */ - uint32_t size; +struct hwmem_region_us { + __u32 offset; + __u32 count; + __u32 start; + __u32 end; + __u32 size; }; -/* User space API */ - /** * @brief Alloc request data. */ @@ -162,19 +125,19 @@ struct hwmem_alloc_request { * @brief [in] Size of requested allocation in bytes. Size will be * aligned to PAGE_SIZE bytes. */ - uint32_t size; + __u32 size; /** * @brief [in] Flags describing requested allocation options. */ - uint32_t flags; /* enum hwmem_alloc_flags */ + __u32 flags; /* enum hwmem_alloc_flags */ /** * @brief [in] Default access rights for buffer. */ - uint32_t default_access; /* enum hwmem_access */ + __u32 default_access; /* enum hwmem_access */ /** * @brief [in] Memory type of the buffer. */ - uint32_t mem_type; /* enum hwmem_mem_type */ + __u32 mem_type; /* enum hwmem_mem_type */ }; /** @@ -185,24 +148,20 @@ struct hwmem_set_domain_request { * @brief [in] Identifier of buffer to be prepared. If 0 is specified * the buffer associated with the current file instance will be used. */ - int32_t id; - /** - * @brief [in] Value specifying the new memory domain. - */ - uint32_t domain; /* enum hwmem_domain */ + __s32 id; /** * @brief [in] Flags specifying access mode of the operation. * * One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required. * For details, @see enum hwmem_access. */ - uint32_t access; /* enum hwmem_access */ + __u32 access; /* enum hwmem_access */ /** * @brief [in] The region of bytes to be prepared. * * For details, @see struct hwmem_region. */ - struct hwmem_region region; + struct hwmem_region_us region; }; /** @@ -213,18 +172,11 @@ struct hwmem_pin_request { * @brief [in] Identifier of buffer to be pinned. If 0 is specified, * the buffer associated with the current file instance will be used. */ - int32_t id; + __s32 id; /** * @brief [out] Physical address of first word in buffer. */ - uint32_t phys_addr; - /** - * @brief [in] Pointer to buffer for physical addresses of pinned - * scattered buffer. Buffer must be (buffer_size / page_size) * - * sizeof(uint32_t) bytes. - * This field can be NULL for physically contiguos buffers. - */ - uint32_t *scattered_addrs; + __u32 phys_addr; }; /** @@ -232,14 +184,15 @@ struct hwmem_pin_request { */ struct hwmem_set_access_request { /** - * @brief [in] Identifier of buffer to be pinned. If 0 is specified, - * the buffer associated with the current file instance will be used. + * @brief [in] Identifier of buffer to set access rights for. If 0 is + * specified, the buffer associated with the current file instance will + * be used. */ - int32_t id; + __s32 id; /** * @param access Access value indicating what is allowed. 
 	 */
-	uint32_t access; /* enum hwmem_access */
+	__u32 access; /* enum hwmem_access */
 	/**
 	 * @param pid Process ID to set rights for.
 	 */
@@ -254,19 +207,19 @@ struct hwmem_get_info_request {
 	/**
 	 * @brief [in] Identifier of buffer to get info about. If 0 is specified,
 	 * the buffer associated with the current file instance will be used.
 	 */
-	int32_t id;
+	__s32 id;
 	/**
 	 * @brief [out] Size in bytes of buffer.
 	 */
-	uint32_t size;
+	__u32 size;
 	/**
 	 * @brief [out] Memory type of buffer.
 	 */
-	uint32_t mem_type; /* enum hwmem_mem_type */
+	__u32 mem_type; /* enum hwmem_mem_type */
 	/**
 	 * @brief [out] Access rights for buffer.
 	 */
-	uint32_t access; /* enum hwmem_access */
+	__u32 access; /* enum hwmem_access */
 };
 
 /**
@@ -296,7 +249,8 @@ struct hwmem_get_info_request {
  * @brief Releases buffer.
  *
  * Buffers are reference counted and will not be destroyed until the last
- * reference is released. Buffers allocated with ALLOC_FD_IOC are not allowed.
+ * reference is released. Buffers allocated with ALLOC_FD_IOC shall not be
+ * released with this IOC, @see HWMEM_ALLOC_FD_IOC.
 *
 * Input is the buffer identifier.
 *
@@ -305,44 +259,72 @@ struct hwmem_get_info_request {
 #define HWMEM_RELEASE_IOC _IO('W', 3)
 
 /**
- * @brief Sets the buffer's memory domain and prepares it for access.
+ * Memory Mapping
+ *
+ * To map a hwmem buffer mmap the hwmem fd and supply the buffer identifier as
+ * the offset. If the buffer is linked to the fd and thus has no buffer
+ * identifier, supply 0 as the offset. Note that the offset feature of mmap is
+ * disabled in both cases, you can only mmap starting at position 0.
+ */
+
+/**
+ * @brief Prepares the buffer for CPU access.
 *
 * Input is a pointer to a hwmem_set_domain_request struct.
 *
 * @return Zero on success, or a negative error code.
 */
-#define HWMEM_SET_DOMAIN_IOC _IOR('W', 4, struct hwmem_set_domain_request)
+#define HWMEM_SET_CPU_DOMAIN_IOC _IOW('W', 4, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Set sync domain from driver instead!
+ *
+ * @brief Prepares the buffer for access by any DMA hardware.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_SYNC_DOMAIN_IOC _IOW('W', 5, struct hwmem_set_domain_request)
 
 /**
+ * DEPRECATED: Pin from driver instead!
+ *
 * @brief Pins the buffer.
 *
+ * Input is a pointer to a hwmem_pin_request struct. Only contiguous buffers
+ * can be pinned from user space.
+ *
 * @return Zero on success, or a negative error code.
 */
-#define HWMEM_PIN_IOC _IOWR('W', 5, struct hwmem_pin_request)
+#define HWMEM_PIN_IOC _IOWR('W', 6, struct hwmem_pin_request)
 
 /**
+ * DEPRECATED: Unpin from driver instead!
+ *
 * @brief Unpins the buffer.
 *
 * @return Zero on success, or a negative error code.
 */
-#define HWMEM_UNPIN_IOC _IO('W', 6)
+#define HWMEM_UNPIN_IOC _IO('W', 7)
 
 /**
 * @brief Set access rights for buffer.
 *
+ * Input is a pointer to a hwmem_set_access_request struct.
+ *
 * @return Zero on success, or a negative error code.
 */
-#define HWMEM_SET_ACCESS_IOC _IOW('W', 7, struct hwmem_set_access_request)
+#define HWMEM_SET_ACCESS_IOC _IOW('W', 8, struct hwmem_set_access_request)
 
 /**
 * @brief Get buffer information.
 *
- * Input is the buffer identifier. If 0 is specified the buffer associated
- * with the current file instance will be used.
+ * Input is a pointer to a hwmem_get_info_request struct.
 *
 * @return Zero on success, or a negative error code.
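+ *
+ * Illustrative user space call (illustrative only, error handling omitted;
+ * hwmem_fd is assumed to be an open descriptor for the hwmem device):
+ *
+ *	struct hwmem_get_info_request req = { .id = 0 };
+ *	ioctl(hwmem_fd, HWMEM_GET_INFO_IOC, &req);
+ *	(req.size, req.mem_type and req.access now describe the buffer)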
 */
-#define HWMEM_GET_INFO_IOC _IOWR('W', 8, struct hwmem_get_info_request)
+#define HWMEM_GET_INFO_IOC _IOWR('W', 9, struct hwmem_get_info_request)
 
 /**
  * @brief Export the buffer identifier for use in another process.
@@ -355,32 +337,100 @@ struct hwmem_get_info_request {
 *
 * @return A global buffer name on success, or a negative error code.
 */
-#define HWMEM_EXPORT_IOC _IO('W', 9)
+#define HWMEM_EXPORT_IOC _IO('W', 10)
 
 /**
 * @brief Import a buffer to allow local access to the buffer.
 *
 * Input is the buffer's global name.
 *
- * @return The imported buffer's identifier on success, or a negative error code.
+ * @return The imported buffer's identifier on success, or a negative error
+ * code.
 */
-#define HWMEM_IMPORT_IOC _IO('W', 10)
+#define HWMEM_IMPORT_IOC _IO('W', 11)
 
 /**
- * @brief Import a buffer to allow local access to the buffer using fd.
+ * @brief Import a buffer to allow local access to the buffer using the current
+ * fd.
 *
 * Input is the buffer's global name.
 *
 * @return Zero on success, or a negative error code.
 */
-#define HWMEM_IMPORT_FD_IOC _IO('W', 11)
+#define HWMEM_IMPORT_FD_IOC _IO('W', 12)
 
 #ifdef __KERNEL__
 
 /* Kernel API */
 
+/**
+ * @brief Values defining memory domain.
+ */
+enum hwmem_domain {
+	/**
+	 * @brief This value specifies the neutral memory domain. Setting this
+	 * domain will synchronize all supported memory domains.
+	 */
+	HWMEM_DOMAIN_SYNC = 0,
+	/**
+	 * @brief This value specifies the CPU memory domain.
+	 */
+	HWMEM_DOMAIN_CPU,
+};
+
 struct hwmem_alloc;
 
+/**
+ * @brief Structure defining a region of a memory buffer.
+ *
+ * A buffer is defined to contain a number of equally sized blocks. Each block
+ * has a part of it included in the region [<start>-<end>). That is
+ * <end> - <start> bytes. Each block is <size> bytes long. Total number of
+ * bytes in the region is (<end> - <start>) * <count>. First byte of the
+ * region is <offset> + <start> bytes into the buffer.
+ *
+ * Here's an example of a region in a graphics buffer (X = buffer, R = region):
+ *
+ * XXXXXXXXXXXXXXXXXXXX \
+ * XXXXXXXXXXXXXXXXXXXX |-- offset = 60
+ * XXXXXXXXXXXXXXXXXXXX /
+ * XXRRRRRRRRXXXXXXXXXX \
+ * XXRRRRRRRRXXXXXXXXXX |-- count = 4
+ * XXRRRRRRRRXXXXXXXXXX |
+ * XXRRRRRRRRXXXXXXXXXX /
+ * XXXXXXXXXXXXXXXXXXXX
+ * --| start = 2
+ * ----------| end = 10
+ * --------------------| size = 20
+ */
+struct hwmem_region {
+	/**
+	 * @brief The first block's offset from beginning of buffer.
+	 */
+	size_t offset;
+	/**
+	 * @brief The number of blocks included in this region.
+	 */
+	size_t count;
+	/**
+	 * @brief The index of the first byte included in this block.
+	 */
+	size_t start;
+	/**
+	 * @brief The index of the last byte included in this block plus one.
+	 */
+	size_t end;
+	/**
+	 * @brief The size in bytes of each block.
+	 */
+	size_t size;
+};
+
+struct hwmem_mem_chunk {
+	phys_addr_t paddr;
+	size_t size;
+};
+
 /**
 * @brief Allocates <size> number of bytes.
 *
@@ -391,7 +441,7 @@ struct hwmem_alloc;
 *
 * @return Pointer to allocation, or a negative error code.
 */
-struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
 		enum hwmem_access def_access, enum hwmem_mem_type mem_type);
 
 /**
@@ -419,14 +469,26 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
 /**
 * @brief Pins the buffer.
 *
+ * Notice that the number of mem chunks a buffer consists of can change at any
+ * time if the buffer is not pinned.
+ * Because of this one cannot assume that pin will succeed if <mem_chunks>
+ * has the length specified by a previous call to pin, as the buffer layout
+ * may have changed between the calls. There are two ways of handling this
+ * situation, keep redoing the pin procedure till it succeeds or allocate
+ * enough mem chunks for the worst case ("buffer size" / "page size" mem
+ * chunks). Contiguous buffers always require only one mem chunk.
+ *
 * @param alloc Buffer to be pinned.
- * @param phys_addr Reference to variable to receive physical address.
- * @param scattered_phys_addrs Pointer to buffer to receive physical addresses
- * of all pages in the scattered buffer. Can be NULL if buffer is contigous.
- * Buffer size must be (buffer_size / page_size) * sizeof(uint32_t) bytes.
+ * @param mem_chunks Pointer to array of mem chunks.
+ * @param mem_chunks_length Pointer to variable that contains the length of
+ * the <mem_chunks> array. On success the number of written mem chunks will be
+ * stored in this variable. If the call fails with -ENOSPC the required length
+ * of <mem_chunks> will be stored in this variable.
+ *
+ * @return Zero on success, or a negative error code.
 */
-int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr,
-		uint32_t *scattered_phys_addrs);
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+		size_t *mem_chunks_length);
 
 /**
 * @brief Unpins the buffer.
@@ -438,7 +500,9 @@ void hwmem_unpin(struct hwmem_alloc *alloc);
 /**
 * @brief Map the buffer to user space.
 *
- * @param alloc Buffer to be unpinned.
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Zero on success, or a negative error code.
 */
 int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma);
 
@@ -476,12 +540,12 @@ int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
 * @brief Get buffer information.
 *
 * @param alloc Buffer to get information about.
- * @param size Pointer to size output variable.
- * @param size Pointer to memory type output variable.
- * @param size Pointer to access rights output variable.
+ * @param size Pointer to size output variable. Can be NULL.
+ * @param mem_type Pointer to memory type output variable. Can be NULL.
+ * @param access Pointer to access rights output variable. Can be NULL.
 */
-void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
-		enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+void hwmem_get_info(struct hwmem_alloc *alloc, size_t *size,
+		enum hwmem_mem_type *mem_type, enum hwmem_access *access);
 
 /**
 * @brief Allocate a global buffer name.
@@ -492,7 +556,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
 *
 * @return Positive global name on success, or a negative error code.
 */
-int hwmem_get_name(struct hwmem_alloc *alloc);
+s32 hwmem_get_name(struct hwmem_alloc *alloc);
 
 /**
 * @brief Import the global buffer name to allow local access to the buffer.
@@ -508,10 +572,10 @@ struct hwmem_alloc *hwmem_resolve_by_name(s32 name);
 /* Internal */
 
 struct hwmem_platform_data {
-	/* Starting physical address of memory region */
-	unsigned long start;
+	/* Physical address of memory region */
+	u32 start;
 	/* Size of memory region */
-	unsigned long size;
+	u32 size;
 };
 
 #endif
-- cgit v1.2.3


From 0b58138244e1c5652dc2f3128e64e48ab51f03ce Mon Sep 17 00:00:00 2001
From: Philippe Langlais
Date: Tue, 11 Oct 2011 16:03:19 +0200
Subject: HWMEM: Make user space include linux/types.h

The __x32 types are defined in linux/types.h, which is therefore needed in
user space also.
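A minimal user space snippet that relies on this (illustrative only, not
part of the patch; the flag values come from the updated hwmem.h):

	#include <linux/hwmem.h>	/* now pulls in linux/types.h */

	struct hwmem_alloc_request req = {
		.size = 4096,	/* __u32 from linux/types.h */
		.flags = HWMEM_ALLOC_HINT_UNCACHED,
		.default_access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
		.mem_type = HWMEM_MEM_CONTIGUOUS_SYS,
	};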
This patch also adds the uncached flag when allocating the FB to ensure we don't get a cached FB. Depends-On: I9a45ad54a0cc8a5cdb1e3b9038ad50aeacb3f9c3 ST-Ericsson ID: AP 327001 ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068 Change-Id: I3df94e161be96dd2f55928daab3eb20837b92c1d Signed-off-by: Johan Mossberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/18551 Reviewed-by: Jonas ABERG --- include/linux/hwmem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h index 6756085f72a..a2eb91d9d9a 100644 --- a/include/linux/hwmem.h +++ b/include/linux/hwmem.h @@ -12,11 +12,11 @@ #ifndef _HWMEM_H_ #define _HWMEM_H_ +#include + #if !defined(__KERNEL__) && !defined(_KERNEL) -#include #include #else -#include #include #endif -- cgit v1.2.3 From b4ec7730eb503ed3b49c41584d0112ac0e6c54ec Mon Sep 17 00:00:00 2001 From: Johan Mossberg Date: Wed, 23 Mar 2011 12:03:36 +0100 Subject: HWMEM: Reset ref count in clean_alloc When hwmem_alloc fails to alloc virtual memory for the kernel mapping it will call clean_alloc with a non-zero ref count. If the ref count is not reset in that case we will leak HWMEM memory. ST-Ericsson ID: 330377 ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068 Change-Id: I4c203bbd090a119d1e99cc7d294e827ab8a6e9e7 Signed-off-by: Johan Mossberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/18942 Reviewed-by: QATOOLS Reviewed-by: Jonas ABERG --- drivers/misc/hwmem/hwmem-main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c index fbfd8502a1d..9223bdd013b 100644 --- a/drivers/misc/hwmem/hwmem-main.c +++ b/drivers/misc/hwmem/hwmem-main.c @@ -154,6 +154,7 @@ static void clean_alloc(struct hwmem_alloc *alloc) } alloc->flags = 0; + atomic_set(&alloc->ref_cnt, 0); clean_hwmem_alloc_threadg_info_list(alloc); -- cgit v1.2.3 From cb9288607f4d59a18097c417a4649d2aabcc32cc Mon Sep 17 00:00:00 2001 From: Johan Mossberg Date: Thu, 31 Mar 2011 17:19:22 +0200 Subject: HWMEM: Alloc kernel vaddrs on boot By allocating all the kernel virtual addresses on boot fragmentation of the vmalloc area can not cause HWMEM allocs to fail. 
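In short, the approach looks like this (condensed sketch of the code added
below):

	/* At probe, reserve kernel VA space for the whole region once. */
	struct vm_struct *area = get_vm_area(hwmem_size, VM_IOREMAP);
	hwmem_kaddr = area->addr;

	/* Per allocation, map pages into a slice of the reserved range. */
	void *alloc_kaddr = hwmem_kaddr + get_alloc_offset(alloc);
	ioremap_page_range((unsigned long)alloc_kaddr,
			(unsigned long)alloc_kaddr + alloc->size,
			alloc->paddr, pgprot);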
ST-Ericsson ID: 333457 ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068 Change-Id: I04a8b4a2804df9a9bbad24e4874107f86a1efdb9 Signed-off-by: Johan Mossberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/19682 Reviewed-by: Jonas ABERG --- drivers/misc/hwmem/hwmem-main.c | 81 ++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c index 9223bdd013b..96cca3735dd 100644 --- a/drivers/misc/hwmem/hwmem-main.c +++ b/drivers/misc/hwmem/hwmem-main.c @@ -59,6 +59,7 @@ struct hwmem_alloc { static struct platform_device *hwdev; static u32 hwmem_paddr; +static void *hwmem_kaddr; static u32 hwmem_size; static LIST_HEAD(alloc_list); @@ -307,60 +308,67 @@ static void clean_alloc_list(void) } } -static int kmap_alloc(struct hwmem_alloc *alloc) +static int alloc_kaddrs(void) { - int ret; - pgprot_t pgprot; - - struct vm_struct *area = get_vm_area(alloc->size, VM_IOREMAP); + struct vm_struct *area = get_vm_area(hwmem_size, VM_IOREMAP); if (area == NULL) { - dev_info(&hwdev->dev, "Failed to allocate %u bytes virtual" - " memory", alloc->size); + dev_info(&hwdev->dev, "Failed to allocate %u bytes kernel" + " virtual memory", hwmem_size); return -ENOMSG; } - pgprot = PAGE_KERNEL; - cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); + hwmem_kaddr = area->addr; - ret = ioremap_page_range((unsigned long)area->addr, - (unsigned long)area->addr + alloc->size, alloc->paddr, pgprot); - if (ret < 0) { - dev_info(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, - alloc->paddr + alloc->size); - goto failed_to_map; - } + return 0; +} - alloc->kaddr = area->addr; +static void free_kaddrs(void) +{ + struct vm_struct *area; - return 0; + if (hwmem_kaddr == NULL) + return; -failed_to_map: - area = remove_vm_area(area->addr); + area = remove_vm_area(hwmem_kaddr); if (area == NULL) dev_err(&hwdev->dev, - "Failed to unmap alloc, resource leak!\n"); + "Failed to free kernel virtual memory," + " resource leak!\n"); kfree(area); - return ret; + hwmem_kaddr = NULL; } -static void kunmap_alloc(struct hwmem_alloc *alloc) +static int kmap_alloc(struct hwmem_alloc *alloc) { - struct vm_struct *area; + int ret; + pgprot_t pgprot; + void *alloc_kaddr = hwmem_kaddr + get_alloc_offset(alloc); - if (alloc->kaddr == NULL) - return; + pgprot = PAGE_KERNEL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); - area = remove_vm_area(alloc->kaddr); - if (area == NULL) { - dev_err(&hwdev->dev, - "Failed to unmap alloc, resource leak!\n"); - return; + ret = ioremap_page_range((unsigned long)alloc_kaddr, + (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, + pgprot); + if (ret < 0) { + dev_info(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, + alloc->paddr + alloc->size); + return ret; } - kfree(area); + alloc->kaddr = alloc_kaddr; + return 0; +} + +static void kunmap_alloc(struct hwmem_alloc *alloc) +{ + if (alloc->kaddr == NULL) + return; + + unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size); alloc->kaddr = NULL; } @@ -386,7 +394,8 @@ struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags, alloc = find_free_alloc_bestfit(size); if (IS_ERR(alloc)) { - dev_info(&hwdev->dev, "Allocation failed, no free slot\n"); + dev_info(&hwdev->dev, "Could not find slot for %u bytes" + " allocation\n", size); goto no_slot; } @@ -800,6 +809,10 @@ static int __devinit hwmem_probe(struct platform_device *pdev) hwmem_paddr = platform_data->start; hwmem_size = platform_data->size; + ret = 
alloc_kaddrs(); + if (ret < 0) + goto alloc_kaddrs_failed; + /* * No need to flush the caches here. If we can keep track of the cache * content then none of our memory will be in the caches, if we can't @@ -827,6 +840,8 @@ static int __devinit hwmem_probe(struct platform_device *pdev) ioctl_init_failed: clean_alloc_list(); init_alloc_list_failed: + free_kaddrs(); +alloc_kaddrs_failed: hwdev = NULL; out: -- cgit v1.2.3 From 432d56acd506e2143e0ca59ea0143b3ddccb3034 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 20 Oct 2011 10:48:17 +0200 Subject: HWMEM: Break out allocator Break out the allocator so that it can run in multiple instances, i.e. support multiple regions. Multiple regions can be used to battle fragmentation by letting clients that allocate in a non-fragmenting way allocate from their own region. For STE this will probably mean letting camera/video have their own region. A positive side effect is that we can use HWMEM as an allocator for SRAM if we want to. ST-Ericsson ID: 334992 ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068 Change-Id: I449f2dde8f1ceeb05dd55384dd4070e91997276f Signed-off-by: Johan Mossberg Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/20638 Reviewed-by: QATEST Reviewed-by: Jonas ABERG --- arch/arm/mach-ux500/dcache.c | 70 ------ arch/arm/mach-ux500/hwmem-int.c | 165 ++++++++++++++ drivers/misc/hwmem/Makefile | 2 +- drivers/misc/hwmem/contig_alloc.c | 468 ++++++++++++++++++++++++++++++++++++++ drivers/misc/hwmem/hwmem-main.c | 462 +++++++++++++------------------------ include/linux/hwmem.h | 22 +- 6 files changed, 810 insertions(+), 379 deletions(-) create mode 100644 arch/arm/mach-ux500/hwmem-int.c create mode 100644 drivers/misc/hwmem/contig_alloc.c diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c index b1c3942c181..b117d4e8283 100644 --- a/arch/arm/mach-ux500/dcache.c +++ b/arch/arm/mach-ux500/dcache.c @@ -9,7 +9,6 @@ * License terms: GNU General Public License (GPL), version 2. */ -#include #include #include @@ -91,75 +90,6 @@ static void flush_inner_dcache_all(void); static bool is_cache_exclusive(void); -enum hwmem_alloc_flags cachi_get_cache_settings( - enum hwmem_alloc_flags requested_cache_settings) -{ - static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | - HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | - HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | - HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | - HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; - - enum hwmem_alloc_flags cache_settings; - - if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) && - requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | - HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE)) - /* - * We never use uncached as it's extremely slow and there is - * no scenario where it would be better than buffered memory. - */ - return HWMEM_ALLOC_HINT_WRITE_COMBINE; - - /* - * The user has specified cached or nothing at all, both are treated as - * cached.
- */ - cache_settings = (requested_cache_settings & - ~(HWMEM_ALLOC_HINT_UNCACHED | - HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | - HWMEM_ALLOC_HINT_INNER_CACHE_ONLY | - HWMEM_ALLOC_HINT_CACHE_NAOW)) | - HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED | - HWMEM_ALLOC_HINT_CACHE_AOW | - HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; - if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB | - HWMEM_ALLOC_HINT_CACHE_WT))) - cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB; - /* - * On ARMv7 "alloc on write" is just a hint so we need to assume the - * worst case ie "alloc on write". We would however like to remember - * the requested "alloc on write" setting so that we can pass it on to - * the hardware, we use the reserved bit in the alloc flags to do that. - */ - if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) - cache_settings |= HWMEM_ALLOC_RESERVED_CHI; - else - cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI; - - return cache_settings; -} - -void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, - pgprot_t *pgprot) -{ - if (cache_settings & HWMEM_ALLOC_HINT_CACHED) { - if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT) - *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, - L_PTE_MT_WRITETHROUGH); - else { - if (cache_settings & HWMEM_ALLOC_RESERVED_CHI) - *pgprot = __pgprot_modify(*pgprot, - L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC); - else - *pgprot = __pgprot_modify(*pgprot, - L_PTE_MT_MASK, L_PTE_MT_WRITEBACK); - } - } else { - *pgprot = pgprot_writecombine(*pgprot); - } -} - void drain_cpu_write_buf(void) { dsb(); diff --git a/arch/arm/mach-ux500/hwmem-int.c b/arch/arm/mach-ux500/hwmem-int.c new file mode 100644 index 00000000000..c23049df4a6 --- /dev/null +++ b/arch/arm/mach-ux500/hwmem-int.c @@ -0,0 +1,165 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Hardware memory driver integration + * + * Author: Johan Mossberg for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include +#include +#include +#include +#include + +/* CONA API */ +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size); +void *cona_alloc(void *instance, size_t size); +void cona_free(void *instance, void *alloc); +phys_addr_t cona_get_alloc_paddr(void *alloc); +void *cona_get_alloc_kaddr(void *instance, void *alloc); +size_t cona_get_alloc_size(void *alloc); + +struct hwmem_mem_type_struct *hwmem_mem_types; +unsigned int hwmem_num_mem_types; + +static phys_addr_t hwmem_paddr; +static size_t hwmem_size; + +static int __init parse_hwmem_param(char *p) +{ + hwmem_size = memparse(p, &p); + + if (*p != '@') + goto no_at; + + hwmem_paddr = memparse(p + 1, &p); + + return 0; + +no_at: + hwmem_size = 0; + + return -EINVAL; +} +early_param("hwmem", parse_hwmem_param); + +static int __init setup_hwmem(void) +{ + static const unsigned int NUM_MEM_TYPES = 2; + + int ret; + + if (hwmem_paddr != PAGE_ALIGN(hwmem_paddr) || + hwmem_size != PAGE_ALIGN(hwmem_size) || hwmem_size == 0) { + printk(KERN_WARNING "HWMEM: hwmem_paddr !=" + " PAGE_ALIGN(hwmem_paddr) || hwmem_size !=" + " PAGE_ALIGN(hwmem_size) || hwmem_size == 0\n"); + return -ENOMSG; + } + + hwmem_mem_types = kzalloc(sizeof(struct hwmem_mem_type_struct) * + NUM_MEM_TYPES, GFP_KERNEL); + if (hwmem_mem_types == NULL) + return -ENOMEM; + + hwmem_mem_types[0].id = HWMEM_MEM_SCATTERED_SYS; + hwmem_mem_types[0].allocator_api.alloc = cona_alloc; + hwmem_mem_types[0].allocator_api.free = cona_free; + hwmem_mem_types[0].allocator_api.get_alloc_paddr = + cona_get_alloc_paddr; + hwmem_mem_types[0].allocator_api.get_alloc_kaddr = + cona_get_alloc_kaddr; + hwmem_mem_types[0].allocator_api.get_alloc_size = cona_get_alloc_size; + hwmem_mem_types[0].allocator_instance = cona_create("hwmem", + hwmem_paddr, hwmem_size); + if (IS_ERR(hwmem_mem_types[0].allocator_instance)) { + ret = PTR_ERR(hwmem_mem_types[0].allocator_instance); + goto hwmem_ima_init_failed; + } + + hwmem_mem_types[1] = hwmem_mem_types[0]; + hwmem_mem_types[1].id = HWMEM_MEM_CONTIGUOUS_SYS; + + hwmem_num_mem_types = NUM_MEM_TYPES; + + return 0; + +hwmem_ima_init_failed: + kfree(hwmem_mem_types); + + return ret; +} +arch_initcall_sync(setup_hwmem); + +enum hwmem_alloc_flags cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings) +{ + static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | + HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; + + enum hwmem_alloc_flags cache_settings; + + if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) && + requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE)) + /* + * We never use uncached as it's extremely slow and there is + * no scenario where it would be better than buffered memory. + */ + return HWMEM_ALLOC_HINT_WRITE_COMBINE; + + /* + * The user has specified cached or nothing at all, both are treated as + * cached. 
+ */ + cache_settings = (requested_cache_settings & + ~(HWMEM_ALLOC_HINT_UNCACHED | + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY | + HWMEM_ALLOC_HINT_CACHE_NAOW)) | + HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; + if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_WT))) + cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB; + /* + * On ARMv7 "alloc on write" is just a hint so we need to assume the + * worst case ie "alloc on write". We would however like to remember + * the requested "alloc on write" setting so that we can pass it on to + * the hardware, we use the reserved bit in the alloc flags to do that. + */ + if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) + cache_settings |= HWMEM_ALLOC_RESERVED_CHI; + else + cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI; + + return cache_settings; +} + +void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, + pgprot_t *pgprot) +{ + if (cache_settings & HWMEM_ALLOC_HINT_CACHED) { + if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT) + *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK, + L_PTE_MT_WRITETHROUGH); + else { + if (cache_settings & HWMEM_ALLOC_RESERVED_CHI) + *pgprot = __pgprot_modify(*pgprot, + L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC); + else + *pgprot = __pgprot_modify(*pgprot, + L_PTE_MT_MASK, L_PTE_MT_WRITEBACK); + } + } else { + *pgprot = pgprot_writecombine(*pgprot); + } +} diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile index 18da2ad7817..c307616a181 100644 --- a/drivers/misc/hwmem/Makefile +++ b/drivers/misc/hwmem/Makefile @@ -1,3 +1,3 @@ -hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o +hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o obj-$(CONFIG_HWMEM) += hwmem.o diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c new file mode 100644 index 00000000000..bc71ca08f0f --- /dev/null +++ b/drivers/misc/hwmem/contig_alloc.c @@ -0,0 +1,468 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Contiguous memory allocator + * + * Author: Marcus Lorentzon , + * Johan Mossberg for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_INSTANCE_NAME_LENGTH 31 + +struct alloc { + struct list_head list; + + bool in_use; + phys_addr_t paddr; + size_t size; +}; + +struct instance { + struct list_head list; + + char name[MAX_INSTANCE_NAME_LENGTH + 1]; + + phys_addr_t region_paddr; + void *region_kaddr; + size_t region_size; + + struct list_head alloc_list; + +#ifdef CONFIG_DEBUG_FS + struct inode *debugfs_inode; +#endif /* #ifdef CONFIG_DEBUG_FS */ +}; + +static LIST_HEAD(instance_list); + +static DEFINE_MUTEX(lock); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size); +void *cona_alloc(void *instance, size_t size); +void cona_free(void *instance, void *alloc); +phys_addr_t cona_get_alloc_paddr(void *alloc); +void *cona_get_alloc_kaddr(void *instance, void *alloc); +size_t cona_get_alloc_size(void *alloc); + +static int init_alloc_list(struct instance *instance); +static void clean_alloc_list(struct instance *instance); +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size); +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size); +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size) +{ + int ret; + struct instance *instance; + struct vm_struct *vm_area; + + if (region_size == 0) + return ERR_PTR(-EINVAL); + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (instance == NULL) + return ERR_PTR(-ENOMEM); + + memcpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH + 1); + /* Truncate name if necessary */ + instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0'; + instance->region_paddr = region_paddr; + instance->region_size = region_size; + + vm_area = get_vm_area(region_size, VM_IOREMAP); + if (vm_area == NULL) { + printk(KERN_WARNING "CONA: Failed to allocate %u bytes" + " kernel virtual memory", region_size); + ret = -ENOMSG; + goto vmem_alloc_failed; + } + instance->region_kaddr = vm_area->addr; + + INIT_LIST_HEAD(&instance->alloc_list); + ret = init_alloc_list(instance); + if (ret < 0) + goto init_alloc_list_failed; + + mutex_lock(&lock); + list_add_tail(&instance->list, &instance_list); + mutex_unlock(&lock); + + return instance; + +init_alloc_list_failed: + vm_area = remove_vm_area(instance->region_kaddr); + if (vm_area == NULL) + printk(KERN_ERR "CONA: Failed to free kernel virtual memory," + " resource leak!\n"); + + kfree(vm_area); +vmem_alloc_failed: + kfree(instance); + + return ERR_PTR(ret); +} + +void *cona_alloc(void *instance, size_t size) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc; + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + alloc = find_free_alloc_bestfit(instance_l, size); + if (IS_ERR(alloc)) + goto out; + if (size < alloc->size) { + alloc = split_allocation(alloc, size); + if (IS_ERR(alloc)) + goto out; + } else { + alloc->in_use = true; + } + +out: + mutex_unlock(&lock); + + return alloc; +} + +void cona_free(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc_l = (struct alloc *)alloc; + struct alloc *other; + + mutex_lock(&lock); + + alloc_l->in_use = false; + + other = list_entry(alloc_l->list.prev, struct alloc, list); + if ((alloc_l->list.prev != &instance_l->alloc_list) && + !other->in_use) { + other->size += alloc_l->size; + 
list_del(&alloc_l->list); + kfree(alloc_l); + alloc_l = other; + } + other = list_entry(alloc_l->list.next, struct alloc, list); + if ((alloc_l->list.next != &instance_l->alloc_list) && + !other->in_use) { + alloc_l->size += other->size; + list_del(&other->list); + kfree(other); + } + + mutex_unlock(&lock); +} + +phys_addr_t cona_get_alloc_paddr(void *alloc) +{ + return ((struct alloc *)alloc)->paddr; +} + +void *cona_get_alloc_kaddr(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + + return instance_l->region_kaddr + get_alloc_offset(instance_l, + (struct alloc *)alloc); +} + +size_t cona_get_alloc_size(void *alloc) +{ + return ((struct alloc *)alloc)->size; +} + +static int init_alloc_list(struct instance *instance) +{ + /* + * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't + * handle that. + */ + int ret; + u32 curr_pos = instance->region_paddr; + u32 region_end = instance->region_paddr + instance->region_size; + u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1); + struct alloc *alloc; + + if (PAGE_SIZE >= SZ_64M) { + printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n"); + return -ENOMSG; + } + + while (next_64mib_boundary < region_end) { + if (next_64mib_boundary - curr_pos > PAGE_SIZE) { + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = next_64mib_boundary - curr_pos - + PAGE_SIZE; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = PAGE_SIZE; + alloc->in_use = true; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + + next_64mib_boundary += SZ_64M; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = region_end - curr_pos; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + + return 0; + +error: + clean_alloc_list(instance); + + return ret; +} + +static void clean_alloc_list(struct instance *instance) +{ + while (list_empty(&instance->alloc_list) == 0) { + struct alloc *i = list_first_entry(&instance->alloc_list, + struct alloc, list); + + list_del(&i->list); + + kfree(i); + } +} + +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size) +{ + size_t best_diff = ~(size_t)0; + struct alloc *alloc = NULL, *i; + + list_for_each_entry(i, &instance->alloc_list, list) { + size_t diff = i->size - size; + if (i->in_use || i->size < size) + continue; + if (diff < best_diff) { + alloc = i; + best_diff = diff; + } + } + + return alloc != NULL ? 
alloc : ERR_PTR(-ENOMEM); +} + +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size) +{ + struct alloc *new_alloc; + + new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (new_alloc == NULL) + return ERR_PTR(-ENOMEM); + + new_alloc->in_use = true; + new_alloc->paddr = alloc->paddr; + new_alloc->size = new_alloc_size; + alloc->size -= new_alloc_size; + alloc->paddr += new_alloc_size; + + list_add_tail(&new_alloc->list, &alloc->list); + + return new_alloc; +} + +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc) +{ + return alloc->paddr - instance->region_paddr; +} + +/* Debug */ + +#ifdef CONFIG_DEBUG_FS + +static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size); +static struct instance *get_instance_from_file(struct file *file); +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size) +{ + int ret; + int i; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t" + "in use: %1u\n", alloc->paddr, alloc->size, + alloc->in_use); + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static struct instance *get_instance_from_file(struct file *file) +{ + struct instance *curr_instance; + + list_for_each_entry(curr_instance, &instance_list, list) { + if (file->f_dentry->d_inode == curr_instance->debugfs_inode) + return curr_instance; + } + + return ERR_PTR(-ENOENT); +} + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. + */ + + int ret; + struct instance *instance; + struct alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is intialized to NULL in open which I assume is 0. */ + void **curr_pos = &file->private_data; + size_t bytes_read; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + + instance = get_instance_from_file(file); + if (IS_ERR(instance)) { + ret = PTR_ERR(instance); + goto out; + } + + list_for_each_entry(curr_alloc, &instance->alloc_list, list) { + phys_addr_t alloc_offset = get_alloc_offset(instance, + curr_alloc); + if (alloc_offset < (phys_addr_t)*curr_pos) + continue; + + ret = print_alloc(curr_alloc, &local_buf_pos, available_space - + (size_t)(local_buf_pos - local_buf)); + if (ret == -EINVAL) /* No more room */ + break; + else if (ret < 0) + goto out; + + /* + * There could be an overflow issue here in the unlikely case + * where the region is placed at the end of the address range + * and the last alloc is 1 byte large. Since this is debug code + * and that case most likely never will happen I've chosen to + * defer fixing it till it happens. 
+ */ + *curr_pos = (void *)(alloc_offset + 1); + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + + mutex_unlock(&lock); + + return ret; +} + +static int __init init_debugfs(void) +{ + struct instance *curr_instance; + struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL); + + mutex_lock(&lock); + + list_for_each_entry(curr_instance, &instance_list, list) { + struct dentry *file_dentry; + char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1]; + tmp_str[0] = '\0'; + strcat(tmp_str, curr_instance->name); + strcat(tmp_str, "_allocs"); + file_dentry = debugfs_create_file(tmp_str, 0444, + debugfs_root_dir, 0, &debugfs_allocs_fops); + if (file_dentry != NULL) + curr_instance->debugfs_inode = file_dentry->d_inode; + } + + mutex_unlock(&lock); + + return 0; +} +/* + * Must be executed after all instances have been created, hence the + * late_initcall. + */ +late_initcall(init_debugfs); + +#endif /* #ifdef CONFIG_DEBUG_FS */ diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c index 96cca3735dd..9162ff4fc91 100644 --- a/drivers/misc/hwmem/hwmem-main.c +++ b/drivers/misc/hwmem/hwmem-main.c @@ -25,11 +25,13 @@ #include #include #include -#include #include -#include +#include +#include #include "cache_handler.h" +#define S32_MAX 2147483647 + struct hwmem_alloc_threadg_info { struct list_head list; @@ -42,10 +44,14 @@ struct hwmem_alloc { struct list_head list; atomic_t ref_cnt; + enum hwmem_alloc_flags flags; - u32 paddr; + struct hwmem_mem_type_struct *mem_type; + + void *allocator_hndl; + phys_addr_t paddr; void *kaddr; - u32 size; + size_t size; s32 name; /* Access control */ @@ -54,14 +60,16 @@ struct hwmem_alloc { /* Cache handling */ struct cach_buf cach_buf; + +#ifdef CONFIG_DEBUG_FS + /* Debug */ + void *creator; + pid_t creator_tgid; +#endif /* #ifdef CONFIG_DEBUG_FS */ }; static struct platform_device *hwdev; -static u32 hwmem_paddr; -static void *hwmem_kaddr; -static u32 hwmem_size; - static LIST_HEAD(alloc_list); static DEFINE_IDR(global_idr); static DEFINE_MUTEX(lock); @@ -73,28 +81,11 @@ static struct vm_operations_struct vm_ops = { .close = vm_close, }; -#ifdef CONFIG_DEBUG_FS - -static int debugfs_allocs_read(struct file *filp, char __user *buf, - size_t count, loff_t *f_pos); -static const struct file_operations debugfs_allocs_fops = { - .owner = THIS_MODULE, - .read = debugfs_allocs_read, -}; - -#endif /* #ifdef CONFIG_DEBUG_FS */ - -static void clean_alloc_list(void); static void kunmap_alloc(struct hwmem_alloc *alloc); /* Helpers */ -static u32 get_alloc_offset(struct hwmem_alloc *alloc) -{ - return alloc->paddr - hwmem_paddr; -} - -static void destroy_hwmem_alloc_threadg_info( +static void destroy_alloc_threadg_info( struct hwmem_alloc_threadg_info *info) { if (info->threadg_pid) @@ -103,14 +94,15 @@ static void destroy_hwmem_alloc_threadg_info( kfree(info); } -static void clean_hwmem_alloc_threadg_info_list(struct hwmem_alloc *alloc) +static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc) { struct hwmem_alloc_threadg_info *info; struct hwmem_alloc_threadg_info *tmp; - list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list), list) { + list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list), + list) { list_del(&info->list); - destroy_hwmem_alloc_threadg_info(info); + destroy_alloc_threadg_info(info); } } @@ -147,213 +139,45 @@ static void clear_alloc_mem(struct hwmem_alloc *alloc) 
memset(alloc->kaddr, 0, alloc->size); } -static void clean_alloc(struct hwmem_alloc *alloc) +static void destroy_alloc(struct hwmem_alloc *alloc) { - if (alloc->name) { + list_del(&alloc->list); + + if (alloc->name != 0) { idr_remove(&global_idr, alloc->name); alloc->name = 0; } - alloc->flags = 0; - atomic_set(&alloc->ref_cnt, 0); - - clean_hwmem_alloc_threadg_info_list(alloc); + clean_alloc_threadg_info_list(alloc); kunmap_alloc(alloc); -} -static void destroy_alloc(struct hwmem_alloc *alloc) -{ - clean_alloc(alloc); + if (!IS_ERR_OR_NULL(alloc->allocator_hndl)) + alloc->mem_type->allocator_api.free( + alloc->mem_type->allocator_instance, + alloc->allocator_hndl); kfree(alloc); } -static void __hwmem_release(struct hwmem_alloc *alloc) -{ - struct hwmem_alloc *other; - - clean_alloc(alloc); - - other = list_entry(alloc->list.prev, struct hwmem_alloc, list); - if ((alloc->list.prev != &alloc_list) && - atomic_read(&other->ref_cnt) == 0) { - other->size += alloc->size; - list_del(&alloc->list); - destroy_alloc(alloc); - alloc = other; - } - other = list_entry(alloc->list.next, struct hwmem_alloc, list); - if ((alloc->list.next != &alloc_list) && - atomic_read(&other->ref_cnt) == 0) { - alloc->size += other->size; - list_del(&other->list); - destroy_alloc(other); - } -} - -static struct hwmem_alloc *find_free_alloc_bestfit(u32 size) -{ - u32 best_diff = ~0; - struct hwmem_alloc *alloc = NULL, *i; - - list_for_each_entry(i, &alloc_list, list) { - u32 diff = i->size - size; - if (atomic_read(&i->ref_cnt) > 0 || i->size < size) - continue; - if (diff < best_diff) { - alloc = i; - best_diff = diff; - } - } - - return alloc != NULL ? alloc : ERR_PTR(-ENOMEM); -} - -static struct hwmem_alloc *split_allocation(struct hwmem_alloc *alloc, - u32 new_alloc_size) -{ - struct hwmem_alloc *new_alloc; - - new_alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); - if (new_alloc == NULL) - return ERR_PTR(-ENOMEM); - - atomic_inc(&new_alloc->ref_cnt); - INIT_LIST_HEAD(&new_alloc->threadg_info_list); - new_alloc->paddr = alloc->paddr; - new_alloc->size = new_alloc_size; - alloc->size -= new_alloc_size; - alloc->paddr += new_alloc_size; - - list_add_tail(&new_alloc->list, &alloc->list); - - return new_alloc; -} - -static int init_alloc_list(void) -{ - /* - * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't - * handle that. 
- */ - int ret; - u32 curr_pos = hwmem_paddr; - u32 hwmem_end = hwmem_paddr + hwmem_size; - u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1); - struct hwmem_alloc *alloc; - - if (PAGE_SIZE >= SZ_64M) { - dev_err(&hwdev->dev, "PAGE_SIZE >= SZ_64M\n"); - return -ENOMSG; - } - - while (next_64mib_boundary < hwmem_end) { - if (next_64mib_boundary - curr_pos > PAGE_SIZE) { - alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); - if (alloc == NULL) { - ret = -ENOMEM; - goto error; - } - alloc->paddr = curr_pos; - alloc->size = next_64mib_boundary - curr_pos - - PAGE_SIZE; - INIT_LIST_HEAD(&alloc->threadg_info_list); - list_add_tail(&alloc->list, &alloc_list); - curr_pos = alloc->paddr + alloc->size; - } - - alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); - if (alloc == NULL) { - ret = -ENOMEM; - goto error; - } - alloc->paddr = curr_pos; - alloc->size = PAGE_SIZE; - atomic_inc(&alloc->ref_cnt); - INIT_LIST_HEAD(&alloc->threadg_info_list); - list_add_tail(&alloc->list, &alloc_list); - curr_pos = alloc->paddr + alloc->size; - - next_64mib_boundary += SZ_64M; - } - - alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); - if (alloc == NULL) { - ret = -ENOMEM; - goto error; - } - alloc->paddr = curr_pos; - alloc->size = hwmem_end - curr_pos; - INIT_LIST_HEAD(&alloc->threadg_info_list); - list_add_tail(&alloc->list, &alloc_list); - - return 0; - -error: - clean_alloc_list(); - - return ret; -} - -static void clean_alloc_list(void) -{ - while (list_empty(&alloc_list) == 0) { - struct hwmem_alloc *i = list_first_entry(&alloc_list, - struct hwmem_alloc, list); - - list_del(&i->list); - - destroy_alloc(i); - } -} - -static int alloc_kaddrs(void) -{ - struct vm_struct *area = get_vm_area(hwmem_size, VM_IOREMAP); - if (area == NULL) { - dev_info(&hwdev->dev, "Failed to allocate %u bytes kernel" - " virtual memory", hwmem_size); - return -ENOMSG; - } - - hwmem_kaddr = area->addr; - - return 0; -} - -static void free_kaddrs(void) -{ - struct vm_struct *area; - - if (hwmem_kaddr == NULL) - return; - - area = remove_vm_area(hwmem_kaddr); - if (area == NULL) - dev_err(&hwdev->dev, - "Failed to free kernel virtual memory," - " resource leak!\n"); - - kfree(area); - - hwmem_kaddr = NULL; -} - static int kmap_alloc(struct hwmem_alloc *alloc) { int ret; pgprot_t pgprot; - void *alloc_kaddr = hwmem_kaddr + get_alloc_offset(alloc); + void *alloc_kaddr; + + alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr( + alloc->mem_type->allocator_instance, alloc->allocator_hndl); + if (IS_ERR(alloc_kaddr)) + return PTR_ERR(alloc_kaddr); pgprot = PAGE_KERNEL; cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); ret = ioremap_page_range((unsigned long)alloc_kaddr, - (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, - pgprot); + (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot); if (ret < 0) { - dev_info(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, + dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, alloc->paddr + alloc->size); return ret; } @@ -369,20 +193,33 @@ static void kunmap_alloc(struct hwmem_alloc *alloc) return; unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size); + alloc->kaddr = NULL; } +static struct hwmem_mem_type_struct *resolve_mem_type( + enum hwmem_mem_type mem_type) +{ + unsigned int i; + for (i = 0; i < hwmem_num_mem_types; i++) { + if (hwmem_mem_types[i].id == mem_type) + return &hwmem_mem_types[i]; + } + + return ERR_PTR(-ENOENT); +} + /* HWMEM API */ -struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags 
flags, +struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags, enum hwmem_access def_access, enum hwmem_mem_type mem_type) { - struct hwmem_alloc *alloc; int ret; + struct hwmem_alloc *alloc; - if (!hwdev) { - printk(KERN_ERR "hwmem: Badly configured\n"); - return ERR_PTR(-EINVAL); + if (hwdev == NULL) { + printk(KERN_ERR "HWMEM: Badly configured\n"); + return ERR_PTR(-ENOMSG); } if (size == 0) @@ -392,38 +229,56 @@ struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags, size = PAGE_ALIGN(size); - alloc = find_free_alloc_bestfit(size); - if (IS_ERR(alloc)) { - dev_info(&hwdev->dev, "Could not find slot for %u bytes" - " allocation\n", size); - goto no_slot; - } - - if (size < alloc->size) { - alloc = split_allocation(alloc, size); - if (IS_ERR(alloc)) - goto split_alloc_failed; - } else { - atomic_inc(&alloc->ref_cnt); + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto alloc_alloc_failed; } + INIT_LIST_HEAD(&alloc->list); + atomic_inc(&alloc->ref_cnt); alloc->flags = flags; alloc->default_access = def_access; + INIT_LIST_HEAD(&alloc->threadg_info_list); + alloc->creator = __builtin_return_address(0); + alloc->creator_tgid = task_tgid_nr(current); + + alloc->mem_type = resolve_mem_type(mem_type); + if (IS_ERR(alloc->mem_type)) { + ret = PTR_ERR(alloc->mem_type); + goto resolve_mem_type_failed; + } + + alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc( + alloc->mem_type->allocator_instance, size); + if (IS_ERR(alloc->allocator_hndl)) { + ret = PTR_ERR(alloc->allocator_hndl); + goto allocator_failed; + } + + alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr( + alloc->allocator_hndl); + alloc->size = alloc->mem_type->allocator_api.get_alloc_size( + alloc->allocator_hndl); + cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size); ret = kmap_alloc(alloc); if (ret < 0) goto kmap_alloc_failed; cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr); + list_add_tail(&alloc->list, &alloc_list); + clear_alloc_mem(alloc); goto out; kmap_alloc_failed: - __hwmem_release(alloc); +allocator_failed: +resolve_mem_type_failed: + destroy_alloc(alloc); +alloc_alloc_failed: alloc = ERR_PTR(ret); -split_alloc_failed: -no_slot: out: mutex_unlock(&lock); @@ -437,7 +292,7 @@ void hwmem_release(struct hwmem_alloc *alloc) mutex_lock(&lock); if (atomic_dec_and_test(&alloc->ref_cnt)) - __hwmem_release(alloc); + destroy_alloc(alloc); mutex_unlock(&lock); } @@ -457,7 +312,7 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, EXPORT_SYMBOL(hwmem_set_domain); int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks, - u32 *mem_chunks_length) + u32 *mem_chunks_length) { if (*mem_chunks_length < 1) { *mem_chunks_length = 1; @@ -615,7 +470,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size, if (size != NULL) *size = alloc->size; if (mem_type != NULL) - *mem_type = HWMEM_MEM_CONTIGUOUS_SYS; + *mem_type = alloc->mem_type->id; if (access != NULL) *access = get_access(alloc); @@ -623,7 +478,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size, } EXPORT_SYMBOL(hwmem_get_info); -int hwmem_get_name(struct hwmem_alloc *alloc) +s32 hwmem_get_name(struct hwmem_alloc *alloc) { int ret = 0, name; @@ -647,11 +502,18 @@ int hwmem_get_name(struct hwmem_alloc *alloc) goto get_id_failed; } + if (name > S32_MAX) { + ret = -ENOMSG; + goto overflow; + } + alloc->name = name; ret = name; goto out; +overflow: + idr_remove(&global_idr, name); get_id_failed: 
pre_get_id_failed: @@ -688,29 +550,62 @@ EXPORT_SYMBOL(hwmem_resolve_by_name); /* Debug */ +#ifdef CONFIG_DEBUG_FS + +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size) { int ret; - - if (buf_size < 134) - return -EINVAL; - - ret = sprintf(*buf, "paddr: %#10x\tsize: %10u\tref cnt: %2i\t" - "name: %#10x\tflags: %#4x\t$ settings: %#4x\t" - "def acc: %#3x\n", alloc->paddr, alloc->size, - atomic_read(&alloc->ref_cnt), alloc->name, - alloc->flags, alloc->cach_buf.cache_settings, - alloc->default_access); - if (ret < 0) - return -ENOMSG; + char creator[KSYM_SYMBOL_LEN]; + int i; + + if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0) + creator[0] = '\0'; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, + "%#x\n" + "\tSize: %u\n" + "\tMemory type: %u\n" + "\tName: %#x\n" + "\tReference count: %i\n" + "\tAllocation flags: %#x\n" + "\t$ settings: %#x\n" + "\tDefault access: %#x\n" + "\tPhysical address: %#x\n" + "\tKernel virtual address: %#x\n" + "\tCreator: %s\n" + "\tCreator thread group id: %u\n", + (unsigned int)alloc, alloc->size, alloc->mem_type->id, + alloc->name, atomic_read(&alloc->ref_cnt), + alloc->flags, alloc->cach_buf.cache_settings, + alloc->default_access, alloc->paddr, + (unsigned int)alloc->kaddr, creator, + alloc->creator_tgid); + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } *buf += ret; return 0; } -#ifdef CONFIG_DEBUG_FS - static int debugfs_allocs_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) { @@ -721,12 +616,13 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, */ int ret; + size_t i = 0; struct hwmem_alloc *curr_alloc; char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); char *local_buf_pos = local_buf; size_t available_space = min((size_t)PAGE_SIZE, count); /* private_data is intialized to NULL in open which I assume is 0. 
*/ - u32 *curr_pos = (u32 *)&file->private_data; + void **curr_pos = &file->private_data; size_t bytes_read; if (local_buf == NULL) @@ -735,9 +631,7 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, mutex_lock(&lock); list_for_each_entry(curr_alloc, &alloc_list, list) { - u32 alloc_offset = get_alloc_offset(curr_alloc); - - if (alloc_offset < *curr_pos) + if (i++ < (size_t)*curr_pos) continue; ret = print_alloc(curr_alloc, &local_buf_pos, available_space - @@ -747,7 +641,7 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, else if (ret < 0) goto out; - *curr_pos = alloc_offset + 1; + *curr_pos = (void *)i; } bytes_read = (size_t)(local_buf_pos - local_buf); @@ -779,39 +673,17 @@ static void init_debugfs(void) /* Module */ extern int hwmem_ioctl_init(void); -extern void hwmem_ioctl_exit(void); static int __devinit hwmem_probe(struct platform_device *pdev) { - int ret = 0; - struct hwmem_platform_data *platform_data = pdev->dev.platform_data; - - if (sizeof(int) != 4 || sizeof(phys_addr_t) < 4 || - sizeof(void *) < 4 || sizeof(size_t) != 4) { - dev_err(&pdev->dev, "sizeof(int) != 4 || sizeof(phys_addr_t)" - " < 4 || sizeof(void *) < 4 || sizeof(size_t) !=" - " 4\n"); - return -ENOMSG; - } + int ret; - if (hwdev || platform_data->size == 0 || - platform_data->start != PAGE_ALIGN(platform_data->start) || - platform_data->size != PAGE_ALIGN(platform_data->size)) { - dev_err(&pdev->dev, "hwdev || platform_data->size == 0 ||" - "platform_data->start !=" - " PAGE_ALIGN(platform_data->start) ||" - "platform_data->size !=" - " PAGE_ALIGN(platform_data->size)\n"); + if (hwdev) { + dev_err(&pdev->dev, "Probed multiple times\n"); return -EINVAL; } hwdev = pdev; - hwmem_paddr = platform_data->start; - hwmem_size = platform_data->size; - - ret = alloc_kaddrs(); - if (ret < 0) - goto alloc_kaddrs_failed; /* * No need to flush the caches here. If we can keep track of the cache @@ -820,32 +692,18 @@ static int __devinit hwmem_probe(struct platform_device *pdev) * in the caches. 
*/ - ret = init_alloc_list(); - if (ret < 0) - goto init_alloc_list_failed; - ret = hwmem_ioctl_init(); - if (ret) - goto ioctl_init_failed; + if (ret < 0) + dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing" " anyway\n"); #ifdef CONFIG_DEBUG_FS init_debugfs(); #endif - dev_info(&pdev->dev, "Hwmem probed, device contains %#x bytes\n", - hwmem_size); - - goto out; - -ioctl_init_failed: - clean_alloc_list(); -init_alloc_list_failed: - free_kaddrs(); -alloc_kaddrs_failed: - hwdev = NULL; + dev_info(&pdev->dev, "Probed OK\n"); -out: - return ret; + return 0; } static struct platform_driver hwmem_driver = { diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h index a2eb91d9d9a..ba4c116f4b9 100644 --- a/include/linux/hwmem.h +++ b/include/linux/hwmem.h @@ -569,15 +569,25 @@ s32 hwmem_get_name(struct hwmem_alloc *alloc); */ struct hwmem_alloc *hwmem_resolve_by_name(s32 name); -/* Internal */ +/* Integration */ + +struct hwmem_allocator_api { + void *(*alloc)(void *instance, size_t size); + void (*free)(void *instance, void *alloc); + phys_addr_t (*get_alloc_paddr)(void *alloc); + void *(*get_alloc_kaddr)(void *instance, void *alloc); + size_t (*get_alloc_size)(void *alloc); +}; -struct hwmem_platform_data { - /* Physical address of memory region */ - u32 start; - /* Size of memory region */ - u32 size; +struct hwmem_mem_type_struct { + enum hwmem_mem_type id; + struct hwmem_allocator_api allocator_api; + void *allocator_instance; }; +extern struct hwmem_mem_type_struct *hwmem_mem_types; +extern unsigned int hwmem_num_mem_types; + #endif #endif /* _HWMEM_H_ */ -- cgit v1.2.3 From 7d9f5c80a002abbb99320d0e98a5f334a60758db Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Tue, 5 Jul 2011 14:35:32 +0200 Subject: Lee's chunk to make the kernel compile Signed-off-by: Lee Jones --- drivers/misc/hwmem/hwmem-main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c index 9162ff4fc91..b91d99bc2be 100644 --- a/drivers/misc/hwmem/hwmem-main.c +++ b/drivers/misc/hwmem/hwmem-main.c @@ -240,9 +240,10 @@ struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags, alloc->flags = flags; alloc->default_access = def_access; INIT_LIST_HEAD(&alloc->threadg_info_list); +#ifdef CONFIG_DEBUG_FS alloc->creator = __builtin_return_address(0); alloc->creator_tgid = task_tgid_nr(current); - +#endif alloc->mem_type = resolve_mem_type(mem_type); if (IS_ERR(alloc->mem_type)) { ret = PTR_ERR(alloc->mem_type); -- cgit v1.2.3 From ad82fbae002df172d91aaef2349d9569c8cd15f8 Mon Sep 17 00:00:00 2001 From: Robert Fekete Date: Thu, 22 Sep 2011 15:10:28 +0200 Subject: hwmem: Add peak memory usage in debugfs prints When reading debugfs/cona/hwmem_allocs with cat you will now see the current memory usage, the peak amount of hwmem memory used since boot, and the size of the biggest free block.
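The bookkeeping behind these numbers is simple: a per-instance byte counter is adjusted on every alloc and free, and the peak is the high-water mark of that counter. A minimal sketch under assumed names (the patch itself keeps these in the cona_status_* fields on struct instance):

#include <linux/types.h>

struct cona_stats {
	size_t cur_used;	/* bytes currently allocated */
	size_t peak_used;	/* high-water mark since boot */
};

static void stats_on_alloc(struct cona_stats *s, size_t size)
{
	s->cur_used += size;
	if (s->cur_used > s->peak_used)
		s->peak_used = s->cur_used;	/* record new peak */
}

static void stats_on_free(struct cona_stats *s, size_t size)
{
	/* The peak is deliberately never lowered on free. */
	s->cur_used -= size;
}

The biggest free block, by contrast, cannot be maintained this cheaply, so the patch recomputes it while walking the allocation list during the debugfs read.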
ST-Ericsson ID: 359308 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I287b711c8e950cc3370a2cc47672f8aefdd873e4 Signed-off-by: Robert Fekete Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/31835 Reviewed-by: Maciej SOCHA Reviewed-by: Jimmy RUBIN Reviewed-by: Anders BAUER Reviewed-by: Linus WALLEIJ --- drivers/misc/hwmem/contig_alloc.c | 125 ++++++++++++++++++++++++++++++++++---- 1 file changed, 114 insertions(+), 11 deletions(-) diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c index bc71ca08f0f..31533ed5988 100644 --- a/drivers/misc/hwmem/contig_alloc.c +++ b/drivers/misc/hwmem/contig_alloc.c @@ -43,6 +43,12 @@ struct instance { #ifdef CONFIG_DEBUG_FS struct inode *debugfs_inode; + int cona_status_free; + int cona_status_used; + int cona_status_max_cont; + int cona_status_max_check; + int cona_status_biggest_free; + int cona_status_printed; #endif /* #ifdef CONFIG_DEBUG_FS */ }; @@ -140,6 +146,12 @@ void *cona_alloc(void *instance, size_t size) } else { alloc->in_use = true; } +#ifdef CONFIG_DEBUG_FS + instance_l->cona_status_max_cont += alloc->size; + instance_l->cona_status_max_check = + max(instance_l->cona_status_max_check, + instance_l->cona_status_max_cont); +#endif /* #ifdef CONFIG_DEBUG_FS */ out: mutex_unlock(&lock); @@ -157,6 +169,10 @@ void cona_free(void *instance, void *alloc) alloc_l->in_use = false; +#ifdef CONFIG_DEBUG_FS + instance_l->cona_status_max_cont -= alloc_l->size; +#endif /* #ifdef CONFIG_DEBUG_FS */ + other = list_entry(alloc_l->list.prev, struct alloc, list); if ((alloc_l->list.prev != &instance_l->alloc_list) && !other->in_use) { @@ -237,6 +253,10 @@ static int init_alloc_list(struct instance *instance) list_add_tail(&alloc->list, &instance->alloc_list); curr_pos = alloc->paddr + alloc->size; +#ifdef CONFIG_DEBUG_FS + instance->cona_status_max_cont += alloc->size; +#endif /* #ifdef CONFIG_DEBUG_FS */ + next_64mib_boundary += SZ_64M; } @@ -319,7 +339,10 @@ static phys_addr_t get_alloc_offset(struct instance *instance, #ifdef CONFIG_DEBUG_FS -static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size); +static int print_alloc(struct instance *instance, struct alloc *alloc, + char **buf, size_t buf_size); +static int print_alloc_status(struct instance *instance, char **buf, + size_t buf_size); static struct instance *get_instance_from_file(struct file *file); static int debugfs_allocs_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos); @@ -329,7 +352,8 @@ static const struct file_operations debugfs_allocs_fops = { .read = debugfs_allocs_read, }; -static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size) +static int print_alloc(struct instance *instance, struct alloc *alloc, + char **buf, size_t buf_size) { int ret; int i; @@ -341,9 +365,64 @@ static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size) else buf_size_l = buf_size; + if (i == 1) { + if (alloc->in_use) + instance->cona_status_used += alloc->size; + else + instance->cona_status_free += alloc->size; + } + + if (!alloc->in_use) { + instance->cona_status_biggest_free = + max((size_t)alloc->size, + (size_t)instance->cona_status_biggest_free); + } + ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t" - "in use: %1u\n", alloc->paddr, alloc->size, - alloc->in_use); + "in use: %1u\t used: %10u (%dMB)" + " \t free: %10u (%dMB)\n", + alloc->paddr, + alloc->size, + alloc->in_use, + instance->cona_status_used, + instance->cona_status_used/1024/1024, + instance->cona_status_free, + 
instance->cona_status_free/1024/1024); + + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static int print_alloc_status(struct instance *instance, char **buf, + size_t buf_size) +{ + int ret; + int i; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u " + "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n" + "Current biggest free:\t%10d (%dMB)\n", + instance->cona_status_max_check, + instance->cona_status_max_check/1024/1024, + instance->cona_status_max_cont, + instance->cona_status_max_cont/1024/1024, + instance->cona_status_biggest_free, + instance->cona_status_biggest_free/1024/1024); + if (ret < 0) return -ENOMSG; else if (ret + 1 > buf_size) @@ -385,12 +464,12 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, /* private_data is intialized to NULL in open which I assume is 0. */ void **curr_pos = &file->private_data; size_t bytes_read; + bool readout_aborted = false; if (local_buf == NULL) return -ENOMEM; mutex_lock(&lock); - instance = get_instance_from_file(file); if (IS_ERR(instance)) { ret = PTR_ERR(instance); @@ -403,13 +482,16 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, if (alloc_offset < (phys_addr_t)*curr_pos) continue; - ret = print_alloc(curr_alloc, &local_buf_pos, available_space - - (size_t)(local_buf_pos - local_buf)); - if (ret == -EINVAL) /* No more room */ + ret = print_alloc(instance, curr_alloc, &local_buf_pos, + available_space - (size_t)(local_buf_pos - + local_buf)); + + if (ret == -EINVAL) { /* No more room */ + readout_aborted = true; break; - else if (ret < 0) + } else if (ret < 0) { goto out; - + } /* * There could be an overflow issue here in the unlikely case * where the region is placed at the end of the address range @@ -418,6 +500,28 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, * defer fixing it till it happens. */ *curr_pos = (void *)(alloc_offset + 1); + + /* Make sure to also print status if there were any prints */ + instance->cona_status_printed = false; + } + + if (!readout_aborted && !instance->cona_status_printed) { + ret = print_alloc_status(instance, &local_buf_pos, + available_space - + (size_t)(local_buf_pos - local_buf)); + + if (ret == -EINVAL) /* No more room */ + readout_aborted = true; + else if (ret < 0) + goto out; + else + instance->cona_status_printed = true; + } + + if (!readout_aborted) { + instance->cona_status_free = 0; + instance->cona_status_used = 0; + instance->cona_status_biggest_free = 0; } bytes_read = (size_t)(local_buf_pos - local_buf); @@ -430,7 +534,6 @@ static int debugfs_allocs_read(struct file *file, char __user *buf, out: kfree(local_buf); - mutex_unlock(&lock); return ret; -- cgit v1.2.3 From 9a5a2fe75bbe1d295abbb75026caed22efa671a1 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Thu, 20 Oct 2011 08:37:23 +0100 Subject: misc: Compile hwmem misc driver Signed-off-by: Philippe Langlais --- drivers/misc/Kconfig | 8 ++++++++ drivers/misc/Makefile | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index c7795096d43..f397b1f5259 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -481,6 +481,14 @@ config PCH_PHUB To compile this driver as a module, choose M here: the module will be called pch_phub. 
+config HWMEM + bool "Hardware memory driver" + default n + help + This driver provides a way to allocate contiguous system memory which + can be used by hardware. It also enables accessing hwmem allocated + memory buffers through a secure id which can be shared across processes. + config USB_SWITCH_FSA9480 tristate "FSA9480 USB Switch" depends on I2C diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 3e1d80106f0..ef1c665aeed 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -46,6 +46,7 @@ obj-y += ti-st/ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o obj-y += lis3lv02d/ obj-y += carma/ +obj-$(CONFIG_HWMEM) += hwmem/ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o -- cgit v1.2.3 From 7fd802f3b44918316aaeeaf19e1201587a37d44d Mon Sep 17 00:00:00 2001 From: Martin Sjoblom Date: Wed, 22 Feb 2012 13:51:46 +0100 Subject: ux500: hwmem: Add protected hwmem support ST-Ericsson Linux next: NA ST-Ericsson ID: 334907 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ia3b5293c55a052cbe35b0d281925426e5d3e37ee Signed-off-by: Martin Sjoblom Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/50227 Reviewed-by: QABUILD Reviewed-by: QATEST Reviewed-by: Jonas ABERG Reviewed-by: Per-Daniel OLSSON Reviewed-by: Robert FEKETE --- arch/arm/mach-ux500/hwmem-int.c | 36 +++++++++++++++++++++++++++++++++++- include/linux/hwmem.h | 4 ++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-ux500/hwmem-int.c b/arch/arm/mach-ux500/hwmem-int.c index c23049df4a6..e3fecb8c354 100644 --- a/arch/arm/mach-ux500/hwmem-int.c +++ b/arch/arm/mach-ux500/hwmem-int.c @@ -29,6 +29,28 @@ unsigned int hwmem_num_mem_types; static phys_addr_t hwmem_paddr; static size_t hwmem_size; +static phys_addr_t hwmem_prot_paddr; +static size_t hwmem_prot_size; + +static int __init parse_hwmem_prot_param(char *p) +{ + + hwmem_prot_size = memparse(p, &p); + + if (*p != '@') + goto no_at; + + hwmem_prot_paddr = memparse(p + 1, &p); + + return 0; + +no_at: + hwmem_prot_size = 0; + + return -EINVAL; +} +early_param("hwmem_prot", parse_hwmem_prot_param); + static int __init parse_hwmem_param(char *p) { hwmem_size = memparse(p, &p); @@ -49,7 +71,7 @@ early_param("hwmem", parse_hwmem_param); static int __init setup_hwmem(void) { - static const unsigned int NUM_MEM_TYPES = 2; + static const unsigned int NUM_MEM_TYPES = 3; int ret; @@ -84,6 +106,18 @@ static int __init setup_hwmem(void) hwmem_mem_types[1] = hwmem_mem_types[0]; hwmem_mem_types[1].id = HWMEM_MEM_CONTIGUOUS_SYS; + hwmem_mem_types[2] = hwmem_mem_types[1]; + hwmem_mem_types[2].id = HWMEM_MEM_PROTECTED_SYS; + + if (hwmem_prot_size > 0) { + hwmem_mem_types[2].allocator_instance = cona_create("hwmem_prot", + hwmem_prot_paddr, hwmem_prot_size); + if (IS_ERR(hwmem_mem_types[2].allocator_instance)) { + ret = PTR_ERR(hwmem_mem_types[2].allocator_instance); + goto hwmem_ima_init_failed; + } + } + hwmem_num_mem_types = NUM_MEM_TYPES; return 0; diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h index ba4c116f4b9..c84eaac79f1 100644 --- a/include/linux/hwmem.h +++ b/include/linux/hwmem.h @@ -102,6 +102,10 @@ enum hwmem_mem_type { * @brief Contiguous system memory. */ HWMEM_MEM_CONTIGUOUS_SYS, + /** + * @brief Protected system memory. 
+ */ + HWMEM_MEM_PROTECTED_SYS, }; /* User space API */ -- cgit v1.2.3 From aa14c10bbcd4449a6dbdc671a334bef1b8431d21 Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Mon, 12 Mar 2012 09:36:13 +0100 Subject: hwmem: Fix kernel headers check problem Signed-off-by: Philippe Langlais --- include/linux/hwmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h index c84eaac79f1..cb9e0dc9aaa 100644 --- a/include/linux/hwmem.h +++ b/include/linux/hwmem.h @@ -14,7 +14,7 @@ #include -#if !defined(__KERNEL__) && !defined(_KERNEL) +#if !defined(__KERNEL__) #include #else #include -- cgit v1.2.3
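Taken together, the series leaves hwmem with a pluggable allocator backend: the core resolves an enum hwmem_mem_type to a struct hwmem_mem_type_struct and calls through its allocator_api, as hwmem-int.c does with CONA. A minimal sketch of how a platform could wire in its own backend (the my_* functions are hypothetical placeholders, not part of the driver):

#include <linux/hwmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical allocator backend implemented elsewhere. */
extern void *my_alloc(void *instance, size_t size);
extern void my_free(void *instance, void *alloc);
extern phys_addr_t my_get_paddr(void *alloc);
extern void *my_get_kaddr(void *instance, void *alloc);
extern size_t my_get_size(void *alloc);

static int __init register_my_mem_type(void *allocator_instance)
{
	struct hwmem_mem_type_struct *types;

	types = kzalloc(sizeof(*types), GFP_KERNEL);
	if (types == NULL)
		return -ENOMEM;

	types[0].id = HWMEM_MEM_CONTIGUOUS_SYS;
	types[0].allocator_api.alloc = my_alloc;
	types[0].allocator_api.free = my_free;
	types[0].allocator_api.get_alloc_paddr = my_get_paddr;
	types[0].allocator_api.get_alloc_kaddr = my_get_kaddr;
	types[0].allocator_api.get_alloc_size = my_get_size;
	types[0].allocator_instance = allocator_instance;

	/* hwmem_alloc() will find the type via resolve_mem_type(). */
	hwmem_mem_types = types;
	hwmem_num_mem_types = 1;

	return 0;
}

This mirrors setup_hwmem() in arch/arm/mach-ux500/hwmem-int.c, which registers the CONA instances for the normal and protected memory regions.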