author		Philippe Langlais <philippe.langlais@stericsson.com>	2011-10-11 15:55:36 +0200
committer	Philippe Langlais <philippe.langlais@stericsson.com>	2012-05-22 11:03:42 +0200
commit		f0967f15f338138aedf2a820d628441fb05c2539 (patch)
tree		73a9b14f48a5e6dd7aed50d2871d0e9f731d7732
parent		85b9e2431e1ef30333952a8c8e4ebf74bfc4e271 (diff)
HWMEM: Update API
Perform queued hwmem API changes. One commit to ease dependency handling.

Depends-On: I13f249cf5f51f9f138171e8d6f59e1d5d2f72de1,
    I31030bcfda7cf76d15402c2137576da4f3fb2761,
    I2dc7e6aa5686492550b5164e50c06ed750ac9e16,
    Ia12bbb9f378c331cfb9b1376dedb3b7b65f56429,
    Ibc3404df4876971d8b69272c63120e2fe3bb2787

ST-Ericsson ID: AP 327001
ST-Ericsson FOSS-OUT ID: STETL-FOSS-OUT-10068
Change-Id: I9a45ad54a0cc8a5cdb1e3b9038ad50aeacb3f9c3
Signed-off-by: Johan Mossberg <johan.xx.mossberg@stericsson.com>
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/17518
Reviewed-by: Robert FEKETE <robert.fekete@stericsson.com>

Conflicts:
	drivers/misc/dispdev/dispdev.c

Conflicts:
	drivers/video/b2r2/b2r2_blt_main.c
	drivers/video/mcde/mcde_fb.c
-rw-r--r--	arch/arm/mach-ux500/dcache.c		 98
-rw-r--r--	drivers/misc/hwmem/cache_handler.c	109
-rw-r--r--	drivers/misc/hwmem/hwmem-ioctl.c	155
-rw-r--r--	drivers/misc/hwmem/hwmem-main.c		 42
-rw-r--r--	include/linux/hwmem.h			346
5 files changed, 460 insertions(+), 290 deletions(-)
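
The headline change: hwmem_pin() no longer returns a single physical address
plus an optional scattered-address array, it fills a caller-supplied array of
struct hwmem_mem_chunk. A minimal sketch of a kernel caller under the new
contract (the helper name and surrounding context are illustrative, not part
of the patch):

	#include <linux/hwmem.h>

	/* Pin a buffer known to be contiguous and hand back its base
	 * physical address. Contiguous buffers need exactly one chunk. */
	static int my_pin_contiguous(struct hwmem_alloc *alloc,
				     phys_addr_t *paddr)
	{
		struct hwmem_mem_chunk chunk;
		size_t nr_chunks = 1;
		int ret;

		ret = hwmem_pin(alloc, &chunk, &nr_chunks);
		if (ret < 0)
			return ret;	/* -ENOSPC: more chunks required */

		*paddr = chunk.paddr;
		return 0;
	}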
diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c
index cb4f8329f32..b1c3942c181 100644
--- a/arch/arm/mach-ux500/dcache.c
+++ b/arch/arm/mach-ux500/dcache.c
@@ -83,8 +83,6 @@ static const u32 outer_clean_breakpoint = 68041 + (347363 - 68041) * 0.666;
/* 485414 */
static const u32 outer_flush_breakpoint = 68041 + (694727 - 68041) * 0.666;
-static bool is_wt(enum hwmem_alloc_flags cache_settings);
-
static void __clean_inner_dcache_all(void *param);
static void clean_inner_dcache_all(void);
@@ -96,15 +94,48 @@ static bool is_cache_exclusive(void);
enum hwmem_alloc_flags cachi_get_cache_settings(
enum hwmem_alloc_flags requested_cache_settings)
{
- enum hwmem_alloc_flags cache_settings =
- requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK;
-
- if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) {
- if (is_wt(requested_cache_settings))
- cache_settings |= HWMEM_ALLOC_CACHE_HINT_WT;
- else
- cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB;
- }
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+
+ enum hwmem_alloc_flags cache_settings;
+
+ if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) &&
+ requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE))
+ /*
+ * We never use uncached as it's extremely slow and there is
+ * no scenario where it would be better than buffered memory.
+ */
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+
+ /*
+	 * The user has specified cached, or nothing at all; both are treated
+	 * as cached.
+ */
+ cache_settings = (requested_cache_settings &
+ ~(HWMEM_ALLOC_HINT_UNCACHED |
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY |
+ HWMEM_ALLOC_HINT_CACHE_NAOW)) |
+ HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+ if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_WT)))
+ cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB;
+ /*
+ * On ARMv7 "alloc on write" is just a hint so we need to assume the
+	 * worst case, i.e. "alloc on write". We would, however, like to
+	 * remember the requested "alloc on write" setting so that we can pass
+	 * it on to the hardware; we use the reserved bit in the alloc flags
+	 * to do that.
+ */
+ if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ cache_settings |= HWMEM_ALLOC_RESERVED_CHI;
+ else
+ cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI;
return cache_settings;
}
@@ -112,17 +143,21 @@ enum hwmem_alloc_flags cachi_get_cache_settings(
void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
pgprot_t *pgprot)
{
- if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) {
- if (is_wt(cache_settings))
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT)
*pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
L_PTE_MT_WRITETHROUGH);
- else
- *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
- L_PTE_MT_WRITEBACK);
- } else if (cache_settings & HWMEM_ALLOC_BUFFERED)
+ else {
+ if (cache_settings & HWMEM_ALLOC_RESERVED_CHI)
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC);
+ else
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
+ }
+ } else {
*pgprot = pgprot_writecombine(*pgprot);
- else
- *pgprot = pgprot_noncached(*pgprot);
+ }
}
void drain_cpu_write_buf(void)
@@ -147,8 +182,9 @@ void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
/* Inner clean range */
dmac_map_area(vaddr, length, DMA_TO_DEVICE);
*cleaned_everything = false;
- } else
+ } else {
clean_inner_dcache_all();
+ }
if (!inner_only) {
/*
@@ -160,8 +196,9 @@ void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
if (length < outer_flush_breakpoint) {
outer_cache.clean_range(paddr, paddr + length);
*cleaned_everything = false;
- } else
+ } else {
outer_cache.flush_all();
+ }
}
}
@@ -214,22 +251,25 @@ void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
/* Inner clean range */
dmac_map_area(vaddr, length, DMA_TO_DEVICE);
*flushed_everything = false;
- } else
+ } else {
clean_inner_dcache_all();
+ }
if (length < outer_flush_breakpoint) {
outer_cache.flush_range(paddr, paddr + length);
*flushed_everything = false;
- } else
+ } else {
outer_cache.flush_all();
+ }
}
if (length < inner_flush_breakpoint) {
/* Inner flush range */
dmac_flush_range(vaddr, (void *)((u32)vaddr + length));
*flushed_everything = false;
- } else
+ } else {
flush_inner_dcache_all();
+ }
}
bool speculative_data_prefetch(void)
@@ -246,16 +286,6 @@ u32 get_dcache_granularity(void)
* Local functions
*/
-static bool is_wt(enum hwmem_alloc_flags cache_settings)
-{
- u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK;
- if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT ||
- cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER)
- return true;
- else
- return false;
-}
-
static void __clean_inner_dcache_all(void *param)
{
__cpuc_clean_dcache_all();
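
A minimal sketch of how the reworked ux500 resolver above treats a typical
request; the request values are illustrative, the outcome follows from the
code in this hunk:

	static void example_cache_resolution(void)
	{
		/* Cached, alloc-on-write, inner+outer; no WB/WT preference. */
		enum hwmem_alloc_flags req = HWMEM_ALLOC_HINT_CACHED |
				HWMEM_ALLOC_HINT_CACHE_AOW |
				HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
		enum hwmem_alloc_flags res = cachi_get_cache_settings(req);

		/* res gains HWMEM_ALLOC_HINT_CACHE_WB (the default when
		 * neither WB nor WT was asked for) and
		 * HWMEM_ALLOC_RESERVED_CHI (the remembered AOW setting), so
		 * cachi_set_pgprot_cache_options() will select
		 * L_PTE_MT_WRITEALLOC for the mapping. */
		(void)res;
	}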
diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c
index b313da36aa4..e0ab4ee6cf8 100644
--- a/drivers/misc/hwmem/cache_handler.c
+++ b/drivers/misc/hwmem/cache_handler.c
@@ -65,9 +65,6 @@ static u32 offset_2_paddr(struct cach_buf *buf, u32 offset);
static u32 align_up(u32 value, u32 alignment);
static u32 align_down(u32 value, u32 alignment);
-static bool is_wb(enum hwmem_alloc_flags cache_settings);
-static bool is_inner_only(enum hwmem_alloc_flags cache_settings);
-
/*
* Exported functions
*/
@@ -89,7 +86,7 @@ void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr)
buf->vstart = vaddr;
buf->pstart = paddr;
- if (buf->cache_settings & HWMEM_ALLOC_CACHED) {
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
/*
* Keep whatever is in the cache. This way we avoid an
* unnecessary synch if CPU is the first user.
@@ -124,9 +121,9 @@ void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
struct hwmem_region *__region;
struct hwmem_region full_region;
- if (region != NULL)
+ if (region != NULL) {
__region = region;
- else {
+ } else {
full_region.offset = 0;
full_region.count = 1;
full_region.start = 0;
@@ -156,27 +153,39 @@ void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings(
enum hwmem_alloc_flags requested_cache_settings)
{
- enum hwmem_alloc_flags cache_settings =
- requested_cache_settings & ~HWMEM_ALLOC_CACHE_HINT_MASK;
-
- if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) {
- /*
- * If the alloc is cached we'll use the default setting. We
- * don't know what this setting is so we have to assume the
- * worst case, ie write back inner and outer.
- */
- cache_settings |= HWMEM_ALLOC_CACHE_HINT_WB;
- }
-
- return cache_settings;
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+ /* We don't know the cache setting so we assume worst case. */
+ static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+
+ if (requested_cache_settings & CACHE_ON_FLAGS_MASK)
+ return CACHE_SETTING;
+ else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE ||
+ (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED &&
+ !(requested_cache_settings &
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE)))
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+ else if (requested_cache_settings &
+ (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED))
+ return 0;
+ else
+ /* Nothing specified, use cached */
+ return CACHE_SETTING;
}
void __attribute__((weak)) cachi_set_pgprot_cache_options(
enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
{
- if ((cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED)
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
*pgprot = *pgprot; /* To silence compiler and checkpatch */
- else if (cache_settings & HWMEM_ALLOC_BUFFERED)
+ else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
*pgprot = pgprot_writecombine(*pgprot);
else
*pgprot = pgprot_noncached(*pgprot);
@@ -197,23 +206,32 @@ static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
if (!write && !read)
return;
- if ((buf->cache_settings & HWMEM_ALLOC_CACHED) == HWMEM_ALLOC_CACHED) {
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
struct cach_range region_range;
region_2_range(region, buf->size, &region_range);
- if (read || (write && is_wb(buf->cache_settings)))
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_WB))
			/* Perform deferred invalidates */
invalidate_cpu_cache(buf, &region_range);
- if (read)
- expand_range(&buf->range_in_cpu_cache, &region_range);
- if (write && is_wb(buf->cache_settings)) {
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_AOW))
expand_range(&buf->range_in_cpu_cache, &region_range);
+ if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
+ struct cach_range dirty_range_addition;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ dirty_range_addition = region_range;
+ else
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &dirty_range_addition);
+
expand_range(&buf->range_dirty_in_cpu_cache,
- &region_range);
+ &dirty_range_addition);
}
}
- if (buf->cache_settings & HWMEM_ALLOC_BUFFERED) {
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
if (write)
buf->in_cpu_write_buf = true;
}
@@ -243,8 +261,9 @@ static void sync_buf_post_cpu(struct cach_buf *buf,
&intersection);
clean_cpu_cache(buf, &region_range);
- } else
+ } else {
flush_cpu_cache(buf, &region_range);
+ }
}
if (read)
clean_cpu_cache(buf, &region_range);
@@ -277,13 +296,14 @@ static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
offset_2_vaddr(buf, intersection.start),
offset_2_paddr(buf, intersection.start),
range_length(&intersection),
- is_inner_only(buf->cache_settings),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
&flushed_everything);
if (flushed_everything) {
null_range(&buf->range_invalid_in_cpu_cache);
null_range(&buf->range_dirty_in_cpu_cache);
- } else
+ } else {
/*
* No need to shrink range_in_cpu_cache as invalidate
* is only used when we can't keep track of what's in
@@ -291,6 +311,7 @@ static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
*/
shrink_range(&buf->range_invalid_in_cpu_cache,
&intersection);
+ }
}
}
@@ -309,7 +330,8 @@ static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
offset_2_vaddr(buf, intersection.start),
offset_2_paddr(buf, intersection.start),
range_length(&intersection),
- is_inner_only(buf->cache_settings),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
&cleaned_everything);
if (cleaned_everything)
@@ -334,7 +356,8 @@ static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
offset_2_vaddr(buf, intersection.start),
offset_2_paddr(buf, intersection.start),
range_length(&intersection),
- is_inner_only(buf->cache_settings),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
&flushed_everything);
if (flushed_everything) {
@@ -485,23 +508,3 @@ static u32 align_down(u32 value, u32 alignment)
return value - remainder;
}
-
-static bool is_wb(enum hwmem_alloc_flags cache_settings)
-{
- u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK;
- if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WB ||
- cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER)
- return true;
- else
- return false;
-}
-
-static bool is_inner_only(enum hwmem_alloc_flags cache_settings)
-{
- u32 cache_hints = cache_settings & HWMEM_ALLOC_CACHE_HINT_MASK;
- if (cache_hints == HWMEM_ALLOC_CACHE_HINT_WT_INNER ||
- cache_hints == HWMEM_ALLOC_CACHE_HINT_WB_INNER)
- return true;
- else
- return false;
-}
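
One behavioural consequence of the new AOW/NAOW split in sync_buf_pre_cpu()
above: under write-back without alloc-on-write, a CPU write only dirties the
part of the region already resident in the cache. A stand-alone sketch of
that intersection step (plain C, values illustrative):

	#include <stdio.h>

	struct range { unsigned int start, end; };

	/* Mirror of intersect_range(): clamp to the overlapping bytes. */
	static struct range intersect(struct range a, struct range b)
	{
		struct range r;

		r.start = a.start > b.start ? a.start : b.start;
		r.end = a.end < b.end ? a.end : b.end;
		if (r.end < r.start)
			r.start = r.end = 0;	/* empty range */
		return r;
	}

	int main(void)
	{
		struct range in_cache = { 0, 4096 };	/* bytes cached */
		struct range write = { 2048, 8192 };	/* bytes written */
		struct range dirty = intersect(in_cache, write);

		/* With HWMEM_ALLOC_HINT_CACHE_AOW the whole write would be
		 * marked dirty; without it only [2048,4096) is. */
		printf("dirty: [%u,%u)\n", dirty.start, dirty.end);
		return 0;
	}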
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
index 8759c395147..e9e50de78bd 100644
--- a/drivers/misc/hwmem/hwmem-ioctl.c
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson AB 2010
+ * Copyright (C) ST-Ericsson SA 2010
*
* Hardware memory driver, hwmem
*
@@ -21,12 +21,6 @@
#include <linux/device.h>
#include <linux/sched.h>
-/*
- * TODO:
- * Count pin unpin at this level to ensure applications can't interfer
- * with each other.
- */
-
static int hwmem_open(struct inode *inode, struct file *file);
static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma);
static int hwmem_release_fop(struct inode *inode, struct file *file);
@@ -56,7 +50,7 @@ struct hwmem_file {
struct hwmem_alloc *fd_alloc; /* Ref counted */
};
-static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
{
int id, ret;
@@ -72,42 +66,42 @@ static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
}
/*
- * IDR always returns the lowest free id so the only way we can fail
- * here is if hwfile has 2^19 - 1 (524287) allocations.
+	 * IDR always returns the lowest free id, so wrapping is not an issue
+	 * here.
*/
- if (id >= 1 << (31 - PAGE_SHIFT)) {
+ if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
dev_err(hwmem_device.this_device, "Out of IDs!\n");
idr_remove(&hwfile->idr, id);
return -ENOMSG;
}
- return id << PAGE_SHIFT;
+ return (s32)id << PAGE_SHIFT;
}
-static void remove_id(struct hwmem_file *hwfile, int id)
+static void remove_id(struct hwmem_file *hwfile, s32 id)
{
idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
}
-static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, int id)
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id)
{
struct hwmem_alloc *alloc;
alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) :
- hwfile->fd_alloc;
+ hwfile->fd_alloc;
if (alloc == NULL)
alloc = ERR_PTR(-EINVAL);
return alloc;
}
-static int alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
{
- int ret = 0;
+ s32 ret = 0;
struct hwmem_alloc *alloc;
alloc = hwmem_alloc(req->size, req->flags, req->default_access,
- req->mem_type);
+ req->mem_type);
if (IS_ERR(alloc))
return PTR_ERR(alloc);
@@ -123,10 +117,10 @@ static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
struct hwmem_alloc *alloc;
if (hwfile->fd_alloc)
- return -EBUSY;
+ return -EINVAL;
alloc = hwmem_alloc(req->size, req->flags, req->default_access,
- req->mem_type);
+ req->mem_type);
if (IS_ERR(alloc))
return PTR_ERR(alloc);
@@ -139,6 +133,9 @@ static int release(struct hwmem_file *hwfile, s32 id)
{
struct hwmem_alloc *alloc;
+ if (id == 0)
+ return -EINVAL;
+
alloc = resolve_id(hwfile, id);
if (IS_ERR(alloc))
return PTR_ERR(alloc);
@@ -149,7 +146,20 @@ static int release(struct hwmem_file *hwfile, s32 id)
return 0;
}
-static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile,
+static int set_cpu_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU,
+ (struct hwmem_region *)&req->region);
+}
+
+static int set_sync_domain(struct hwmem_file *hwfile,
struct hwmem_set_domain_request *req)
{
struct hwmem_alloc *alloc;
@@ -158,18 +168,33 @@ static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile,
if (IS_ERR(alloc))
return PTR_ERR(alloc);
- return hwmem_set_domain(alloc, req->access, req->domain, &req->region);
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC,
+ (struct hwmem_region *)&req->region);
}
static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
{
+ int ret;
struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
alloc = resolve_id(hwfile, req->id);
if (IS_ERR(alloc))
return PTR_ERR(alloc);
- return hwmem_pin(alloc, &req->phys_addr, req->scattered_addrs);
+ hwmem_get_info(alloc, NULL, &mem_type, NULL);
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS)
+ return -EINVAL;
+
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret < 0)
+ return ret;
+
+ req->phys_addr = mem_chunk.paddr;
+
+ return 0;
}
static int unpin(struct hwmem_file *hwfile, s32 id)
@@ -211,13 +236,10 @@ static int get_info(struct hwmem_file *hwfile,
return 0;
}
-static int export(struct hwmem_file *hwfile, s32 id)
+static s32 export(struct hwmem_file *hwfile, s32 id)
{
- int ret;
+ s32 ret;
struct hwmem_alloc *alloc;
-
- uint32_t size;
- enum hwmem_mem_type mem_type;
enum hwmem_access access;
alloc = resolve_id(hwfile, id);
@@ -234,26 +256,20 @@ static int export(struct hwmem_file *hwfile, s32 id)
* security as the process already has access to the buffer (otherwise
* it would not be able to get here).
*/
- hwmem_get_info(alloc, &size, &mem_type, &access);
+ hwmem_get_info(alloc, NULL, NULL, &access);
ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT),
- task_tgid_nr(current));
+ task_tgid_nr(current));
if (ret < 0)
- goto error;
+ return ret;
return hwmem_get_name(alloc);
-
-error:
- return ret;
}
-static int import(struct hwmem_file *hwfile, s32 name)
+static s32 import(struct hwmem_file *hwfile, s32 name)
{
- int ret = 0;
+ s32 ret = 0;
struct hwmem_alloc *alloc;
-
- uint32_t size;
- enum hwmem_mem_type mem_type;
enum hwmem_access access;
alloc = hwmem_resolve_by_name(name);
@@ -261,8 +277,7 @@ static int import(struct hwmem_file *hwfile, s32 name)
return PTR_ERR(alloc);
/* Check access permissions for process */
- hwmem_get_info(alloc, &size, &mem_type, &access);
-
+ hwmem_get_info(alloc, NULL, NULL, &access);
if (!(access & HWMEM_ACCESS_IMPORT)) {
ret = -EPERM;
goto error;
@@ -270,26 +285,44 @@ static int import(struct hwmem_file *hwfile, s32 name)
ret = create_id(hwfile, alloc);
if (ret < 0)
- hwmem_release(alloc);
+ goto error;
+
+ return ret;
error:
+ hwmem_release(alloc);
+
return ret;
}
static int import_fd(struct hwmem_file *hwfile, s32 name)
{
+ int ret;
struct hwmem_alloc *alloc;
+ enum hwmem_access access;
if (hwfile->fd_alloc)
- return -EBUSY;
+ return -EINVAL;
alloc = hwmem_resolve_by_name(name);
if (IS_ERR(alloc))
return PTR_ERR(alloc);
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
hwfile->fd_alloc = alloc;
return 0;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
}
static int hwmem_open(struct inode *inode, struct file *file)
@@ -315,7 +348,7 @@ static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&hwfile->lock);
- alloc = resolve_id(hwfile, vma->vm_pgoff << PAGE_SHIFT);
+ alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT);
if (IS_ERR(alloc)) {
ret = PTR_ERR(alloc);
goto out;
@@ -385,23 +418,29 @@ static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case HWMEM_RELEASE_IOC:
ret = release(hwfile, (s32)arg);
break;
- case HWMEM_SET_DOMAIN_IOC:
+ case HWMEM_SET_CPU_DOMAIN_IOC:
{
struct hwmem_set_domain_request req;
if (copy_from_user(&req, (void __user *)arg,
sizeof(struct hwmem_set_domain_request)))
ret = -EFAULT;
else
- ret = hwmem_ioctl_set_domain(hwfile, &req);
+ ret = set_cpu_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_SET_SYNC_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_sync_domain(hwfile, &req);
}
break;
case HWMEM_PIN_IOC:
{
struct hwmem_pin_request req;
- /*
- * TODO: Validate and copy scattered_addrs. Not a
- * problem right now as it's never used.
- */
if (copy_from_user(&req, (void __user *)arg,
sizeof(struct hwmem_pin_request)))
ret = -EFAULT;
@@ -468,6 +507,22 @@ static unsigned long hwmem_get_unmapped_area(struct file *file,
int __init hwmem_ioctl_init(void)
{
+ if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 ||
+ sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 ||
+ sizeof(enum hwmem_access) != 4 ||
+ sizeof(enum hwmem_mem_type) != 4) {
+ dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT"
+ " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||"
+ " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum"
+ " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)"
+ " != 4\n");
+ return -ENOMSG;
+ }
+ if (PAGE_SHIFT > 15)
+ dev_warn(hwmem_device.this_device, "Due to the page size only"
+			" %u ids per file instance are available\n",
+ ((u32)1 << (31 - PAGE_SHIFT)) - 1);
+
return misc_register(&hwmem_device);
}
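
A hypothetical user-space walk-through of the renumbered ioctl interface.
"/dev/hwmem" and HWMEM_ALLOC_FD_IOC are assumptions (the alloc ioctls are not
shown in this diff); the set-domain call uses the new HWMEM_SET_CPU_DOMAIN_IOC
from this patch:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/hwmem.h>

	int main(void)
	{
		struct hwmem_alloc_request areq;
		struct hwmem_set_domain_request dreq;
		int fd, ret;

		fd = open("/dev/hwmem", O_RDWR);	/* node name assumed */
		if (fd < 0)
			return 1;

		/* Allocate a page-sized cached buffer linked to the fd. */
		memset(&areq, 0, sizeof(areq));
		areq.size = 4096;
		areq.flags = HWMEM_ALLOC_HINT_CACHED;
		areq.default_access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE;
		areq.mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
		if (ioctl(fd, HWMEM_ALLOC_FD_IOC, &areq) < 0)	/* assumed */
			return 1;

		/* Prepare the fd-linked buffer (id 0) for CPU read/write. */
		memset(&dreq, 0, sizeof(dreq));
		dreq.id = 0;
		dreq.access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE;
		dreq.region.offset = 0;
		dreq.region.count = 1;
		dreq.region.start = 0;
		dreq.region.end = 4096;
		dreq.region.size = 4096;
		ret = ioctl(fd, HWMEM_SET_CPU_DOMAIN_IOC, &dreq);

		close(fd);
		return ret < 0 ? 1 : 0;
	}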
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
index 0010e45ff52..fbfd8502a1d 100644
--- a/drivers/misc/hwmem/hwmem-main.c
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -1,10 +1,10 @@
/*
- * Copyright (C) ST-Ericsson AB 2010
+ * Copyright (C) ST-Ericsson SA 2010
*
* Hardware memory driver, hwmem
*
- * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
- * for ST-Ericsson.
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
*
* License terms: GNU General Public License (GPL), version 2.
*/
@@ -46,7 +46,7 @@ struct hwmem_alloc {
u32 paddr;
void *kaddr;
u32 size;
- u32 name;
+ s32 name;
/* Access control */
enum hwmem_access default_access;
@@ -446,12 +446,19 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
}
EXPORT_SYMBOL(hwmem_set_domain);
-int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr,
- uint32_t *scattered_phys_addrs)
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ u32 *mem_chunks_length)
{
+ if (*mem_chunks_length < 1) {
+ *mem_chunks_length = 1;
+ return -ENOSPC;
+ }
+
mutex_lock(&lock);
- *phys_addr = alloc->paddr;
+ mem_chunks[0].paddr = alloc->paddr;
+ mem_chunks[0].size = alloc->size;
+ *mem_chunks_length = 1;
mutex_unlock(&lock);
@@ -492,7 +499,7 @@ int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
goto illegal_access;
}
- if (vma_size > (unsigned long)alloc->size) {
+ if (vma_size > alloc->size) {
ret = -EINVAL;
goto illegal_size;
}
@@ -590,14 +597,17 @@ error_get_pid:
}
EXPORT_SYMBOL(hwmem_set_access);
-void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
+void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
enum hwmem_mem_type *mem_type, enum hwmem_access *access)
{
mutex_lock(&lock);
- *size = alloc->size;
- *mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
- *access = get_access(alloc);
+ if (size != NULL)
+ *size = alloc->size;
+ if (mem_type != NULL)
+ *mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
+ if (access != NULL)
+ *access = get_access(alloc);
mutex_unlock(&lock);
}
@@ -766,6 +776,14 @@ static int __devinit hwmem_probe(struct platform_device *pdev)
int ret = 0;
struct hwmem_platform_data *platform_data = pdev->dev.platform_data;
+ if (sizeof(int) != 4 || sizeof(phys_addr_t) < 4 ||
+ sizeof(void *) < 4 || sizeof(size_t) != 4) {
+ dev_err(&pdev->dev, "sizeof(int) != 4 || sizeof(phys_addr_t)"
+ " < 4 || sizeof(void *) < 4 || sizeof(size_t) !="
+ " 4\n");
+ return -ENOMSG;
+ }
+
if (hwdev || platform_data->size == 0 ||
platform_data->start != PAGE_ALIGN(platform_data->start) ||
platform_data->size != PAGE_ALIGN(platform_data->size)) {
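
A short sketch of a kernel caller leaning on the now NULL-tolerant
hwmem_get_info(), querying only the memory type, exactly as the updated pin()
ioctl handler does (the helper name is made up):

	#include <linux/types.h>
	#include <linux/hwmem.h>

	static bool my_is_contiguous(struct hwmem_alloc *alloc)
	{
		enum hwmem_mem_type mem_type;

		/* size and access are not needed, so pass NULL for both. */
		hwmem_get_info(alloc, NULL, &mem_type, NULL);
		return mem_type == HWMEM_MEM_CONTIGUOUS_SYS;
	}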
diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h
index bc0a26a30a6..6756085f72a 100644
--- a/include/linux/hwmem.h
+++ b/include/linux/hwmem.h
@@ -1,7 +1,7 @@
/*
- * Copyright (C) ST-Ericsson AB 2010
+ * Copyright (C) ST-Ericsson SA 2010
*
- * ST-Ericsson HW memory driver
+ * Hardware memory driver, hwmem
*
* Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
* for ST-Ericsson.
@@ -27,36 +27,49 @@
*/
enum hwmem_alloc_flags {
/**
- * @brief Buffer will not be cached and not buffered
+ * @brief Buffered
*/
- HWMEM_ALLOC_UNCACHED = (0 << 0),
+ HWMEM_ALLOC_HINT_WRITE_COMBINE = (1 << 0),
/**
- * @brief Buffer will be buffered, but not cached
+ * @brief Non-buffered
*/
- HWMEM_ALLOC_BUFFERED = (1 << 0),
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE = (1 << 1),
/**
- * @brief Buffer will be cached and buffered, use cache hints to be
- * more specific
+ * @brief Cached
*/
- HWMEM_ALLOC_CACHED = (3 << 0),
+ HWMEM_ALLOC_HINT_CACHED = (1 << 2),
/**
- * @brief Buffer should be cached write-back in both level 1 and 2 cache
+ * @brief Uncached
*/
- HWMEM_ALLOC_CACHE_HINT_WB = (1 << 2),
+ HWMEM_ALLOC_HINT_UNCACHED = (1 << 3),
/**
- * @brief Buffer should be cached write-through in both level 1 and
- * 2 cache
+ * @brief Write back
*/
- HWMEM_ALLOC_CACHE_HINT_WT = (2 << 2),
+ HWMEM_ALLOC_HINT_CACHE_WB = (1 << 4),
/**
- * @brief Buffer should be cached write-back in level 1 cache
+ * @brief Write through
*/
- HWMEM_ALLOC_CACHE_HINT_WB_INNER = (3 << 2),
+ HWMEM_ALLOC_HINT_CACHE_WT = (1 << 5),
/**
- * @brief Buffer should be cached write-through in level 1 cache
+ * @brief No alloc on write
*/
- HWMEM_ALLOC_CACHE_HINT_WT_INNER = (4 << 2),
- HWMEM_ALLOC_CACHE_HINT_MASK = 0x1C,
+ HWMEM_ALLOC_HINT_CACHE_NAOW = (1 << 6),
+ /**
+ * @brief Alloc on write
+ */
+ HWMEM_ALLOC_HINT_CACHE_AOW = (1 << 7),
+ /**
+ * @brief Inner and outer cache
+ */
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE = (1 << 8),
+ /**
+ * @brief Inner cache only
+ */
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY = (1 << 9),
+ /**
+ * @brief Reserved for use by the cache handler integration
+ */
+ HWMEM_ALLOC_RESERVED_CHI = (1 << 31),
};
/**
@@ -78,82 +91,32 @@ enum hwmem_access {
};
/**
- * @brief Flags defining memory type.
+ * @brief Values defining memory types.
*/
enum hwmem_mem_type {
/**
- * @brief Scattered system memory. Currently not supported!
+ * @brief Scattered system memory.
*/
- HWMEM_MEM_SCATTERED_SYS = (1 << 0),
+ HWMEM_MEM_SCATTERED_SYS,
/**
* @brief Contiguous system memory.
*/
- HWMEM_MEM_CONTIGUOUS_SYS = (1 << 1),
+ HWMEM_MEM_CONTIGUOUS_SYS,
};
-/**
- * @brief Values defining memory domain.
- */
-enum hwmem_domain {
- /**
- * @brief This value specifies the neutral memory domain. Setting this
- * domain will syncronize all supported memory domains (currently CPU).
- */
- HWMEM_DOMAIN_SYNC = 0,
- /**
- * @brief This value specifies the CPU memory domain.
- */
- HWMEM_DOMAIN_CPU = 1,
-};
+/* User space API */
/**
- * @brief Structure defining a region of a memory buffer.
- *
- * A buffer is defined to contain a number of equally sized blocks. Each block
- * has a part of it included in the region [<start>-<end>). That is
- * <end>-<start> bytes. Each block is <size> bytes long. Total number of bytes
- * in the region is (<end> - <start>) * <count>. First byte of the region is
- * <offset> + <start> bytes into the buffer.
- *
- * Here's an example of a region in a graphics buffer (X = buffer, R = region):
- *
- * XXXXXXXXXXXXXXXXXXXX \
- * XXXXXXXXXXXXXXXXXXXX |-- offset = 60
- * XXXXXXXXXXXXXXXXXXXX /
- * XXRRRRRRRRXXXXXXXXXX \
- * XXRRRRRRRRXXXXXXXXXX |-- count = 4
- * XXRRRRRRRRXXXXXXXXXX |
- * XXRRRRRRRRXXXXXXXXXX /
- * XXXXXXXXXXXXXXXXXXXX
- * --| start = 2
- * ----------| end = 10
- * --------------------| size = 20
+ * @see struct hwmem_region.
*/
-struct hwmem_region {
- /**
- * @brief The first block's offset from beginning of buffer.
- */
- uint32_t offset;
- /**
- * @brief The number of blocks included in this region.
- */
- uint32_t count;
- /**
- * @brief The index of the first byte included in this block.
- */
- uint32_t start;
- /**
- * @brief The index of the last byte included in this block plus one.
- */
- uint32_t end;
- /**
- * @brief The size in bytes of each block.
- */
- uint32_t size;
+struct hwmem_region_us {
+ __u32 offset;
+ __u32 count;
+ __u32 start;
+ __u32 end;
+ __u32 size;
};
-/* User space API */
-
/**
* @brief Alloc request data.
*/
@@ -162,19 +125,19 @@ struct hwmem_alloc_request {
* @brief [in] Size of requested allocation in bytes. Size will be
* aligned to PAGE_SIZE bytes.
*/
- uint32_t size;
+ __u32 size;
/**
* @brief [in] Flags describing requested allocation options.
*/
- uint32_t flags; /* enum hwmem_alloc_flags */
+ __u32 flags; /* enum hwmem_alloc_flags */
/**
* @brief [in] Default access rights for buffer.
*/
- uint32_t default_access; /* enum hwmem_access */
+ __u32 default_access; /* enum hwmem_access */
/**
* @brief [in] Memory type of the buffer.
*/
- uint32_t mem_type; /* enum hwmem_mem_type */
+ __u32 mem_type; /* enum hwmem_mem_type */
};
/**
@@ -185,24 +148,20 @@ struct hwmem_set_domain_request {
* @brief [in] Identifier of buffer to be prepared. If 0 is specified
* the buffer associated with the current file instance will be used.
*/
- int32_t id;
- /**
- * @brief [in] Value specifying the new memory domain.
- */
- uint32_t domain; /* enum hwmem_domain */
+ __s32 id;
/**
* @brief [in] Flags specifying access mode of the operation.
*
* One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required.
* For details, @see enum hwmem_access.
*/
- uint32_t access; /* enum hwmem_access */
+ __u32 access; /* enum hwmem_access */
/**
* @brief [in] The region of bytes to be prepared.
*
* For details, @see struct hwmem_region.
*/
- struct hwmem_region region;
+ struct hwmem_region_us region;
};
/**
@@ -213,18 +172,11 @@ struct hwmem_pin_request {
* @brief [in] Identifier of buffer to be pinned. If 0 is specified,
* the buffer associated with the current file instance will be used.
*/
- int32_t id;
+ __s32 id;
/**
* @brief [out] Physical address of first word in buffer.
*/
- uint32_t phys_addr;
- /**
- * @brief [in] Pointer to buffer for physical addresses of pinned
- * scattered buffer. Buffer must be (buffer_size / page_size) *
- * sizeof(uint32_t) bytes.
- * This field can be NULL for physically contiguos buffers.
- */
- uint32_t *scattered_addrs;
+ __u32 phys_addr;
};
/**
@@ -232,14 +184,15 @@ struct hwmem_pin_request {
*/
struct hwmem_set_access_request {
/**
- * @brief [in] Identifier of buffer to be pinned. If 0 is specified,
- * the buffer associated with the current file instance will be used.
+ * @brief [in] Identifier of buffer to set access rights for. If 0 is
+ * specified, the buffer associated with the current file instance will
+ * be used.
*/
- int32_t id;
+ __s32 id;
/**
* @param access Access value indicating what is allowed.
*/
- uint32_t access; /* enum hwmem_access */
+ __u32 access; /* enum hwmem_access */
/**
* @param pid Process ID to set rights for.
*/
@@ -254,19 +207,19 @@ struct hwmem_get_info_request {
* @brief [in] Identifier of buffer to get info about. If 0 is specified,
* the buffer associated with the current file instance will be used.
*/
- int32_t id;
+ __s32 id;
/**
* @brief [out] Size in bytes of buffer.
*/
- uint32_t size;
+ __u32 size;
/**
* @brief [out] Memory type of buffer.
*/
- uint32_t mem_type; /* enum hwmem_mem_type */
+ __u32 mem_type; /* enum hwmem_mem_type */
/**
* @brief [out] Access rights for buffer.
*/
- uint32_t access; /* enum hwmem_access */
+ __u32 access; /* enum hwmem_access */
};
/**
@@ -296,7 +249,8 @@ struct hwmem_get_info_request {
* @brief Releases buffer.
*
* Buffers are reference counted and will not be destroyed until the last
- * reference is released. Bufferes allocated with ALLOC_FD_IOC not allowed.
+ * reference is released. Buffers allocated with ALLOC_FD_IOC shall not be
+ * released with this IOC, @see HWMEM_ALLOC_FD_IOC.
*
* Input is the buffer identifier.
*
@@ -305,44 +259,72 @@ struct hwmem_get_info_request {
#define HWMEM_RELEASE_IOC _IO('W', 3)
/**
- * @brief Set the buffer's memory domain and prepares it for access.
+ * Memory Mapping
+ *
+ * To map a hwmem buffer mmap the hwmem fd and supply the buffer identifier as
+ * the offset. If the buffer is linked to the fd, and thus has no buffer
+ * identifier, supply 0 as the offset. Note that the offset feature of mmap is
+ * disabled in both cases; you can only mmap starting at position 0.
+ */
+
+/**
+ * @brief Prepares the buffer for CPU access.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_CPU_DOMAIN_IOC _IOW('W', 4, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Set sync domain from driver instead!
+ *
+ * @brief Prepares the buffer for access by any DMA hardware.
*
* Input is a pointer to a hwmem_set_domain_request struct.
*
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_SET_DOMAIN_IOC _IOR('W', 4, struct hwmem_set_domain_request)
+#define HWMEM_SET_SYNC_DOMAIN_IOC _IOW('W', 5, struct hwmem_set_domain_request)
/**
- * @brief Pins the buffer and returns the physical address of the buffer.
+ * DEPRECATED: Pin from driver instead!
+ *
+ * @brief Pins the buffer.
+ *
+ * Input is a pointer to a hwmem_pin_request struct. Only contiguous buffers
+ * can be pinned from user space.
*
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_PIN_IOC _IOWR('W', 5, struct hwmem_pin_request)
+#define HWMEM_PIN_IOC _IOWR('W', 6, struct hwmem_pin_request)
/**
+ * DEPRECATED: Unpin from driver instead!
+ *
* @brief Unpins the buffer.
*
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_UNPIN_IOC _IO('W', 6)
+#define HWMEM_UNPIN_IOC _IO('W', 7)
/**
* @brief Set access rights for buffer.
*
+ * Input is a pointer to a hwmem_set_access_request struct.
+ *
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_SET_ACCESS_IOC _IOW('W', 7, struct hwmem_set_access_request)
+#define HWMEM_SET_ACCESS_IOC _IOW('W', 8, struct hwmem_set_access_request)
/**
* @brief Get buffer information.
*
- * Input is the buffer identifier. If 0 is specified the buffer associated
- * with the current file instance will be used.
+ * Input is a pointer to a hwmem_get_info_request struct.
*
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_GET_INFO_IOC _IOWR('W', 8, struct hwmem_get_info_request)
+#define HWMEM_GET_INFO_IOC _IOWR('W', 9, struct hwmem_get_info_request)
/**
* @brief Export the buffer identifier for use in another process.
@@ -355,33 +337,101 @@ struct hwmem_get_info_request {
*
* @return A global buffer name on success, or a negative error code.
*/
-#define HWMEM_EXPORT_IOC _IO('W', 9)
+#define HWMEM_EXPORT_IOC _IO('W', 10)
/**
* @brief Import a buffer to allow local access to the buffer.
*
* Input is the buffer's global name.
*
- * @return The imported buffer's identifier on success, or a negative error code.
+ * @return The imported buffer's identifier on success, or a negative error
+ * code.
*/
-#define HWMEM_IMPORT_IOC _IO('W', 10)
+#define HWMEM_IMPORT_IOC _IO('W', 11)
/**
- * @brief Import a buffer to allow local access to the buffer using fd.
+ * @brief Import a buffer to allow local access to the buffer using the current
+ * fd.
*
* Input is the buffer's global name.
*
* @return Zero on success, or a negative error code.
*/
-#define HWMEM_IMPORT_FD_IOC _IO('W', 11)
+#define HWMEM_IMPORT_FD_IOC _IO('W', 12)
#ifdef __KERNEL__
/* Kernel API */
+/**
+ * @brief Values defining memory domain.
+ */
+enum hwmem_domain {
+ /**
+ * @brief This value specifies the neutral memory domain. Setting this
+	 * domain will synchronize all supported memory domains.
+ */
+ HWMEM_DOMAIN_SYNC = 0,
+ /**
+ * @brief This value specifies the CPU memory domain.
+ */
+ HWMEM_DOMAIN_CPU,
+};
+
struct hwmem_alloc;
/**
+ * @brief Structure defining a region of a memory buffer.
+ *
+ * A buffer is defined to contain a number of equally sized blocks. Each block
+ * has a part of it included in the region [<start>-<end>). That is
+ * <end>-<start> bytes. Each block is <size> bytes long. Total number of bytes
+ * in the region is (<end> - <start>) * <count>. First byte of the region is
+ * <offset> + <start> bytes into the buffer.
+ *
+ * Here's an example of a region in a graphics buffer (X = buffer, R = region):
+ *
+ * XXXXXXXXXXXXXXXXXXXX \
+ * XXXXXXXXXXXXXXXXXXXX |-- offset = 60
+ * XXXXXXXXXXXXXXXXXXXX /
+ * XXRRRRRRRRXXXXXXXXXX \
+ * XXRRRRRRRRXXXXXXXXXX |-- count = 4
+ * XXRRRRRRRRXXXXXXXXXX |
+ * XXRRRRRRRRXXXXXXXXXX /
+ * XXXXXXXXXXXXXXXXXXXX
+ * --| start = 2
+ * ----------| end = 10
+ * --------------------| size = 20
+ */
+struct hwmem_region {
+ /**
+ * @brief The first block's offset from beginning of buffer.
+ */
+ size_t offset;
+ /**
+ * @brief The number of blocks included in this region.
+ */
+ size_t count;
+ /**
+ * @brief The index of the first byte included in this block.
+ */
+ size_t start;
+ /**
+ * @brief The index of the last byte included in this block plus one.
+ */
+ size_t end;
+ /**
+ * @brief The size in bytes of each block.
+ */
+ size_t size;
+};
+
+struct hwmem_mem_chunk {
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
* @brief Allocates <size> number of bytes.
*
* @param size Number of bytes to allocate. All allocations are page aligned.
@@ -391,7 +441,7 @@ struct hwmem_alloc;
*
* @return Pointer to allocation, or a negative error code.
*/
-struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
enum hwmem_access def_access, enum hwmem_mem_type mem_type);
/**
@@ -419,14 +469,26 @@ int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
/**
* @brief Pins the buffer.
*
+ * Notice that the number of mem chunks a buffer consists of can change at any
+ * time if the buffer is not pinned. Because of this one cannot assume that
+ * pin will succeed if <mem_chunks> has the length specified by a previous call
+ * to pin, as the buffer layout may have changed between the calls. There are
+ * two ways of handling this situation: keep redoing the pin procedure till it
+ * succeeds, or allocate enough mem chunks for the worst case ("buffer size" /
+ * "page size" mem chunks). Contiguous buffers always require only one mem
+ * chunk.
+ *
* @param alloc Buffer to be pinned.
- * @param phys_addr Reference to variable to receive physical address.
- * @param scattered_phys_addrs Pointer to buffer to receive physical addresses
- * of all pages in the scattered buffer. Can be NULL if buffer is contigous.
- * Buffer size must be (buffer_size / page_size) * sizeof(uint32_t) bytes.
+ * @param mem_chunks Pointer to array of mem chunks.
+ * @param mem_chunks_length Pointer to variable that contains the length of
+ * <mem_chunks> array. On success the number of written mem chunks will be
+ * stored in this variable. If the call fails with -ENOSPC the required length
+ * of <mem_chunks> will be stored in this variable.
+ *
+ * @return Zero on success, or a negative error code.
*/
-int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr,
- uint32_t *scattered_phys_addrs);
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ size_t *mem_chunks_length);
/**
* @brief Unpins the buffer.
@@ -438,7 +500,9 @@ void hwmem_unpin(struct hwmem_alloc *alloc);
/**
* @brief Map the buffer to user space.
*
- * @param alloc Buffer to be unpinned.
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Zero on success, or a negative error code.
*/
int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma);
@@ -476,12 +540,12 @@ int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
* @brief Get buffer information.
*
* @param alloc Buffer to get information about.
- * @param size Pointer to size output variable.
- * @param size Pointer to memory type output variable.
- * @param size Pointer to access rights output variable.
+ * @param size Pointer to size output variable. Can be NULL.
+ * @param mem_type Pointer to memory type output variable. Can be NULL.
+ * @param access Pointer to access rights output variable. Can be NULL.
*/
-void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
- enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+void hwmem_get_info(struct hwmem_alloc *alloc, size_t *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access);
/**
* @brief Allocate a global buffer name.
@@ -492,7 +556,7 @@ void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
*
* @return Positive global name on success, or a negative error code.
*/
-int hwmem_get_name(struct hwmem_alloc *alloc);
+s32 hwmem_get_name(struct hwmem_alloc *alloc);
/**
* @brief Import the global buffer name to allow local access to the buffer.
@@ -508,10 +572,10 @@ struct hwmem_alloc *hwmem_resolve_by_name(s32 name);
/* Internal */
struct hwmem_platform_data {
- /* Starting physical address of memory region */
- unsigned long start;
+ /* Physical address of memory region */
+ u32 start;
/* Size of memory region */
- unsigned long size;
+ u32 size;
};
#endif
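
Finally, the retry pattern the new hwmem_pin() comment recommends for
scattered buffers, as a sketch (the helper name is made up; kcalloc/kfree and
ERR_PTR are standard kernel API):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/hwmem.h>

	/* Grow the chunk array until hwmem_pin() stops reporting -ENOSPC. */
	static struct hwmem_mem_chunk *my_pin_all(struct hwmem_alloc *alloc,
						  size_t *nr_chunks)
	{
		struct hwmem_mem_chunk *chunks = NULL;
		size_t len = 1;
		int ret;

		do {
			kfree(chunks);
			chunks = kcalloc(len, sizeof(*chunks), GFP_KERNEL);
			if (chunks == NULL)
				return ERR_PTR(-ENOMEM);

			*nr_chunks = len;
			ret = hwmem_pin(alloc, chunks, nr_chunks);
			len = *nr_chunks;	/* on -ENOSPC: required length */
		} while (ret == -ENOSPC);

		if (ret < 0) {
			kfree(chunks);
			return ERR_PTR(ret);
		}
		return chunks;	/* *nr_chunks chunks were written */
	}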