diff options
Diffstat (limited to 'drivers/staging/nmf-cm/cm/engine/memory')
15 files changed, 3092 insertions, 0 deletions
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h new file mode 100644 index 00000000000..340301a9259 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +/** + * \internal + */ +#ifndef CHUNK_MGR_H_ +#define CHUNK_MGR_H_ + +#include <cm/engine/memory/inc/remote_allocator.h> +#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h> + +t_cm_error allocChunkPool(void); +t_cm_error fillChunkPool(void); +void freeChunkPool(void); + +/***************************************************************************/ +/* + * allocChunk + * param current : Pointer on chunck to free + * + * Add a chunk in the chunck list + * + */ +/***************************************************************************/ +t_cm_chunk* allocChunk(void); + +/***************************************************************************/ +/* + * freeChunk + * param current : Pointer on chunck to free + * + * Remove a chunk in the chunck list + * + */ +/***************************************************************************/ +void freeChunk(t_cm_chunk *chunk); + +#endif /*CHUNK_MGR_H_*/ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h new file mode 100644 index 00000000000..c9b39956c63 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. 
+ */ +/***************************************************************************/ +/* file : domain.h + * author : NMF team + * version : 1.0 + * + * brief : NMF domain definitions + */ +/***************************************************************************/ + +#ifndef DOMAIN_H_ +#define DOMAIN_H_ + +#include <cm/inc/cm_type.h> +#include <cm/engine/memory/inc/domain_type.h> +#include <cm/engine/memory/inc/memory.h> +#include <cm/engine/dsp/inc/dsp.h> + +/* These default domains are used for singleton only ! */ +#define DEFAULT_SVA_DOMAIN (t_cm_domain_id)1 +#define DEFAULT_SIA_DOMAIN (t_cm_domain_id)2 + +/*! + * \brief Domain type. + * \internal + * \ingroup CM_DOMAIN_API + */ +typedef enum { + DOMAIN_ANY = 0, + DOMAIN_NORMAL, + DOMAIN_SCRATCH_PARENT, + DOMAIN_SCRATCH_CHILD +} t_cm_domain_type; + +/*! + * \brief Domain descriptor. Holds offsets for all memory types present in the system. + * \internal + * \ingroup CM_DOMAIN_API + */ +typedef struct { + t_cm_domain_memory domain; // the actual memory ranges + t_cm_domain_type type; // domain type + t_uint32 refcount; // reference counter for scratch domain dependencies + t_nmf_client_id client; // client id for cleaning + + union { + struct { + t_memory_handle handle; // memory handle of the allocated chunk the covers the esram-data scratch region + } parent; + struct { + t_cm_allocator_desc *alloc; //allocator descriptor for the scratch domain + t_cm_domain_id parent_ref; //parent domain reference + } child; + } scratch; + void *dbgCooky; //pointer to OS internal data +} t_cm_domain_desc; + +#ifdef DEBUG +#define DOMAIN_DEBUG(handle) \ + handle = handle & ~0xc0; +#else +#define DOMAIN_DEBUG(handle) +#endif + +/*! + * \brief Domain descriptor array. 
+ */ +extern t_cm_domain_desc domainDesc[]; + +typedef struct { + t_cm_domain_id parentId; + t_cm_domain_id domainId; + t_cm_allocator_desc *allocDesc; +} t_cm_domain_scratch_desc; + +extern t_cm_domain_scratch_desc domainScratchDesc[]; + +typedef struct { + t_cm_system_address sdramCode; + t_cm_system_address sdramData; + t_cm_system_address esramCode; + t_cm_system_address esramData; +} t_cm_domain_info; + +/*! + * \brief Init of the domain subsystem. + */ +PUBLIC t_cm_error cm_DM_Init(void); + +/*! + * \brief Clean-up of the domain subsystem. + */ +PUBLIC void cm_DM_Destroy(void); + +/*! + * \brief Domain creation. + * + * Allocates in slot in the domain descriptors array and copies segment infos from the domain + * parameter to the descriptor. The resulting handle is returned via @param handle. + * + * Returns: CM_DOMAIN_INVALID in case of error, otherwise CM_OK. + */ +PUBLIC t_cm_error cm_DM_CreateDomain(const t_nmf_client_id client, const t_cm_domain_memory *domain, t_cm_domain_id *handle); + +/*! + * \brief Scratch (or overlap) domain creation. + * + * Create a scratch domain, ie domain where allocation may overlap. + */ +PUBLIC t_cm_error cm_DM_CreateDomainScratch(const t_nmf_client_id client, const t_cm_domain_id parentId, const t_cm_domain_memory *domain, t_cm_domain_id *handle); + +/* ! + * \brief Retrieve the coreId from a given domain. Utility. + */ +PUBLIC t_nmf_core_id cm_DM_GetDomainCoreId(const t_cm_domain_id domainId); + +/*! + * \brief Destroy all domains belonging to a given client. + */ +PUBLIC t_cm_error cm_DM_DestroyDomains(const t_nmf_client_id client); + +/*! + * \brief Destroy a given domain. + */ +PUBLIC t_cm_error cm_DM_DestroyDomain(t_cm_domain_id handle); + +/*! + * \brief Check if the handle is valid. + */ +PUBLIC t_cm_error cm_DM_CheckDomain(t_cm_domain_id handle, t_cm_domain_type type); +PUBLIC t_cm_error cm_DM_CheckDomainWithClient(t_cm_domain_id handle, t_cm_domain_type type, t_nmf_client_id client); + +/*! 
+ * \brief Memory allocation in a given domain, for a given memory type (see CM_AllocMpcMemory). + */ +PUBLIC t_memory_handle cm_DM_Alloc(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_uint32 size, t_cm_mpc_memory_alignment memAlignment, t_bool powerOn); + +/*! + * \brief Memory free using a given domain handle + */ +PUBLIC void cm_DM_FreeWithInfo(t_memory_handle memHandle, t_nmf_core_id *coreId, t_dsp_memory_type_id *memType, t_bool powerOff); + +/*! + * \brief Memory free using a given domain handle + */ +PUBLIC void cm_DM_Free(t_memory_handle memHandle, t_bool powerOff); + +/*! + * \brief Wrapper function for CM_GetMpcMemoryStatus. + */ +PUBLIC t_cm_error cm_DM_GetAllocatorStatus(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_cm_allocator_status *pStatus); + +PUBLIC t_cm_error cm_DM_GetDomainAbsAdresses(t_cm_domain_id domainId, t_cm_domain_info *info); + +/*! + * \brief Change the domain for the given allocated chunk + */ +PUBLIC void cm_DM_SetDefaultDomain(t_memory_handle memHandle, t_nmf_core_id coreId); +#endif /* DOMAIN_H_ */ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h new file mode 100644 index 00000000000..354315d4d72 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2, with + * user space exemption described in the top-level COPYING file in + * the Linux kernel source tree. 
+ */ +/***************************************************************************/ +/* file : domain.h + * author : NMF team + * version : 1.0 + * + * brief : NMF domain definitions + */ +/***************************************************************************/ + +#ifndef DOMAIN_TYPE_H_ +#define DOMAIN_TYPE_H_ + +#include <cm/inc/cm_type.h> +#include <cm/engine/memory/inc/memory_type.h> + +/*! + * \brief Domain identifier + * \ingroup CM_DOMAIN_API + */ +typedef t_uint8 t_cm_domain_id; + +/*! + * \brief Client identifier + * 0 (zero) is considered as an invalid or 'NO' client identifier + * \ingroup CM_DOMAIN_API + */ +typedef t_uint32 t_nmf_client_id; +#define NMF_CORE_CLIENT (t_nmf_client_id)-1 +#define NMF_CURRENT_CLIENT (t_nmf_client_id)0 + +typedef struct { + t_uint32 offset; //!< offset relativ to segment start in memory (in bytes) + t_uint32 size; //!< size in bytes of the domain segment +} t_cm_domain_segment; + +/*! + * \brief Domain memory description structure + * \ingroup CM_DOMAIN_API + */ +typedef struct { + t_nmf_core_id coreId; //!< MMDSP Core Id for this domain (used for TCM-X and TCM-Y at instantiate) + t_cm_domain_segment esramCode; //!< ESRAM code segment + t_cm_domain_segment esramData; //!< ESRAM data segment + t_cm_domain_segment sdramCode; //!< SDRAM code segment + t_cm_domain_segment sdramData; //!< SDRAM data segment +} t_cm_domain_memory; + +#define INIT_DOMAIN_SEGMENT {0, 0} +#define INIT_DOMAIN {MASK_ALL8, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT} + + +#endif /* DOMAIN_TYPE_H_ */ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h new file mode 100644 index 00000000000..c6d1fb2dfff --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2. + */ +/*! + * \internal + * \brief Internal Memory Management API. + * + * \defgroup MEMORY_INTERNAL Private Memory API. + * + */ +#ifndef __INC_MEMORY_H +#define __INC_MEMORY_H + +#include <cm/engine/api/control/configuration_engine.h> +#include <cm/engine/memory/inc/remote_allocator.h> + +#endif /* __INC_MEMORY_H */ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h new file mode 100644 index 00000000000..dbdba4c3d9d --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2, with + * user space exemption described in the top-level COPYING file in + * the Linux kernel source tree. + */ +/*! + * \brief Public Component Manager Memory API type. + * + * This file contains the Component Manager API type for manipulating memory. + */ +#ifndef __INC_MEMORY_TYPE_H +#define __INC_MEMORY_TYPE_H + +#include <cm/inc/cm_type.h> + +/*! 
+ * @defgroup t_cm_mpc_memory_type t_cm_mpc_memory_type + * \brief Definition of symbols used to reference the various type of Media Processor Core adressable memory + * @{ + * \ingroup MEMORY + */ +typedef t_uint8 t_cm_mpc_memory_type; //!< Fake enumeration type +#define CM_MM_MPC_TCM16_X ((t_cm_mpc_memory_type)0) +#define CM_MM_MPC_TCM24_X ((t_cm_mpc_memory_type)1) +#define CM_MM_MPC_ESRAM16 ((t_cm_mpc_memory_type)2) +#define CM_MM_MPC_ESRAM24 ((t_cm_mpc_memory_type)3) +#define CM_MM_MPC_SDRAM16 ((t_cm_mpc_memory_type)4) +#define CM_MM_MPC_SDRAM24 ((t_cm_mpc_memory_type)5) +#define CM_MM_MPC_TCM16_Y ((t_cm_mpc_memory_type)6) +#define CM_MM_MPC_TCM24_Y ((t_cm_mpc_memory_type)7) +#define CM_MM_MPC_TCM16 CM_MM_MPC_TCM16_X +#define CM_MM_MPC_TCM24 CM_MM_MPC_TCM24_X + +/* @} */ + +/*! + * @defgroup t_cm_memory_alignment t_cm_memory_alignment + * \brief Definition of symbols used to constraint the alignment of the allocated memory + * @{ + * \ingroup MEMORY + */ +typedef t_uint16 t_cm_memory_alignment; //!< Fake enumeration type +#define CM_MM_ALIGN_NONE ((t_cm_memory_alignment)0x00000000) +#define CM_MM_ALIGN_BYTE ((t_cm_memory_alignment)CM_MM_ALIGN_NONE) +#define CM_MM_ALIGN_HALFWORD ((t_cm_memory_alignment)0x00000001) +#define CM_MM_ALIGN_WORD ((t_cm_memory_alignment)0x00000003) +#define CM_MM_ALIGN_2WORDS ((t_cm_memory_alignment)0x00000007) +#define CM_MM_ALIGN_16BYTES ((t_cm_memory_alignment)0x0000000F) +#define CM_MM_ALIGN_4WORDS ((t_cm_memory_alignment)0x0000000F) +#define CM_MM_ALIGN_AHB_BURST ((t_cm_memory_alignment)0x0000000F) +#define CM_MM_ALIGN_32BYTES ((t_cm_memory_alignment)0x0000001F) +#define CM_MM_ALIGN_8WORDS ((t_cm_memory_alignment)0x0000001F) +#define CM_MM_ALIGN_64BYTES ((t_cm_memory_alignment)0x0000003F) +#define CM_MM_ALIGN_16WORDS ((t_cm_memory_alignment)0x0000003F) +#define CM_MM_ALIGN_128BYTES ((t_cm_memory_alignment)0x0000007F) +#define CM_MM_ALIGN_32WORDS ((t_cm_memory_alignment)0x0000007F) +#define CM_MM_ALIGN_256BYTES 
((t_cm_memory_alignment)0x000000FF) +#define CM_MM_ALIGN_64WORDS ((t_cm_memory_alignment)0x000000FF) +#define CM_MM_ALIGN_512BYTES ((t_cm_memory_alignment)0x000001FF) +#define CM_MM_ALIGN_128WORDS ((t_cm_memory_alignment)0x000001FF) +#define CM_MM_ALIGN_1024BYTES ((t_cm_memory_alignment)0x000003FF) +#define CM_MM_ALIGN_256WORDS ((t_cm_memory_alignment)0x000003FF) +#define CM_MM_ALIGN_2048BYTES ((t_cm_memory_alignment)0x000007FF) +#define CM_MM_ALIGN_512WORDS ((t_cm_memory_alignment)0x000007FF) +#define CM_MM_ALIGN_4096BYTES ((t_cm_memory_alignment)0x00000FFF) +#define CM_MM_ALIGN_1024WORDS ((t_cm_memory_alignment)0x00000FFF) +#define CM_MM_ALIGN_65536BYTES ((t_cm_memory_alignment)0x0000FFFF) +#define CM_MM_ALIGN_16384WORDS ((t_cm_memory_alignment)0x0000FFFF) +/* @} */ + +/*! + * @defgroup t_cm_mpc_memory_alignment t_cm_mpc_memory_alignment + * \brief Definition of symbols used to constraint the alignment of the allocated mpc memory + * @{ + * \ingroup MEMORY + */ +typedef t_uint16 t_cm_mpc_memory_alignment; //!< Fake enumeration type +#define CM_MM_MPC_ALIGN_NONE ((t_cm_mpc_memory_alignment)0x00000000) +#define CM_MM_MPC_ALIGN_HALFWORD ((t_cm_mpc_memory_alignment)0x00000001) +#define CM_MM_MPC_ALIGN_WORD ((t_cm_mpc_memory_alignment)0x00000003) +#define CM_MM_MPC_ALIGN_2WORDS ((t_cm_mpc_memory_alignment)0x00000007) +#define CM_MM_MPC_ALIGN_4WORDS ((t_cm_mpc_memory_alignment)0x0000000F) +#define CM_MM_MPC_ALIGN_8WORDS ((t_cm_mpc_memory_alignment)0x0000001F) +#define CM_MM_MPC_ALIGN_16WORDS ((t_cm_mpc_memory_alignment)0x0000003F) +#define CM_MM_MPC_ALIGN_32WORDS ((t_cm_mpc_memory_alignment)0x0000007F) +#define CM_MM_MPC_ALIGN_64WORDS ((t_cm_mpc_memory_alignment)0x000000FF) +#define CM_MM_MPC_ALIGN_128WORDS ((t_cm_mpc_memory_alignment)0x000001FF) +#define CM_MM_MPC_ALIGN_256WORDS ((t_cm_mpc_memory_alignment)0x000003FF) +#define CM_MM_MPC_ALIGN_512WORDS ((t_cm_mpc_memory_alignment)0x000007FF) +#define CM_MM_MPC_ALIGN_1024WORDS ((t_cm_mpc_memory_alignment)0x00000FFF) 
+#define CM_MM_MPC_ALIGN_65536BYTES ((t_cm_mpc_memory_alignment)0x0000FFFF) +#define CM_MM_MPC_ALIGN_16384WORDS ((t_cm_mpc_memory_alignment)0x0000FFFF) +/* @} */ + +/*! + * \brief Identifier of a memory handle + * \ingroup MEMORY + */ +typedef t_uint32 t_cm_memory_handle; + +/*! + * \brief Description of a memory segment + * + * <=> allocable addressable space + * \ingroup MEMORY + */ +typedef struct { + t_cm_system_address systemAddr; //!< Logical AND physical segment start address + t_uint32 size; //!< segment size (in bytes) +} t_nmf_memory_segment; +#define INIT_MEMORY_SEGMENT {{0, 0}, 0} + +/*! + * \brief Definition of structure used for an allocator status + * \ingroup MEMORY + */ +typedef struct +{ + struct { + t_uint32 size; //!< size of the allocator + /* Block counters */ + t_uint16 used_block_number; //!< used block number + t_uint16 free_block_number; //!< free block number + + /* Free memory min/max */ + t_uint32 maximum_free_size; //!< maximum free size + t_uint32 minimum_free_size; //!< minimum free size + + /* Accumulation of free and used memory */ + t_uint32 accumulate_free_memory; //!< accumulate free memory + t_uint32 accumulate_used_memory; //!< accumulate used memory + } global; + + struct { + t_uint32 size; //!< size of the domain + t_uint32 maximum_free_size; //!< maximum free size in the given domain + t_uint32 minimum_free_size; //!< minimum free size in the given domain + t_uint32 accumulate_free_memory; //all free memory of the given domain + t_uint32 accumulate_used_memory; //all used memory of the given domain + } domain; + + struct { + t_uint32 sizes[3]; + } stack[NB_CORE_IDS]; + +} t_cm_allocator_status; + +#endif /* __INC_MEMORY_TYPE_H */ + diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h new file mode 100644 index 00000000000..824d25374b3 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 
ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +/*! + * \internal + * \brief Migration API. + * + * \defgroup + * + */ +#ifndef __INC_MIGRATION_H +#define __INC_MIGRATION_H + +#include <cm/engine/memory/inc/domain_type.h> +#include <cm/engine/dsp/inc/dsp.h> + +typedef enum { + STATE_MIGRATED = 1, + STATE_NORMAL = 0, +} t_cm_migration_state; + +PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst); + +PUBLIC t_cm_error cm_unmigrate(void); + +PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr); + +PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected); + +#endif /* __INC_MIGRATION_H */ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h new file mode 100644 index 00000000000..36994f37daa --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h @@ -0,0 +1,275 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +/** + * \internal + * + * \note: In this module, we assume that parameters were checked !! + */ +#ifndef __REMOTE_ALLOCATOR_H_ +#define __REMOTE_ALLOCATOR_H_ + +/* + * Include + */ +#include <cm/inc/cm_type.h> +#include <cm/engine/memory/inc/memory_type.h> + + +/* + * Description of the memory block status + */ +typedef enum { + MEM_USED = 0, /* Memory block is used */ + MEM_FREE = 1 /* Memory block is free */ +} t_mem_status; + +/* + * Chunk structure. 
+ */ +struct cm_allocator_desc; +typedef struct chunk_struct +{ + /* Double linked list of chunks */ + struct chunk_struct *prev; + struct chunk_struct *next; + + /* Double linked list of free memory */ + struct chunk_struct *prev_free_mem; + struct chunk_struct *next_free_mem; + + /* Offset of the block memory */ + t_uint32 offset; + + /* Size of the block memory */ + t_cm_size size; + + /* Status of the block memory */ + t_mem_status status; + + /* User data */ + t_uint16 userData; + + /* Alloc debug info*/ + t_uint32 domainId; + + /* Alloc desc backlink */ + struct cm_allocator_desc *alloc; +} t_cm_chunk; + +/*! + * \brief Identifier of an internal memory handle + * \ingroup MEMORY_INTERNAL + */ +typedef t_cm_chunk* t_memory_handle; + +#define INVALID_MEMORY_HANDLE ((t_cm_chunk*)NULL) + + +/* + * Context structure + */ +#define BINS 63 + +//TODO, juraj, add memType to alloc struct ? +typedef struct cm_allocator_desc { + const char *pAllocName; /* Name of the allocator */ + t_uint32 maxSize; /* Max size of the allocator -> Potentially increase/decrease by stack management */ + t_uint32 sbrkSize; /* Current size of allocator */ + t_uint32 offset; /* Offset of the allocator */ + t_cm_chunk *chunks; /* Array of chunk */ + t_cm_chunk *lastChunk; /* Null terminated last chunk of previous array declaration */ + t_cm_chunk *free_mem_chunks[BINS]; /* List of free memory */ + struct cm_allocator_desc* next; /* List of allocator */ +} t_cm_allocator_desc; + +int bin_index(unsigned int sz); + +/* + * Functions + */ +/*! + * \brief Create a new allocator for a piece of memory (hw mapped (xram, yram)) + * Any further allocation into this piece of memory will return an offset inside it. 
+ * (a constant offset value can be added to this offset) + * + * \retval t_cm_allocator_desc* new memory allocator identifier + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_cm_allocator_desc* cm_MM_CreateAllocator( + t_cm_size size, //!< [in] Size of the addressable space in bytes + t_uint32 offset, //!< [in] Constant offset to add to each allocated block base address + const char* name //!< [in] Name of the allocator + ); + +/*! + * \brief Free a memory allocator descriptor + * + * \retval t_cm_error + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_cm_error cm_MM_DeleteAllocator( + t_cm_allocator_desc* alloc //!< [in] Identifier of the memory allocator to be freed + ); + + +/*! + * \brief Resize an allocator to the size value. + * + * \retval t_cm_error + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_cm_error cm_MM_ResizeAllocator( + t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator used to allocate the piece of memory + t_cm_size size //!< [in] Size of the addressable space in allocDesc granularity + ); + +/*! + * \brief Check validity of a user handle + */ +t_cm_error cm_MM_getValidMemoryHandle(t_cm_memory_handle handle, t_memory_handle* validHandle); + +/*! + * \brief Wrapper routine to allocate some memory into a given allocator + * + * \retval t_memory_handle handle on the new allocated piece of memory + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_memory_handle cm_MM_Alloc( + t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator + t_cm_size size, //!< [in] Size of the addressable space + t_cm_memory_alignment memAlignment, //!< [in] Alignment constraint + t_uint32 seg_offset, //!< [in] Offset of range where allocating + t_uint32 seg_size, //!< [in] Size of range where allocating + t_uint32 domainId + ); + + +/*! + * \brief Routine to reallocate memory for a given handle + * + * Routine to reallocate memory for a given handle. 
The chunk can be extended or shrinked in both + * directions - top and bottom, depending on the offset and size arguments. + * + * \retval t_memory_handle handle on the reallocated piece of memory + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_cm_error cm_MM_Realloc( + t_cm_allocator_desc* alloc, + const t_cm_size size, + const t_uint32 offset, + t_memory_handle *handle); +/*! + * \brief Frees the allocated chunk + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC void cm_MM_Free( + t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator + t_memory_handle memHandle //!< [in] Memory handle to free + ); + + +/*! + * \brief Get the allocator status + * + * \param[in] alloc Identifier of the memory allocator + * \param[out] pStatus Status of the allocator + * + * \retval t_cm_error + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_cm_error cm_MM_GetAllocatorStatus(t_cm_allocator_desc* alloc, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus); + +/*! + * \brief Returns the offset into a given memory allocator of an allocated piece of memory + * + * \param[in] memHandle handle on the given memory + * + * \retval t_uint32 offset into the given memory allocator + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_uint32 cm_MM_GetOffset(t_memory_handle memHandle); + + +/*! + * \brief Returns the size in word size for a given memory allocator of an allocated piece of memory + * + * \param[in] memHandle handle on the given memory + * + * \retval t_uint32 size in wordsize for the given memory allocator + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_uint32 cm_MM_GetSize(t_memory_handle memHandle); + +/*! + * \brief Returns the size in bytes for a given memory allocator + * + * \param[in] allocDesc Identifier of the memory allocator + * \retval size + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC t_uint32 cm_MM_GetAllocatorSize(t_cm_allocator_desc* allocDesc); + + +/*! 
+ * \brief Set the user data of an allocated piece of memory + * + * \param[in] memHandle handle on the given memory + * \param[in] userData UsedData of the given memory piece + * + * \retval t_cm_error + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC void cm_MM_SetMemoryHandleUserData (t_memory_handle memHandle, t_uint16 userData); + + +/*! + * \brief Return the user data of an allocated piece of memory + * + * \param[in] memHandle handle on the given memory + * \param[out] pUserData returned UsedData of the given memory piece + * + * \retval t_cm_error + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC void cm_MM_GetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 *pUserData, t_cm_allocator_desc **alloc); + +/*! + * \brief Dump chunkd in the range of [start:end] + * + * \param[in] alloc Allocator descriptor + * \param[in] start Range start + * \param[in] end Range end + * + * \retval void + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC void cm_MM_DumpMemory(t_cm_allocator_desc* alloc, t_uint32 start, t_uint32 end); + +/*! + * \brief Change the domain for the given chunk of memory + * + * \param[in] memHandle The given chunk of memory + * \param[in] domainId The new domain id to set + * + * \retval void + * + * \ingroup MEMORY_INTERNAL + */ +PUBLIC void cm_MM_SetDefaultDomain(t_memory_handle memHandle, t_uint32 domainId); +#endif /* _REMOTE_ALLOCATOR_H_*/ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h new file mode 100644 index 00000000000..0a7c901187b --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. 
+ */ +/** + * \internal + */ +#ifndef REMOTE_ALLOCATOR_UTILS_H_ +#define REMOTE_ALLOCATOR_UTILS_H_ + +#include <cm/engine/memory/inc/remote_allocator.h> +#include <cm/engine/memory/inc/chunk_mgr.h> + +typedef enum { + FREE_CHUNK_BEFORE, + FREE_CHUNK_AFTER, +} t_mem_split_position; + + +PUBLIC void updateFreeList(t_cm_allocator_desc* alloc, t_cm_chunk* chunk); + +PUBLIC void linkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* prev,t_cm_chunk* add); +PUBLIC void unlinkChunk(t_cm_allocator_desc* alloc,t_cm_chunk* current); + +PUBLIC void unlinkFreeMem(t_cm_allocator_desc* alloc,t_cm_chunk* current); +PUBLIC void linkFreeMemBefore(t_cm_chunk* add, t_cm_chunk* next); +PUBLIC void linkFreeMemAfter(t_cm_chunk* prev,t_cm_chunk* add); + +PUBLIC t_cm_chunk* splitChunk(t_cm_allocator_desc* alloc, t_cm_chunk *chunk, t_uint32 offset, t_mem_split_position position); + +#endif /*REMOTE_ALLOCATOR_UTILS_H_*/ diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c b/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c new file mode 100644 index 00000000000..78a549a74ce --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c @@ -0,0 +1,97 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. 
+ */ +/* + * Include + */ +#include <cm/inc/cm_type.h> +#include "../inc/chunk_mgr.h" +#include <cm/engine/trace/inc/trace.h> + +#define CHUNKS_PER_PAGE 500 +#define CHUNK_THRESOLD 5 + +struct t_page_chuncks { + struct t_page_chuncks *nextPage; + // unsigned int freeChunkInPage; + t_cm_chunk chunks[CHUNKS_PER_PAGE]; +}; + +static struct t_page_chuncks *firstPage = 0; + +static unsigned int freeChunks = 0; +static t_cm_chunk *firstFreeChunk = 0; + +t_cm_chunk* allocChunk() +{ + t_cm_chunk* chunk = firstFreeChunk; + + firstFreeChunk = chunk->next; + + chunk->next_free_mem = 0; + chunk->prev_free_mem = 0; + chunk->prev = 0; + chunk->next = 0; + chunk->status = MEM_FREE; + // chunk->offset = 0; + // chunk->size = 0; + // chunk->alloc = 0; + // chunk->userData = 0; + + freeChunks--; + + return chunk; +} + +void freeChunk(t_cm_chunk* chunk) +{ + // Link chunk in free list + chunk->next = firstFreeChunk; + firstFreeChunk = chunk; + + // Increase counter + freeChunks++; +} + +t_cm_error allocChunkPool(void) +{ + struct t_page_chuncks* newPage; + int i; + + newPage = (struct t_page_chuncks*)OSAL_Alloc(sizeof(struct t_page_chuncks)); + if(newPage == NULL) + return CM_NO_MORE_MEMORY; + + // Link page + newPage->nextPage = firstPage; + firstPage = newPage; + + // Put chunk in free list + for(i = 0; i < CHUNKS_PER_PAGE; i++) + freeChunk(&newPage->chunks[i]); + + return CM_OK; +} + +t_cm_error fillChunkPool(void) +{ + if(freeChunks < CHUNK_THRESOLD) + return allocChunkPool(); + + return CM_OK; +} + +void freeChunkPool(void) +{ + while(firstPage != NULL) + { + struct t_page_chuncks* tofree = firstPage; + firstPage = firstPage->nextPage; + OSAL_Free(tofree); + } + + firstFreeChunk = 0; + freeChunks = 0; +} diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c b/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c new file mode 100644 index 00000000000..a605cc475a9 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c @@ -0,0 +1,608 @@ +/* + * 
Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +#include <cm/inc/cm_type.h> +#include <inc/nmf-limits.h> + +#include <cm/engine/memory/inc/domain.h> +#include <cm/engine/memory/inc/migration.h> +#include <cm/engine/memory/inc/chunk_mgr.h> +#include <cm/engine/trace/inc/trace.h> +#include <cm/engine/dsp/inc/dsp.h> +#include <cm/engine/component/inc/instance.h> +#include <cm/engine/power_mgt/inc/power.h> +#include <cm/engine/trace/inc/trace.h> + +/* + * domain_memory structure is all we need + */ +#define MAX_USER_DOMAIN_NB 64 +#define MAX_SCRATCH_DOMAIN_NB 16 + +t_cm_domain_desc domainDesc[MAX_USER_DOMAIN_NB]; +t_cm_domain_scratch_desc domainScratchDesc[MAX_SCRATCH_DOMAIN_NB]; + +static t_cm_allocator_desc *cm_DM_getAllocator(t_cm_domain_id domainId, t_dsp_memory_type_id memType); +static void cm_DM_DomainError(const t_cm_domain_id parentId, const t_nmf_client_id client); + +#define INIT_DOMAIN_STRUCT(domainDesc) do { \ + domainDesc.client = 0; \ + domainDesc.type = DOMAIN_NORMAL; \ + domainDesc.refcount = 0; \ + domainDesc.domain.coreId = MASK_ALL8; \ + domainDesc.domain.esramCode.offset = 0; \ + domainDesc.domain.esramCode.size = 0; \ + domainDesc.domain.esramData.offset = 0; \ + domainDesc.domain.esramData.size = 0; \ + domainDesc.domain.sdramCode.offset = 0; \ + domainDesc.domain.sdramCode.size = 0; \ + domainDesc.domain.sdramData.offset = 0; \ + domainDesc.domain.sdramData.size = 0; \ + domainDesc.scratch.parent.handle = 0; \ + domainDesc.scratch.child.alloc = 0; \ + domainDesc.scratch.child.parent_ref = 0; \ + domainDesc.dbgCooky = NULL; \ + } while (0) + +#define FIND_DOMAIN_ID(domainId) \ + { \ + domainId = 0; \ + while (domainDesc[domainId].client != 0 && domainId < MAX_USER_DOMAIN_NB) { \ + domainId++; \ + } \ + if (domainId >= MAX_USER_DOMAIN_NB) { \ + return CM_INTERNAL_DOMAIN_OVERFLOW; \ + } \ + } + +#define 
FIND_SCRATCH_DOMAIN_ID(domainId) \ + { \ + domainId = 0; \ + while (domainScratchDesc[domainId].allocDesc != 0 && domainId < MAX_SCRATCH_DOMAIN_NB) { \ + domainId++; \ + } \ + if (domainId >= MAX_SCRATCH_DOMAIN_NB) { \ + return CM_INTERNAL_DOMAIN_OVERFLOW; \ + } \ + } + +PUBLIC t_cm_error cm_DM_CheckDomain(t_cm_domain_id handle, t_cm_domain_type type) +{ + if ((handle <= 3) + || (handle >= MAX_USER_DOMAIN_NB)) { //remember, domain[0-3] are reserved + return CM_INVALID_DOMAIN_HANDLE; + } + + if (domainDesc[handle].client == 0) { + return CM_INVALID_DOMAIN_HANDLE; + } + + if (type != DOMAIN_ANY) { + if (domainDesc[handle].type != type) { + return CM_INVALID_DOMAIN_HANDLE; + } + } + + return CM_OK; +} + +PUBLIC t_cm_error cm_DM_CheckDomainWithClient(t_cm_domain_id handle, t_cm_domain_type type, t_nmf_client_id client) +{ + t_cm_error error; + + if((error = cm_DM_CheckDomain(handle, type)) != CM_OK) + return error; + +#ifdef CHECK_TO_BE_REACTIVATED_IN_2_11 + if(domainDesc[handle].client != client) + { + ERROR("CM_DOMAIN_VIOLATION: domain %d created by client %d not usable by client %d.", handle, domainDesc[handle].client, client, 0, 0, 0); + return CM_DOMAIN_VIOLATION; + } +#endif + + return CM_OK; +} + +PUBLIC t_cm_error cm_DM_Init(void) +{ + t_cm_error error; + + int i = 0; + for(i = 0; i < MAX_USER_DOMAIN_NB; i++) { + INIT_DOMAIN_STRUCT(domainDesc[i]); + } + + //domains[0-3] are reserved - allows to catch some cases of incorrect usage, + //especially when user uses coreId instead of domainId, ie id = 1, 2, 3 + domainDesc[0].client = NMF_CORE_CLIENT; + domainDesc[1].client = NMF_CORE_CLIENT; + domainDesc[2].client = NMF_CORE_CLIENT; + domainDesc[3].client = NMF_CORE_CLIENT; + + /* We use domain 1 and 2 for the singleton, only used for components structure */ + domainDesc[DEFAULT_SVA_DOMAIN].type = DOMAIN_NORMAL; + domainDesc[DEFAULT_SVA_DOMAIN].domain.coreId= SVA_CORE_ID; + domainDesc[DEFAULT_SVA_DOMAIN].domain.esramCode.size = (t_uint32)-1; + 
domainDesc[DEFAULT_SVA_DOMAIN].domain.esramData.size = (t_uint32)-1; + domainDesc[DEFAULT_SVA_DOMAIN].domain.sdramCode.size = (t_uint32)-1; + domainDesc[DEFAULT_SVA_DOMAIN].domain.sdramData.size = (t_uint32)-1; + domainDesc[DEFAULT_SIA_DOMAIN].type = DOMAIN_NORMAL; + domainDesc[DEFAULT_SIA_DOMAIN].domain.coreId= SIA_CORE_ID; + domainDesc[DEFAULT_SIA_DOMAIN].domain.esramCode.size = (t_uint32)-1; + domainDesc[DEFAULT_SIA_DOMAIN].domain.esramData.size = (t_uint32)-1; + domainDesc[DEFAULT_SIA_DOMAIN].domain.sdramCode.size = (t_uint32)-1; + domainDesc[DEFAULT_SIA_DOMAIN].domain.sdramData.size = (t_uint32)-1; + + for(i = 0; i < MAX_SCRATCH_DOMAIN_NB; i++) { + domainScratchDesc[i].domainId = 0; + domainScratchDesc[i].parentId = 0; + domainScratchDesc[i].allocDesc = 0; + } + + // Alloc twice for having comfortable chunk + if((error = allocChunkPool()) != CM_OK) + return error; + if((error = allocChunkPool()) != CM_OK) + { + freeChunkPool(); + return error; + } + + return CM_OK; +} + +PUBLIC void cm_DM_Destroy(void) +{ + //cm_DM_Init(); + freeChunkPool(); +} + +PUBLIC t_nmf_core_id cm_DM_GetDomainCoreId(const t_cm_domain_id domainId) +{ + + return domainDesc[domainId].domain.coreId; +} + +#if 0 +static t_uint32 cm_DM_isSegmentOverlaping(const t_cm_domain_segment *d0, const t_cm_domain_segment *d1) +{ + t_uint32 min0 = d0->offset; + t_uint32 max0 = d0->offset + d0->size; + t_uint32 min1 = d1->offset; + t_uint32 max1 = d1->offset + d1->size; + + if ( (min0 < min1) && (min1 < max0) ){ /* min0 < min1 < max0 OR min1 in [min0:max0] */ + return 1; + } + if ( (min1 < min0) && (min0 <= max1) ){ /* min1 < min0 < max0 OR min0 in [min1:max1] */ + return 1; + } + + return 0; +} +{ + ... 
+ + t_uint32 i; + //check non-overlapp with other domains + for (i = 0; i < MAX_USER_DOMAIN_NB; i++) { + if (domainDesc[i].client != 0) { + if (cm_DM_isSegmentOverlaping(&domainDesc[i].domain.esramData, &domain->esramData)) { + return CM_DOMAIN_OVERLAP; + } + /* + if (cm_DM_isSegmentOverlaping(&domainDesc[i].domain.esramData, &domain->esramData)) { + return CM_DOMAIN_OVERLAP; + } + */ + } + } + + ... +} +#endif + +PUBLIC t_cm_error cm_DM_CreateDomain(const t_nmf_client_id client, const t_cm_domain_memory *domain, t_cm_domain_id *handle) +{ + t_cm_domain_id domainId; + FIND_DOMAIN_ID(domainId); + + if (client == 0) + return CM_INVALID_PARAMETER; + + if (domain->coreId > LAST_CORE_ID) + return CM_INVALID_DOMAIN_DEFINITION; + + //FIXME, juraj, check invalid domain definition + domainDesc[domainId].client = client; + domainDesc[domainId].domain = *domain; + + if (osal_debug_ops.domain_create) + osal_debug_ops.domain_create(domainId); + + *handle = domainId; + + return CM_OK; +} + +//TODO, juraj, add assert to cm_MM_GetOffset(), if domain is scratch parent +PUBLIC t_cm_error cm_DM_CreateDomainScratch(const t_nmf_client_id client, const t_cm_domain_id parentId, const t_cm_domain_memory *domain, t_cm_domain_id *handle) +{ + t_cm_error error; + t_memory_handle memhandle; + t_cm_allocator_desc *alloc; + t_uint32 parentMin, parentMax; + t_uint32 scratchMin, scratchMax; + + /* check if the parent domain exists */ + /* parent could be DOMAIN_NORMAL (1st call) or DOMAIN_SCRATCH_PARENT (other calls) */ + if ((error = cm_DM_CheckDomain(parentId, DOMAIN_ANY)) != CM_OK) { + return error; + } + + parentMin = domainDesc[parentId].domain.esramData.offset; + parentMax = domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size; + scratchMin = domain->esramData.offset; + scratchMax = domain->esramData.offset + domain->esramData.size; + /* check if the scratch domain respects the parent domain (esram data only )*/ + if ( (parentMin > scratchMin) || 
(parentMax < scratchMax) ) { + return CM_INVALID_DOMAIN_DEFINITION; + } + + /* create the scratch domain */ + if ((error = cm_DM_CreateDomain(client, domain, handle)) != CM_OK) { + return error; + } + + /* check if this is the first scratch domain */ + if (domainDesc[parentId].scratch.parent.handle == 0) { + /* 1st scratch domain */ + t_cm_domain_segment tmp; + + /* reserve the zone for the scratch domain */ + tmp = domainDesc[parentId].domain.esramData; + domainDesc[parentId].domain.esramData = domain->esramData; + memhandle = cm_DM_Alloc(parentId, ESRAM_EXT16, domain->esramData.size / 2, CM_MM_ALIGN_NONE, FALSE); //note byte to 16bit-word conversion + domainDesc[parentId].domain.esramData = tmp; + if (memhandle == 0) { + cm_DM_DestroyDomain(*handle); + cm_DM_DomainError(parentId, client); + return CM_NO_MORE_MEMORY; + } + + domainDesc[parentId].type = DOMAIN_SCRATCH_PARENT; + domainDesc[parentId].refcount = 0; //reinit the refcount + domainDesc[parentId].scratch.parent.handle = memhandle; + + } else { + /* nth scratch domain */ + t_uint32 i; + t_uint32 oldMin = domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.offset; + t_uint32 oldMax = 0; + + /* compute the new scratch zone size */ + for(i = 0; i < MAX_USER_DOMAIN_NB; i++) { + if ((domainDesc[i].type == DOMAIN_SCRATCH_CHILD) && (domainDesc[i].scratch.child.parent_ref == parentId)) { + /* ok, here we have a scratch domain created from the same child domain */ + t_uint32 min = domainDesc[i].domain.esramData.offset; + t_uint32 max = domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size; + + oldMin = (min < oldMin)?min:oldMin; + oldMax = (max > oldMax)?max:oldMax; + } + } + + /* resize the scratch zone */ + if ((oldMin > scratchMin) || (oldMax < scratchMax)) { + t_uint32 newMin = (oldMin > scratchMin)?scratchMin:oldMin; + t_uint32 newMax = (oldMax < scratchMax)?scratchMax:oldMax; + + if(cm_MM_Realloc(cm_DM_getAllocator(parentId, ESRAM_EXT16), newMax - newMin, 
newMin, + &domainDesc[parentId].scratch.parent.handle) != CM_OK) + { + /* failed to extend the zone */ + cm_DM_DestroyDomain(*handle); + cm_DM_DomainError(parentId, client); + return CM_NO_MORE_MEMORY; + } + } + } + + /* create esram-data allocator in the scratch domain */ + alloc = cm_MM_CreateAllocator(domainDesc[*handle].domain.esramData.size, + domainDesc[*handle].domain.esramData.offset, + "scratch"); + + domainDesc[*handle].type = DOMAIN_SCRATCH_CHILD; + domainDesc[*handle].scratch.child.parent_ref = parentId; + domainDesc[*handle].scratch.child.alloc = alloc; + domainDesc[parentId].refcount++; + + return error; +} + +PUBLIC t_cm_error cm_DM_DestroyDomains(const t_nmf_client_id client) +{ + t_cm_domain_id handle; + t_cm_error error, status=CM_OK; + + for (handle=0; handle<MAX_USER_DOMAIN_NB; handle++) { + if ((domainDesc[handle].client == client) + && ((error=cm_DM_DestroyDomain(handle)) != CM_OK)) { + LOG_INTERNAL(0, "Error (%d) destroying remaining domainId %d for client %u\n", error, handle, client, 0, 0, 0); + status = error; + } + } + return status; +} + +PUBLIC t_cm_error cm_DM_DestroyDomain(t_cm_domain_id handle) +{ + t_cm_error error = CM_OK; + t_uint32 i; + + if ((error = cm_DM_CheckDomain(handle, DOMAIN_ANY)) != CM_OK) { + return error; + } + + //forbid destruction of cm domains + //if (handle == cm_DSP_GetState(domainDesc[handle].domain.coreId)->domainEE) + // return CM_INVALID_DOMAIN_HANDLE; + + /* loop all components and check if there are still components instantiated with this handle */ + //actually this check is redundant with the usage counters as component instantiations allocate memory + for (i=0; i<ComponentTable.idxMax; i++) + { + if (NULL != componentEntry(i) && componentEntry(i)->domainId == handle) { + return CM_ILLEGAL_DOMAIN_OPERATION; + } + } + + //perform check based on usage counters + if (domainDesc[handle].refcount != 0) { + return CM_ILLEGAL_DOMAIN_OPERATION; + } + + if (domainDesc[handle].type == DOMAIN_SCRATCH_PARENT) { + 
return CM_ILLEGAL_DOMAIN_OPERATION; //parent destroyed implicitly with the last scratch + } else if (domainDesc[handle].type == DOMAIN_SCRATCH_CHILD) { + t_cm_allocator_status status; + t_cm_domain_id parentId = domainDesc[handle].scratch.child.parent_ref; + + cm_MM_GetAllocatorStatus(domainDesc[handle].scratch.child.alloc, 0, 0xffff, &status); + if (status.global.accumulate_used_memory != 0) { + //something is still allocated + return CM_ILLEGAL_DOMAIN_OPERATION; + } + + domainDesc[parentId].refcount--; + cm_MM_DeleteAllocator(domainDesc[handle].scratch.child.alloc); //returns no error + + if (domainDesc[parentId].refcount == 0) { + /* last scratch domain */ + cm_DM_Free(domainDesc[parentId].scratch.parent.handle, FALSE); + domainDesc[parentId].scratch.parent.handle = 0; + domainDesc[parentId].type = DOMAIN_NORMAL; + } else { + /* other child scratch domains exist, check if the reserved zone needs resize, ie reduce */ + + t_uint32 i; + /* init oldMin and oldMax to values we are sure will get overwritten below */ + t_uint32 oldMin = 0xffffffff; + t_uint32 oldMax = 0x0; + t_uint32 scratchMin = domainDesc[handle].domain.esramData.offset; + t_uint32 scratchMax = domainDesc[handle].domain.esramData.offset + domainDesc[handle].domain.esramData.size; + + /* compute the remaining reserved zone size */ + for(i = 0; i < MAX_USER_DOMAIN_NB; i++) { + if (i == handle) + continue; //do not consider the current domain to be destroyed later in this function + if ((domainDesc[i].type == DOMAIN_SCRATCH_CHILD) && (domainDesc[i].scratch.child.parent_ref == parentId)) { + /* ok, here we have a scratch domain created from the same child domain */ + t_uint32 min = domainDesc[i].domain.esramData.offset; + t_uint32 max = domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size; + + oldMin = (min < oldMin)?min:oldMin; + oldMax = (max > oldMax)?max:oldMax; + } + } + + /* resize the scratch zone */ + if ((oldMin > scratchMin) || (oldMax < scratchMax)) { + 
CM_ASSERT(cm_MM_Realloc(cm_DM_getAllocator(parentId, ESRAM_EXT16), oldMax - oldMin, oldMin, + &domainDesc[parentId].scratch.parent.handle) == CM_OK); //the realloc shouldn't fail.. + } + } + } + + if (osal_debug_ops.domain_destroy) + osal_debug_ops.domain_destroy(handle); + + //reset the domain desc + INIT_DOMAIN_STRUCT(domainDesc[handle]); + + return CM_OK; +} + +/* + * - if the domainId is scratch parent, all allocations are done as in normal domains + * - if the domainId is scratch child + * if allocation type is esram, retrieve the allocator from the domainDesc + * else allocation is done as for normal domain + * - if the domainId is normal, allocator is retrieved from mpcDesc via cm_DSP_GetAllocator() + */ +static t_cm_allocator_desc *cm_DM_getAllocator(t_cm_domain_id domainId, t_dsp_memory_type_id memType) +{ + t_cm_allocator_desc *alloc = 0; + + if ((domainDesc[domainId].type == DOMAIN_SCRATCH_CHILD) + && ((memType == ESRAM_EXT16) || (memType == ESRAM_EXT24))) { + alloc = domainDesc[domainId].scratch.child.alloc; + } else { + alloc = cm_DSP_GetAllocator(domainDesc[domainId].domain.coreId, memType); + } + + return alloc; +} + +void START(void); +void END(const char*); + +//TODO, juraj, alloc would need to return finer errors then 0 +PUBLIC t_memory_handle cm_DM_Alloc(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_uint32 wordSize, t_cm_mpc_memory_alignment memAlignment, t_bool powerOn) +{ + t_nmf_core_id coreId = domainDesc[domainId].domain.coreId; + t_memory_handle handle; + t_cm_allocator_desc *alloc; + t_uint32 offset; + t_uint32 size; + + cm_DSP_GetInternalMemoriesInfo(domainId, memType, &offset, &size); + + if ((alloc = cm_DM_getAllocator(domainId, memType)) == 0) { + return 0; + } + + handle = cm_MM_Alloc(alloc, + cm_DSP_ConvertSize(memType, wordSize), + (t_cm_memory_alignment) memAlignment, + offset, size, domainId); + + if(handle != INVALID_MEMORY_HANDLE) + { + cm_MM_SetMemoryHandleUserData(handle, (coreId << SHIFT_BYTE1) | (memType << 
SHIFT_BYTE0)); + + if (powerOn) { + // [Pwr] The associated power domain can be enabled only after the Alloc request. + // Associated MPC memory chunk is not accessed (Remote allocator feature) + cm_PWR_EnableMemory( + coreId, + memType, + /* + * Compute physical address based on cm_DSP_GetHostSystemAddress but in optimized way + * -> See it for information + * -> Note TCM memory is not correctly compute, but it's not used + */ + cm_DSP_GetState(coreId)->allocator[memType]->baseAddress.physical + cm_MM_GetOffset(handle), + cm_MM_GetSize(handle)); + } + } else { + LOG_INTERNAL(0, "CM_NO_MORE_MEMORY domainId: %d, memType %d, wordSize %d, alignement %d\n", + domainId, memType, wordSize, memAlignment, 0, 0); + cm_MM_DumpMemory(alloc, offset, offset + size); + } + + return handle; +} + +PUBLIC void cm_DM_FreeWithInfo(t_memory_handle memHandle, t_nmf_core_id *coreId, t_dsp_memory_type_id *memType, t_bool powerOff) +{ + t_dsp_chunk_info chunk_info; + + cm_DSP_GetDspChunkInfo(memHandle, &chunk_info); + + if (powerOff) { + cm_PWR_DisableMemory( + chunk_info.coreId, + chunk_info.memType, + cm_DSP_GetPhysicalAdress(memHandle), + cm_MM_GetSize(memHandle)); + } + + cm_MM_Free(chunk_info.alloc, memHandle); + + *coreId = chunk_info.coreId; + *memType = chunk_info.memType; +} + +PUBLIC void cm_DM_Free(t_memory_handle memHandle, t_bool powerOff) +{ + t_nmf_core_id coreId; + t_dsp_memory_type_id memType; + + cm_DM_FreeWithInfo(memHandle, &coreId, &memType, powerOff); +} + +PUBLIC t_cm_error cm_DM_GetAllocatorStatus(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_cm_allocator_status *pStatus) +{ + t_cm_error error; + t_uint32 dOffset; + t_uint32 dSize; + + //TODO, scratch + error = cm_DM_CheckDomain(domainId, DOMAIN_ANY); + if (error != CM_OK) { + return error; + } + + cm_DSP_GetInternalMemoriesInfo(domainId, memType, &dOffset, &dSize); + + return cm_DSP_GetAllocatorStatus(domainDesc[domainId].domain.coreId, memType, + dOffset, dSize, pStatus); +} + +//WARNING: this function 
is only correct *before* migration! because +//the computation of absolute adresses of a domain is based on the allocator for the given +//segment (this is hidden in cm_DSP_GetDspBaseAddress and this info is not valid +//after migration (non-contiguous address-space from the ARM-side) +PUBLIC t_cm_error cm_DM_GetDomainAbsAdresses(t_cm_domain_id domainId, t_cm_domain_info *info) +{ + t_cm_error error; + t_nmf_core_id coreId = domainDesc[domainId].domain.coreId; + + cm_migration_check_state(coreId, STATE_NORMAL); + + error = cm_DM_CheckDomain(domainId, DOMAIN_NORMAL); + if (error != CM_OK) { + return error; + } + + cm_DSP_GetDspBaseAddress(coreId, SDRAM_CODE, &info->sdramCode); + cm_DSP_GetDspBaseAddress(coreId, ESRAM_CODE, &info->esramCode); + cm_DSP_GetDspBaseAddress(coreId, SDRAM_EXT24, &info->sdramData); + cm_DSP_GetDspBaseAddress(coreId, ESRAM_EXT24, &info->esramData); + + info->sdramCode.physical += domainDesc[domainId].domain.sdramCode.offset; + info->sdramCode.logical += domainDesc[domainId].domain.sdramCode.offset; + info->esramCode.physical += domainDesc[domainId].domain.esramCode.offset; + info->esramCode.logical += domainDesc[domainId].domain.esramCode.offset; + info->sdramData.physical += domainDesc[domainId].domain.sdramData.offset; + info->sdramData.logical += domainDesc[domainId].domain.sdramData.offset; + info->esramData.physical += domainDesc[domainId].domain.esramData.offset; + info->esramData.logical += domainDesc[domainId].domain.esramData.offset; + + return CM_OK; +} + +static void cm_DM_DomainError(const t_cm_domain_id parentId, const t_nmf_client_id client) +{ + int i; + LOG_INTERNAL(0, "NMF_DEBUG_SCRATCH failed to allocate domain (client %u): 0x%08x -> 0x%08x\n", + client, + domainDesc[parentId].domain.esramData.offset, + domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size, + 0, 0, 0); + for(i = 0; i < MAX_USER_DOMAIN_NB; i++) { + if (domainDesc[i].type == DOMAIN_SCRATCH_CHILD) { + LOG_INTERNAL(0, 
"NMF_DEBUG_SCRATCH scratch domain %d allocated (client %u): 0x%08x -> 0x%08x\n", + i, domainDesc[i].client, + domainDesc[i].domain.esramData.offset, + domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size, + 0, 0); + } + } + cm_MM_DumpMemory(cm_DM_getAllocator(parentId, ESRAM_EXT16), + domainDesc[parentId].domain.esramData.offset, + domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size); +} + +PUBLIC void cm_DM_SetDefaultDomain(t_memory_handle memHandle, t_nmf_core_id coreId) +{ + if (coreId == SVA_CORE_ID) + cm_MM_SetDefaultDomain(memHandle, DEFAULT_SVA_DOMAIN); + else if (coreId == SIA_CORE_ID) + cm_MM_SetDefaultDomain(memHandle, DEFAULT_SIA_DOMAIN); +} diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c b/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c new file mode 100644 index 00000000000..ec305812f15 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c @@ -0,0 +1,95 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. 
+ */ +#include <cm/engine/api/domain_engine.h> +#include <cm/engine/api/migration_engine.h> +#include <cm/engine/memory/inc/domain.h> +#include <cm/engine/memory/inc/migration.h> +#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h> + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomain( + const t_nmf_client_id client, + const t_cm_domain_memory *domain, + t_cm_domain_id *handle + ) +{ + t_cm_error error; + + OSAL_LOCK_API(); + error = cm_DM_CreateDomain(client, domain, handle); + OSAL_UNLOCK_API(); + return error; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomainScratch( + const t_nmf_client_id client, + const t_cm_domain_id parentId, + const t_cm_domain_memory *domain, + t_cm_domain_id *handle + ) +{ + t_cm_error error; + + OSAL_LOCK_API(); + error = cm_DM_CreateDomainScratch(client, parentId, domain, handle); + OSAL_UNLOCK_API(); + return error; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_DestroyMemoryDomain( + t_cm_domain_id handle) +{ + t_cm_error error; + + OSAL_LOCK_API(); + error = cm_DM_DestroyDomain(handle); + OSAL_UNLOCK_API(); + return error; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_FlushMemoryDomains( + t_nmf_client_id client) +{ + t_cm_error error; + + OSAL_LOCK_API(); + error = cm_DM_DestroyDomains(client); + OSAL_UNLOCK_API(); + return error; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetDomainCoreId(const t_cm_domain_id domainId, t_nmf_core_id *coreId) +{ + t_cm_error error; + OSAL_LOCK_API(); + //TODO, scratch + error = cm_DM_CheckDomain(domainId, DOMAIN_NORMAL); + if (error != CM_OK) { + OSAL_UNLOCK_API(); + return error; + } + + *coreId = cm_DM_GetDomainCoreId(domainId); + OSAL_UNLOCK_API(); + return CM_OK; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_Migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst) +{ + t_cm_error error; + OSAL_LOCK_API(); + error = cm_migrate(srcShared, src, dst); + OSAL_UNLOCK_API(); + return error; +} + +PUBLIC EXPORT_SHARED 
t_cm_error CM_ENGINE_Unmigrate(void) +{ + t_cm_error error; + OSAL_LOCK_API(); + error = cm_unmigrate(); + OSAL_UNLOCK_API(); + return error; +} diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c b/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c new file mode 100644 index 00000000000..37bea690e48 --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c @@ -0,0 +1,222 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +#include <cm/engine/memory/inc/memory.h> +#include <cm/engine/dsp/inc/dsp.h> +#include <cm/engine/component/inc/instance.h> +#include <cm/engine/configuration/inc/configuration.h> +#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h> +#include <cm/engine/trace/inc/trace.h> + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_AllocMpcMemory( + t_cm_domain_id domainId, + t_nmf_client_id clientId, + t_cm_mpc_memory_type memType, + t_cm_size size, + t_cm_mpc_memory_alignment memAlignment, + t_cm_memory_handle *pHandle + ) +{ + t_dsp_memory_type_id dspMemType; + + switch(memType) + { + case CM_MM_MPC_TCM16_X: + dspMemType = INTERNAL_XRAM16; + break; + case CM_MM_MPC_TCM24_X: + dspMemType = INTERNAL_XRAM24; + break; + case CM_MM_MPC_TCM16_Y: + dspMemType = INTERNAL_YRAM16; + break; + case CM_MM_MPC_TCM24_Y: + dspMemType = INTERNAL_YRAM24; + break; +#ifndef __STN_8810 + case CM_MM_MPC_ESRAM16: + dspMemType = ESRAM_EXT16; + break; + case CM_MM_MPC_ESRAM24: + dspMemType = ESRAM_EXT24; + break; +#endif /* ndef __STN_8810 */ + case CM_MM_MPC_SDRAM16: + dspMemType = SDRAM_EXT16; + break; + case CM_MM_MPC_SDRAM24: + dspMemType = SDRAM_EXT24; + break; + default: + return CM_INVALID_PARAMETER; + } + + OSAL_LOCK_API(); + { + t_cm_error error; + error = cm_DM_CheckDomainWithClient(domainId, DOMAIN_ANY, clientId); + if (error != CM_OK) { + 
OSAL_UNLOCK_API(); + return error; + } + } + + switch(memAlignment) { + case CM_MM_MPC_ALIGN_NONE : + case CM_MM_MPC_ALIGN_HALFWORD : + case CM_MM_MPC_ALIGN_WORD : + case CM_MM_MPC_ALIGN_2WORDS : + case CM_MM_MPC_ALIGN_4WORDS : + case CM_MM_MPC_ALIGN_8WORDS : + case CM_MM_MPC_ALIGN_16WORDS : + case CM_MM_MPC_ALIGN_32WORDS : + case CM_MM_MPC_ALIGN_64WORDS : + case CM_MM_MPC_ALIGN_128WORDS : + case CM_MM_MPC_ALIGN_256WORDS : + case CM_MM_MPC_ALIGN_512WORDS : + case CM_MM_MPC_ALIGN_1024WORDS : + case CM_MM_MPC_ALIGN_65536BYTES : + //case CM_MM_MPC_ALIGN_16384WORDS : maps to the same value as above + break; + default: + OSAL_UNLOCK_API(); + return CM_INVALID_PARAMETER; + } + + /* in case we allocate in tcm x be sure ee is load before */ + if ( memType == CM_MM_MPC_TCM16_X || memType == CM_MM_MPC_TCM24_X || + memType == CM_MM_MPC_TCM16_Y || memType == CM_MM_MPC_TCM24_Y ) + { + t_cm_error error; + if ((error = cm_CFG_CheckMpcStatus(cm_DM_GetDomainCoreId(domainId))) != CM_OK) + { + OSAL_UNLOCK_API(); + return error; + } + } + + /* alloc memory */ + *pHandle = (t_cm_memory_handle)cm_DM_Alloc(domainId, dspMemType, size, memAlignment, TRUE); + if(*pHandle == (t_cm_memory_handle)INVALID_MEMORY_HANDLE) + { + OSAL_UNLOCK_API(); + ERROR("CM_NO_MORE_MEMORY: CM_AllocMpcMemory() failed\n", 0, 0, 0, 0, 0, 0); + return CM_NO_MORE_MEMORY; + } + + OSAL_UNLOCK_API(); + return CM_OK; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_FreeMpcMemory(t_cm_memory_handle handle) +{ + t_cm_error error; + t_memory_handle validHandle; + t_nmf_core_id coreId; + t_dsp_memory_type_id memType; + + OSAL_LOCK_API(); + + if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK) + { + OSAL_UNLOCK_API(); + return error; + } + + cm_DM_FreeWithInfo(validHandle, &coreId, &memType, TRUE); + + /* in case we allocate in tcm x be sure ee is load before */ + if ( memType == INTERNAL_XRAM16 || memType == INTERNAL_XRAM24 || + memType == INTERNAL_YRAM16 || memType == INTERNAL_YRAM24 ) + { + 
cm_CFG_ReleaseMpc(coreId); + } + + OSAL_UNLOCK_API(); + return CM_OK; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemorySystemAddress(t_cm_memory_handle handle, t_cm_system_address *pSystemAddress) +{ + t_cm_error error; + t_memory_handle validHandle; + + OSAL_LOCK_API(); + + if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK) + { + OSAL_UNLOCK_API(); + return error; + } + + cm_DSP_GetHostSystemAddress(validHandle, pSystemAddress); + + OSAL_UNLOCK_API(); + return CM_OK; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryMpcAddress(t_cm_memory_handle handle, t_uint32 *pDspAddress) +{ + t_cm_error error; + t_memory_handle validHandle; + + OSAL_LOCK_API(); + + if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK) + { + OSAL_UNLOCK_API(); + return error; + } + + cm_DSP_GetDspAddress(validHandle, pDspAddress); + + OSAL_UNLOCK_API(); + return CM_OK; +} + +PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryStatus(t_nmf_core_id coreId, t_cm_mpc_memory_type memType, t_cm_allocator_status *pStatus) +{ + t_dsp_memory_type_id dspMemType; + t_cm_error error; + + switch(memType) + { + case CM_MM_MPC_TCM16_X: + dspMemType = INTERNAL_XRAM16; + break; + case CM_MM_MPC_TCM24_X: + dspMemType = INTERNAL_XRAM24; + break; + case CM_MM_MPC_TCM16_Y: + dspMemType = INTERNAL_YRAM16; + break; + case CM_MM_MPC_TCM24_Y: + dspMemType = INTERNAL_YRAM24; + break; +#ifndef __STN_8810 + case CM_MM_MPC_ESRAM16: + dspMemType = ESRAM_EXT16; + break; + case CM_MM_MPC_ESRAM24: + dspMemType = ESRAM_EXT24; + break; +#endif /* ndef __STN_8810 */ + case CM_MM_MPC_SDRAM16: + dspMemType = SDRAM_EXT16; + break; + case CM_MM_MPC_SDRAM24: + dspMemType = SDRAM_EXT24; + break; + default: + return CM_INVALID_PARAMETER; + } + + OSAL_LOCK_API(); + error = cm_DSP_GetAllocatorStatus(coreId, dspMemType, 0, 0, pStatus); + OSAL_UNLOCK_API(); + + return error; +} + diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c 
b/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c new file mode 100644 index 00000000000..d68898d830e --- /dev/null +++ b/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c @@ -0,0 +1,392 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL) version 2. + */ +#include <cm/inc/cm_type.h> +#include <inc/type.h> +#include <inc/nmf-limits.h> + +#include <cm/engine/communication/fifo/inc/nmf_fifo_arm.h> +#include <cm/engine/dsp/inc/dsp.h> +#include <cm/engine/memory/inc/domain.h> +#include <cm/engine/memory/inc/memory.h> +#include <cm/engine/memory/inc/migration.h> +#include <cm/engine/trace/inc/trace.h> +#include <cm/engine/utils/inc/mem.h> + +#if defined(__STN_8500) && (__STN_8500 > 10) + +typedef enum { + CM_MIGRATION_OK = 0, + CM_MIGRATION_INVALID_ARGUMENT = 1, + CM_MIGRATION_ERROR = 2, +} t_cm_migration_error; + +extern t_nmf_fifo_arm_desc* mpc2mpcComsFifoId[NB_CORE_IDS][NB_CORE_IDS]; + +/*! 
+ * \brief Data structure representing a segment to migrate + * + * segment: + * - used to determine which mmdsp-hw base is to be updated, index in mpcDesc->segments[] structure + * * this is hard-coded in cm_migrate(), could be computed (would be nice) LIMITATION + * srcAdr.physical: + * - new base setting + * * computed from the src domain in cm_DM_GetAbsAdresses() which uses the start of the allocator for the memory + * this is a LIMITATION, as this information is valid only before migration + * srcAdr.logical: + * - cm_MemCopy() + * * computed as srcAdr.logical + * dstAdr.physical: see srcAdr.physical + * dstAdr.logical: see srcAdr.logical + * size: + * - cm_MemCopy() + * - setting the top when new base is set + */ +typedef struct { + t_dsp_segment_type segment; //!< the link to the segment type + t_cm_system_address srcAdr; //!< source address + t_cm_system_address dstAdr; //!< destination address + t_uint32 size; //!< size of the segment +} t_cm_migration_segment; + +/*! + * \brief Internal data structure 1/ during migration, and 2/ between migration and unmigration calls + * + * all needed information are computed before calling _cm_migration_move() + */ +typedef struct { + t_cm_migration_state state; //!< migration state + t_nmf_core_id coreId; //!< migration only on one mpc + t_cm_migration_segment segments[NB_MIGRATION_SEGMENT]; //!< segments to migrate (selected on migration_move) + t_memory_handle handles[NB_MIGRATION_SEGMENT]; //!< memory handles for destination chunks allocated prior migration +} t_cm_migration_internal_state; + +static t_cm_migration_internal_state migrationState = {STATE_NORMAL, }; + +static t_cm_error _cm_migration_initSegment( + t_dsp_segment_type dspSegment, + t_cm_system_address *srcAdr, + t_uint32 size, + t_cm_domain_id dst, + t_cm_migration_internal_state *info + ) +{ + t_cm_system_address dstAdr; + t_cm_migration_segment *segment = &info->segments[dspSegment]; + t_memory_handle handle; + + handle = cm_DM_Alloc(dst, 
ESRAM_EXT16, size >> 1, CM_MM_ALIGN_AHB_BURST, TRUE); //note: byte to half-word conversion + if (handle == 0) { + ERROR("CM_NO_MORE_MEMORY: Unable to init segment for migration\n", 0, 0, 0, 0, 0, 0); + return CM_NO_MORE_MEMORY; + } + + info->handles[dspSegment] = handle; + + cm_DSP_GetHostSystemAddress(handle, &dstAdr); + + segment->segment = dspSegment; //this is redundant and could be avoided by recoding move(), but nice to have for debug + segment->size = size; + segment->srcAdr = *srcAdr; + segment->dstAdr = dstAdr; + + return CM_OK; +} + +static void _cm_migration_releaseSegment(t_cm_migration_internal_state *info, t_dsp_segment_type segId) +{ + cm_DM_Free(info->handles[segId], TRUE); +} + +static t_cm_migration_error _cm_migration_release(t_cm_migration_internal_state *info) +{ + t_uint32 i = 0; + for (i = 0; i < NB_MIGRATION_SEGMENT; i++) { + cm_DM_Free(info->handles[i], TRUE); + } + + return CM_MIGRATION_OK; +} + +#define SEGMENT_START(seg) \ + seg.offset + +#define SEGMENT_END(seg) \ + seg.offset + seg.size + +static t_cm_error _cm_migration_check( + const t_cm_domain_id srcShared, + const t_cm_domain_id src, + const t_cm_domain_id dst, + t_cm_migration_internal_state *info + ) +{ + t_cm_error error = CM_OK; + t_cm_domain_info domainInfoSrc; + t_cm_domain_info domainInfoShared; + t_cm_domain_desc *domainEE; + t_cm_domain_desc *domainShared; + t_nmf_core_id coreId = cm_DM_GetDomainCoreId(src); + + //coreIds in src, srcShared and dst match + if (!((domainDesc[src].domain.coreId == domainDesc[srcShared].domain.coreId) + && (domainDesc[src].domain.coreId == domainDesc[dst].domain.coreId))) { + return CM_INVALID_PARAMETER; + } + + //check srcShared starts at 0 + //FIXME, juraj, today EE code is in SDRAM, but this is flexible, so must find out where EE is instantiated + if (domainDesc[srcShared].domain.sdramCode.offset != 0x0) { + return CM_INVALID_PARAMETER; + } + + //check srcShared contains EE domain + domainEE = 
&domainDesc[cm_DSP_GetState(coreId)->domainEE]; + domainShared = &domainDesc[srcShared]; + if ((SEGMENT_START(domainEE->domain.esramCode) < SEGMENT_START(domainShared->domain.esramCode)) + ||(SEGMENT_END(domainEE->domain.esramCode) > SEGMENT_END(domainShared->domain.esramCode)) + ||(SEGMENT_START(domainEE->domain.esramData) < SEGMENT_START(domainShared->domain.esramData)) + ||(SEGMENT_END(domainEE->domain.esramData) > SEGMENT_END(domainShared->domain.esramData)) + ||(SEGMENT_START(domainEE->domain.sdramCode) < SEGMENT_START(domainShared->domain.sdramCode)) + ||(SEGMENT_END(domainEE->domain.sdramCode) > SEGMENT_END(domainShared->domain.sdramCode)) + ||(SEGMENT_START(domainEE->domain.sdramData) < SEGMENT_START(domainShared->domain.sdramData)) + ||(SEGMENT_END(domainEE->domain.sdramData) > SEGMENT_END(domainShared->domain.sdramData)) + ) { + return CM_INVALID_PARAMETER; + } + + info->coreId = coreId; + cm_DM_GetDomainAbsAdresses(srcShared, &domainInfoShared); + cm_DM_GetDomainAbsAdresses(src, &domainInfoSrc); + + if ((error = _cm_migration_initSegment(SDRAM_CODE_EE, &domainInfoShared.sdramCode, + domainDesc[srcShared].domain.sdramCode.size, dst, info)) != CM_OK) + goto _migration_error1; + if ((error = _cm_migration_initSegment(SDRAM_CODE_USER, &domainInfoSrc.sdramCode, + domainDesc[src].domain.sdramCode.size, dst, info)) != CM_OK) + goto _migration_error2; + if ((error = _cm_migration_initSegment(SDRAM_DATA_EE, &domainInfoShared.sdramData, + domainDesc[srcShared].domain.sdramData.size, dst, info)) != CM_OK) + goto _migration_error3; + if ((error = _cm_migration_initSegment(SDRAM_DATA_USER, &domainInfoSrc.sdramData, + domainDesc[src].domain.sdramData.size, dst, info)) != CM_OK) + goto _migration_error4; + return error; + +_migration_error4: _cm_migration_releaseSegment(info, SDRAM_DATA_EE); +_migration_error3: _cm_migration_releaseSegment(info, SDRAM_CODE_USER); +_migration_error2: _cm_migration_releaseSegment(info, SDRAM_CODE_EE); +_migration_error1: + 
    OSAL_Log("Couldn't allocate memory for migration\n", 0, 0, 0, 0, 0, 0);
    return CM_NO_MORE_MEMORY;
}

/* Callback signature used to re-program a DSP segment base register after a
 * segment has been moved (cm_DSP_updateCodeBase / cm_DSP_updateDataBase). */
typedef t_cm_error (*updateBase_t)(t_nmf_core_id, t_dsp_segment_type, t_cm_system_address, t_cm_system_address);

/*
 * Move one migration segment: copy its content from srcAdr to dstAdr and
 * re-program the corresponding DSP base register through 'updateBase'.
 *
 * param coreId     : Core whose base register must be updated
 * param seg        : Segment descriptor (source/destination addresses, size)
 * param updateBase : Callback re-programming the DSP base for this segment
 * param name       : Segment name, used for tracing only
 */
static t_cm_migration_error _cm_migration_move(
        t_nmf_core_id coreId,
        t_cm_migration_segment *seg,
        updateBase_t updateBase,
        char* name
        )
{
    LOG_INTERNAL(1, "##### Migration %s: 0x%x -> 0x%x\n", name, seg->srcAdr.logical, seg->dstAdr.logical, 0, 0, 0);
    cm_MemCopy((void*)seg->dstAdr.logical, (void*)seg->srcAdr.logical, seg->size);
    /* Patch the DSP base register so the core sees the moved segment. */
    updateBase(coreId, seg->segment, seg->srcAdr, seg->dstAdr);
    cm_MemSet((void*)seg->srcAdr.logical, 0xdead, seg->size); //for debug, to be sure that we have actually moved the code and bases

    return CM_MIGRATION_OK;
}

/*
 * Update CM-internal structures after a migration state change: the global
 * state machine and the ARM<->MPC coms FIFO descriptor pointers, which must
 * follow the moved memory (STATE_MIGRATED) or point back to their shadow
 * copies (STATE_NORMAL).
 */
static t_cm_migration_error _cm_migration_update_internal(
        t_cm_migration_internal_state *info,
        t_cm_migration_state state
        )
{
    t_nmf_fifo_arm_desc *pArmFifo;

    migrationState.state = state;

    switch(state) {
    case STATE_MIGRATED:
        //move fifos: translate the shadow descriptor addresses into the migrated region
        pArmFifo = mpc2mpcComsFifoId[ARM_CORE_ID][info->coreId];
        pArmFifo->fifoDesc = (t_nmf_fifo_desc*)cm_migration_translate(pArmFifo->dspAddressInfo.segmentType, (t_shared_addr)pArmFifo->fifoDescShadow);
        pArmFifo = mpc2mpcComsFifoId[info->coreId][ARM_CORE_ID];
        pArmFifo->fifoDesc = (t_nmf_fifo_desc*)cm_migration_translate(pArmFifo->dspAddressInfo.segmentType, (t_shared_addr)pArmFifo->fifoDescShadow);
        break;

    case STATE_NORMAL:
        //move fifos back to the un-migrated shadow descriptors
        pArmFifo = mpc2mpcComsFifoId[ARM_CORE_ID][info->coreId];
        pArmFifo->fifoDesc = pArmFifo->fifoDescShadow;
        pArmFifo = mpc2mpcComsFifoId[info->coreId][ARM_CORE_ID];
        pArmFifo->fifoDesc = pArmFifo->fifoDescShadow;
        break;

    default:
        OSAL_Log("unknown state", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }

    return CM_MIGRATION_OK;
}

/*
 * Migrate the EE (shared) and user SDRAM code/data segments from the source
 * domains into memory allocated in the destination domain.
 *
 * The DSP is stopped for the whole move, the four segments are copied and
 * their base registers re-programmed, CM-internal structures are updated,
 * then the DSP is restarted. Any individual move failure is treated as
 * fatal (CM_ASSERT).
 */
PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst)
{
    t_cm_migration_error mError;
    t_cm_error error;

    if ((error = _cm_migration_check(srcShared, src, dst, &migrationState)) != CM_OK) {
        return error;
    }

    /* stop DSP execution */
    cm_DSP_Stop(migrationState.coreId);

    /* migrate EE and FX */
    mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_EE], cm_DSP_updateCodeBase, "code");
    if (mError) {
        OSAL_Log("EE code migration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_EE], cm_DSP_updateDataBase, "data");
    if (mError) {
        OSAL_Log("EE data migration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    /* migrate user domain */
    mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_USER], cm_DSP_updateCodeBase, "code");
    if (mError) {
        OSAL_Log("User code migration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_USER], cm_DSP_updateDataBase, "data");
    if (mError) {
        OSAL_Log("User data migration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    /* update CM internal structures */
    mError = _cm_migration_update_internal(&migrationState, STATE_MIGRATED);
    if (mError) {
        OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }

    /* Be sure everything has been write before restarting mmdsp */
    OSAL_mb();

    /* resume DSP execution */
    cm_DSP_Start(migrationState.coreId);

    return CM_OK;
}

/* Exchange the source and destination addresses of a segment descriptor,
 * so that _cm_migration_move() performs the reverse copy on un-migration. */
static void _cm_migration_swapSegments(
        t_cm_migration_segment *segment
        )
{
    t_cm_system_address tmp;
    tmp = segment->dstAdr;
    segment->dstAdr = segment->srcAdr;
    segment->srcAdr = tmp;
}

/*
 * Undo a previous cm_migrate(): swap src/dst in every segment descriptor,
 * move the four segments back, restore CM-internal state and release the
 * migration resources. Fails with CM_INVALID_PARAMETER if no migration is
 * currently active.
 */
PUBLIC t_cm_error cm_unmigrate(void)
{
    t_cm_migration_error merror;

    if (migrationState.state != STATE_MIGRATED)
        return CM_INVALID_PARAMETER; //TODO, juraj, define a proper error for this migration case

    cm_DSP_Stop(migrationState.coreId);

    _cm_migration_swapSegments(&migrationState.segments[SDRAM_CODE_EE]);
    _cm_migration_swapSegments(&migrationState.segments[SDRAM_DATA_EE]);
    _cm_migration_swapSegments(&migrationState.segments[SDRAM_CODE_USER]);
    _cm_migration_swapSegments(&migrationState.segments[SDRAM_DATA_USER]);

    merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_EE], cm_DSP_updateCodeBase, "code");
    if (merror) {
        OSAL_Log("EE code unmigration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_EE], cm_DSP_updateDataBase, "data");
    if (merror) {
        OSAL_Log("EE data unmigration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_USER], cm_DSP_updateCodeBase, "code");
    if (merror) {
        OSAL_Log("User code unmigration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }
    merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_USER], cm_DSP_updateDataBase, "data");
    if (merror) {
        OSAL_Log("User data unmigration failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }

    /* update CM internal structures */
    merror = _cm_migration_update_internal(&migrationState, STATE_NORMAL);
    if (merror) {
        OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }

    /* Be sure everything has been write before restarting mmdsp */
    OSAL_mb();

    cm_DSP_Start(migrationState.coreId);

    /* update CM internal structures */
    merror = _cm_migration_release(&migrationState);
    if (merror) {
        OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
        CM_ASSERT(0);
    }

    return CM_OK;
}

// here we make the assumption that the offset doesn't depend from the dsp!!
/*
 * Translate an address belonging to a DSP segment by the migration delta of
 * that segment: returns addr shifted by (dstAdr - srcAdr) while migrated,
 * addr unchanged otherwise.
 */
PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr)
{
    //TODO, juraj, save delta instead of recalculating it
    t_sint32 offset;
    if (migrationState.state == STATE_MIGRATED) {
        offset = migrationState.segments[segmentType].dstAdr.logical - migrationState.segments[segmentType].srcAdr.logical;
    } else {
        offset = 0;
    }
    return addr + offset;
}

/* Assert that the global migration state machine is in the expected state.
 * NOTE(review): coreId is unused here - presumably kept for API symmetry
 * with the stubbed build below; confirm. */
PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected)
{
    CM_ASSERT(migrationState.state == expected);
}

#else
/* Migration support compiled out: all entry points become no-ops that keep
 * the public API available to callers. */
PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst)
{
    return CM_OK;
}

PUBLIC t_cm_error cm_unmigrate(void)
{
    return CM_OK;
}

/* Identity translation when migration is disabled. */
PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr)
{
    return addr;
}

PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected)
{
    return;
}
#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c
new file mode 100644
index 00000000000..0d000d37371
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c
@@ -0,0 +1,656 @@
/*
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2.
 */
/*
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2.
+ */ +/* + * Include + */ +#include "../inc/remote_allocator.h" +#include "../inc/remote_allocator_utils.h" +#include "../inc/chunk_mgr.h" + +#include <cm/engine/trace/inc/trace.h> +#include <cm/engine/trace/inc/xtitrace.h> + +static void cm_MM_RA_checkAllocator(t_cm_allocator_desc* alloc); +//static void cm_MM_RA_checkAlloc(t_cm_allocator_desc* alloc, t_uint32 size, t_uint32 align, t_uint32 min, t_uint32 max); + +int bin_index(unsigned int sz) { + /* + * 32 bins of size 2 + * 16 bins of size 16 + * 8 bins of size 128 + * 4 bins of size 1024 + * 2 bins of size 8192 + * 1 bin of size what's left + * + */ + return (((sz >> 6) == 0) ? (sz >> 1): // 0 -> 0 .. 31 + ((sz >> 6) <= 4) ? 28 + (sz >> 4): // 64 -> 32 .. 47 + ((sz >> 6) <= 20) ? 46 + (sz >> 7): // 320 -> 48 .. 55 + ((sz >> 6) <= 84) ? 55 + (sz >> 10): // 1344 -> 56 .. 59 + ((sz >> 6) <= 340) ? 59 + (sz >> 13): // 5440 -> 60 .. 61 + 62); // 21824.. +} + +static t_cm_allocator_desc* ListOfAllocators = NULL; + +PUBLIC t_cm_allocator_desc* cm_MM_CreateAllocator(t_cm_size size, t_uint32 offset, const char* name) +{ + t_cm_allocator_desc *alloc; + + CM_ASSERT(fillChunkPool() == CM_OK); + + /* Alloc structure */ + alloc = (t_cm_allocator_desc*)OSAL_Alloc_Zero(sizeof(t_cm_allocator_desc)); + CM_ASSERT(alloc != NULL); + + // Add allocator in list + alloc->next = ListOfAllocators; + ListOfAllocators = alloc; + + /* assign name */ + alloc->pAllocName = name; + + alloc->maxSize = size; + alloc->sbrkSize = 0; + alloc->offset = offset; + + //TODO, juraj, alloc impacts trace format + cm_TRC_traceMemAlloc(TRACE_ALLOCATOR_COMMAND_CREATE, 0, size, name); + + return alloc; +} + +PUBLIC t_cm_error cm_MM_DeleteAllocator(t_cm_allocator_desc *alloc) +{ + t_cm_chunk *chunk, *next_cm_chunk; + + cm_TRC_traceMemAlloc(TRACE_ALLOCATOR_COMMAND_DESTROY, 0, 0, alloc->pAllocName); + + /* Parse all chunks and free them */ + chunk = alloc->chunks; + while(chunk != 0) + { + next_cm_chunk = chunk->next; + unlinkChunk(alloc, chunk); + 
        freeChunk(chunk);

        chunk = next_cm_chunk;
    }

    // Remove allocator from the list
    if(ListOfAllocators == alloc)
        ListOfAllocators = alloc->next;
    else {
        t_cm_allocator_desc *prev = ListOfAllocators;
        while(prev->next != alloc)
            prev = prev->next;
        prev->next = alloc->next;
    }


    /* Free allocator descriptor */
    OSAL_Free(alloc);

    return CM_OK;
}

/*
 * Grow (or shrink) the total size managed by the allocator. Shrinking below
 * the currently used (sbrk) size is refused.
 */
PUBLIC t_cm_error cm_MM_ResizeAllocator(t_cm_allocator_desc *alloc, t_cm_size size)
{
    /* sanity check */
    if (size == 0)
        return CM_INVALID_PARAMETER;

    if(alloc->sbrkSize > size)
        return CM_NO_MORE_MEMORY;

    alloc->maxSize = size;

    if (cmIntensiveCheckState)
        cm_MM_RA_checkAllocator(alloc);

    return CM_OK;
}

/*
 * Validate a user-supplied memory handle: on non-Linux builds, walk every
 * allocator's chunk list to confirm the handle designates a live, allocated
 * chunk. O(total chunks) - acceptable for a validation path.
 */
t_cm_error cm_MM_getValidMemoryHandle(t_cm_memory_handle handle, t_memory_handle* validHandle)
{
#ifdef LINUX
    /* On linux, there is already a check within the linux part
     * => we don't need to check twice */
    *validHandle = (t_memory_handle)handle;
    return CM_OK;
#else
    t_cm_allocator_desc *alloc = ListOfAllocators;

    for(; alloc != NULL; alloc = alloc->next)
    {
        t_cm_chunk* chunk = alloc->chunks;

        /* Parse all chunks */
        for(; chunk != NULL; chunk = chunk->next)
        {
            if(chunk == (t_memory_handle)handle)
            {
                if(chunk->status == MEM_FREE)
                    return CM_MEMORY_HANDLE_FREED;

                *validHandle = (t_memory_handle)handle;

                return CM_OK;
            }
        }
    }

    return CM_UNKNOWN_MEMORY_HANDLE;
#endif
}

/*
 * Allocate 'size' units inside the window [seg_offset, seg_offset+seg_size)
 * with the requested alignment. First scans the size-binned free lists for a
 * fitting chunk; failing that, tries to extend the allocator's break (sbrk)
 * up to maxSize. The chosen chunk is split so the returned chunk covers
 * exactly the aligned range.
 *
 * return : chunk handle, or INVALID_MEMORY_HANDLE when nothing fits.
 */
//TODO, juraj, add appartenance to allocHandle (of chunk) and degage setUserData
PUBLIC t_memory_handle cm_MM_Alloc(
        t_cm_allocator_desc* alloc,
        t_cm_size size,
        t_cm_memory_alignment memAlignment,
        t_uint32 seg_offset,
        t_uint32 seg_size,
        t_uint32 domainId)
{
    t_cm_chunk* chunk;
    t_uint32 aligned_offset;
    t_uint32 aligned_end;
    t_uint32 seg_end = seg_offset + seg_size;
    int i;

    /* Sanity check */
    if ( (size == 0) || (size > seg_size) )
        return INVALID_MEMORY_HANDLE;

    /* Make sure enough chunk descriptors are available before any split. */
    if(fillChunkPool() != CM_OK)
        return INVALID_MEMORY_HANDLE;

    /* Get first chunk available for the specific size */
    // Search a list with a free chunk
    for(i = bin_index(size); i < BINS; i++)
    {
        chunk = alloc->free_mem_chunks[i];
        while (chunk != 0)
        {
            /* Alignment of the lower boundary */
            aligned_offset = ALIGN_VALUE(MAX(chunk->offset, seg_offset), (memAlignment + 1));

            aligned_end = aligned_offset + size;

            if ((aligned_end <= seg_end)
                    && aligned_end <= (chunk->offset + chunk->size)
                    && aligned_offset >= seg_offset
                    && aligned_offset >= chunk->offset)
                goto found;

            chunk = chunk->next_free_mem;
        }
    }

    // Try to increase sbrkSize through maxSize
    aligned_offset = ALIGN_VALUE(MAX((alloc->offset + alloc->sbrkSize), seg_offset), (memAlignment + 1));

    aligned_end = aligned_offset + size;

    if ((aligned_end <= seg_end)
            && aligned_end <= (alloc->offset + alloc->maxSize)
            && aligned_offset >= seg_offset
            && aligned_offset >= (alloc->offset + alloc->sbrkSize))
    {
        /* If that fit requirement, create a new free chunk at the end of current allocator */
        chunk = allocChunk();

        /* Update chunk size */
        chunk->offset = alloc->offset + alloc->sbrkSize; // offset start at end of current allocator
        chunk->size = aligned_end - chunk->offset;
        chunk->alloc = alloc;

        /* Chain it with latest chunk */
        linkChunk(alloc, alloc->lastChunk, chunk);

        /* Increase sbrkSize to end of this new chunk */
        alloc->sbrkSize += chunk->size;

        goto foundNew;
    }

    return INVALID_MEMORY_HANDLE;

found:
    /* Remove chunk from free list */
    unlinkFreeMem(alloc, chunk);

foundNew:
    //create an empty chunk before the allocated one
    if (chunk->offset < aligned_offset) {
        chunk = splitChunk(alloc, chunk, aligned_offset, FREE_CHUNK_BEFORE);
    }
    //create an empty chunk after the allocated one
    if (chunk->offset + chunk->size > aligned_end) {
        splitChunk(alloc, chunk, aligned_end, FREE_CHUNK_AFTER);
    }

    chunk->status = MEM_USED;
    chunk->prev_free_mem = 0;
    chunk->next_free_mem = 0;
    chunk->domainId = domainId;

    //TODO, juraj, alloc impacts trace format
    cm_TRC_traceMem(TRACE_ALLOC_COMMAND_ALLOC, 0, chunk->offset, chunk->size);

    if (cmIntensiveCheckState) {
        cm_MM_RA_checkAllocator(alloc);
    }

    return (t_memory_handle) chunk;
}

/*
 * Re-allocate a handle to cover [offset, offset + size): implemented as
 * free + constrained alloc; on failure the original range is re-allocated
 * and CM_NO_MORE_MEMORY returned. The allocator only manages ranges, not
 * data, so no content is copied; the chunk's userData is preserved.
 */
//caution - if successfull, the chunk offset will be aligned with seg_offset
//caution++ the offset of the allocated chunk changes implicitly
PUBLIC t_cm_error cm_MM_Realloc(
        t_cm_allocator_desc* alloc,
        const t_cm_size size,
        const t_uint32 offset,
        t_memory_handle *handle)
{
    t_cm_chunk *chunk = (t_cm_chunk*)*handle;
    t_uint32 oldOffset = chunk->offset;
    t_uint32 oldSize = chunk->size;
    t_uint32 oldDomainId = chunk->domainId;
    t_uint16 userData = chunk->userData;

    cm_MM_Free(alloc, *handle);

    *handle = cm_MM_Alloc(alloc, size, CM_MM_ALIGN_NONE, offset, size, oldDomainId);

    if(*handle == INVALID_MEMORY_HANDLE)
    {
        /* Roll back: re-allocate the original range, which must succeed
         * since it was just freed. */
        *handle = cm_MM_Alloc(alloc, oldSize, CM_MM_ALIGN_NONE, oldOffset, oldSize, oldDomainId);

        CM_ASSERT(*handle != INVALID_MEMORY_HANDLE);

        chunk = (t_cm_chunk*)*handle;
        chunk->userData = userData;

        return CM_NO_MORE_MEMORY;
    }

    chunk = (t_cm_chunk*)*handle;
    chunk->userData = userData;

    return CM_OK;

#if 0
    /* check reallocation is related to this chunk!
     */
    /* NOTE: everything from here to #endif is disabled legacy in-place
     * realloc code, kept for reference only (see active path above). */
    CM_ASSERT(chunk->offset <= (offset + size));
    CM_ASSERT(offset <= (chunk->offset + chunk->size));
    CM_ASSERT(size);

    /* check if extend low */
    if (offset < chunk->offset) {
        /* note: it is enough to check only the previous chunk,
         * because adjacent chunks of same status are merged
         */
        if ((chunk->prev == 0)
                ||(chunk->prev->status != MEM_FREE)
                ||(chunk->prev->offset > offset)) {
            return INVALID_MEMORY_HANDLE;
        }
    }

    /* check if extend high, extend sbrk if necessary */
    if ( (offset + size) > (chunk->offset + chunk->size)) {
        if(chunk->next == 0)
        {
            // check if allocator can be extended to maxSize
            if((offset + size) > (alloc->offset + alloc->maxSize))
                return INVALID_MEMORY_HANDLE;
        }
        else
        {
            if ((chunk->next->status != MEM_FREE)
                    ||( (chunk->next->offset + chunk->next->size) < (offset + size))) {
                return INVALID_MEMORY_HANDLE;
            }
        }
    }

    if(fillChunkPool() != CM_OK)
        return INVALID_MEMORY_HANDLE;


    /* extend low
     * all conditions should have been checked
     * this must not fail
     */
    if (offset < chunk->offset) {
        t_uint32 delta = chunk->prev->offset + chunk->prev->size - offset;
        t_cm_chunk *prev = chunk->prev;

        chunk->offset -= delta;
        chunk->size += delta;

        CM_ASSERT(prev->status == MEM_FREE); //TODO, juraj, already checked
        unlinkFreeMem(alloc, prev);
        prev->size -= delta;
        if(prev->size == 0)
        {
            unlinkChunk(alloc, prev);
            freeChunk(prev);
        } else {
            updateFreeList(alloc, prev);
        }
    }

    /* extend high */
    if ( (offset + size) > (chunk->offset + chunk->size)) {
        t_uint32 delta = size - chunk->size;
        t_cm_chunk *next = chunk->next;

        chunk->size += delta;

        if(next == 0)
        {
            alloc->sbrkSize += delta;
        } else {
            CM_ASSERT(next->status == MEM_FREE);
            unlinkFreeMem(alloc, next);
            next->offset += delta;
            next->size -= delta;
            if(next->size == 0)
            {
                unlinkChunk(alloc, next);
                freeChunk(next);
            } else {
                updateFreeList(alloc, next);
            }
        }
    }

    /* reduce top */
    if ((offset + size) < (chunk->offset + chunk->size)) {
        t_uint32 delta = chunk->size - size;

        if(chunk->next == 0) {
            alloc->sbrkSize -= delta;
            chunk->size -= delta;

        } else if (chunk->next->status == MEM_FREE) {
            unlinkFreeMem(alloc, chunk->next);
            chunk->size -= delta;
            chunk->next->offset -= delta;
            chunk->next->size += delta;
            updateFreeList(alloc, chunk->next);
        } else {
            t_cm_chunk *tmp = splitChunk(alloc, chunk, offset + size, FREE_CHUNK_AFTER); //tmp = chunk, chunk = result
            tmp->status = MEM_USED;
            tmp->next->status = MEM_FREE;
        }
    }

    /* reduce bottom */
    if (offset > chunk->offset) {
        if (chunk->prev->status == MEM_FREE) {
            t_uint32 delta = offset - chunk->offset;
            unlinkFreeMem(alloc, chunk->prev);
            chunk->prev->size += delta;
            chunk->offset = offset;
            chunk->size -= delta;
            updateFreeList(alloc, chunk->prev);
        } else {
            t_cm_chunk *tmp = splitChunk(alloc, chunk, offset, FREE_CHUNK_BEFORE); //tmp->next = chunk, tmp = result
            tmp->status = MEM_USED;
            tmp->prev->status = MEM_FREE;
        }
    }

    cm_MM_RA_checkAllocator(alloc);

    return (t_memory_handle)chunk;
#endif
}

/*
 * Release a chunk: mark it free, coalesce it with free neighbours, and
 * either give the space back to the break (if the chunk is the last one)
 * or insert it in the size-binned free lists.
 */
PUBLIC void cm_MM_Free(t_cm_allocator_desc* alloc, t_memory_handle memHandle)
{
    t_cm_chunk* chunk = (t_cm_chunk*)memHandle;

    //TODO, juraj, alloc impacts trace format
    cm_TRC_traceMem(TRACE_ALLOC_COMMAND_FREE, 0,
            chunk->offset, chunk->size);

    /* Update chunk status */
    chunk->status = MEM_FREE;
    chunk->domainId = 0x0;

    // Invariant: Current chunk is free but not in free list

    /* Check if the previous chunk is free */
    if((chunk->prev != 0) && (chunk->prev->status == MEM_FREE))
    {
        t_cm_chunk* prev = chunk->prev;

        // Remove chunk to be freed from memory list
        unlinkChunk(alloc, chunk);

        // Remove previous from free list
        unlinkFreeMem(alloc, prev);

        // Update previous size
        prev->size += chunk->size;

        freeChunk(chunk);

        chunk = prev;
    }

    /* Check if the next chunk is free */
    if((chunk->next != 0) && (chunk->next->status == MEM_FREE))
    {
        t_cm_chunk* next = chunk->next;

        // Remove next from memory list
        unlinkChunk(alloc, next);

        // Remove next from free list
        unlinkFreeMem(alloc, next);

        // Update previous size
        chunk->size += next->size;

        freeChunk(next);
    }

    if(chunk->next == 0)
    {
        // If we are the last one, decrease sbrkSize
        alloc->sbrkSize -= chunk->size;

        unlinkChunk(alloc, chunk);
        freeChunk(chunk);

    }
    else
    {
        // Add it in free list
        updateFreeList(alloc, chunk);
    }

    if (cmIntensiveCheckState) {
        cm_MM_RA_checkAllocator(alloc);
    }
}

/*
 * Fill pStatus with usage statistics of the allocator: block counts,
 * accumulated used/free sizes, min/max free block sizes. The untouched
 * space between the break and maxSize is counted as one free block.
 * NOTE(review): the 'offset' parameter and most 'domain' fields are not
 * computed here beyond initialization - presumably filled by a caller;
 * confirm.
 */
PUBLIC t_cm_error cm_MM_GetAllocatorStatus(t_cm_allocator_desc* alloc, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus)
{
    t_cm_chunk* chunk = alloc->chunks;
    t_uint32 sbrkFree = alloc->maxSize - alloc->sbrkSize;
    t_uint8 min_free_size_updated = FALSE;

    /* Init status */
    pStatus->global.used_block_number = 0;
    pStatus->global.free_block_number = 0;
    pStatus->global.maximum_free_size = 0;
    pStatus->global.minimum_free_size = 0xFFFFFFFF;
    pStatus->global.accumulate_free_memory = 0;
    pStatus->global.accumulate_used_memory = 0;
    pStatus->global.size = alloc->maxSize;
    pStatus->domain.maximum_free_size = 0;
    pStatus->domain.minimum_free_size = 0xFFFFFFFF;
    pStatus->domain.accumulate_free_memory = 0;
    pStatus->domain.accumulate_used_memory = 0;
    pStatus->domain.size= size;

    /* Parse all chunks */
    while(chunk != 0)
    {

        /* Chunk is free */
        if (chunk->status == MEM_FREE) {
            pStatus->global.free_block_number++;
            pStatus->global.accumulate_free_memory += chunk->size;

            /* Check max size */
            if (chunk->size > pStatus->global.maximum_free_size)
            {
                pStatus->global.maximum_free_size = chunk->size;
            }

            /* Check min size */
            if (chunk->size < pStatus->global.minimum_free_size)
            {
                pStatus->global.minimum_free_size = chunk->size;
                min_free_size_updated = TRUE;
            }
        } else {/* Chunk used */
            pStatus->global.used_block_number++;
            pStatus->global.accumulate_used_memory += chunk->size;
        }

        chunk = chunk->next;
    }

    /* Accumulate free space between sbrkSize and maxSize */
    pStatus->global.accumulate_free_memory += sbrkFree;
    if (sbrkFree > 0)
        pStatus->global.free_block_number++;
    if (pStatus->global.maximum_free_size < sbrkFree)
        pStatus->global.maximum_free_size = sbrkFree;
    if (pStatus->global.minimum_free_size > sbrkFree) {
        pStatus->global.minimum_free_size = sbrkFree;
        min_free_size_updated = TRUE;
    }

    /* Put max free size to min free size */
    if (min_free_size_updated == FALSE) {
        pStatus->global.minimum_free_size = pStatus->global.maximum_free_size;
    }

    return CM_OK;
}

/* Return the start offset of the chunk designated by the handle. */
PUBLIC t_uint32 cm_MM_GetOffset(t_memory_handle memHandle)
{
    /* Provide offset */
    return ((t_cm_chunk*)memHandle)->offset;
}

/* Return the size of the chunk designated by the handle. */
PUBLIC t_uint32 cm_MM_GetSize(t_memory_handle memHandle)
{
    return ((t_cm_chunk*)memHandle)->size;
}

/* Return the total size managed by the allocator. */
PUBLIC t_uint32 cm_MM_GetAllocatorSize(t_cm_allocator_desc* alloc)
{
    return alloc->maxSize;
}

/* Attach an opaque 16-bit user tag to a chunk. */
PUBLIC void cm_MM_SetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 userData)
{
    ((t_cm_chunk*)memHandle)->userData = userData;
}

/* Read back a chunk's user tag and, optionally, its owning allocator. */
PUBLIC void cm_MM_GetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 *pUserData, t_cm_allocator_desc **alloc)
{
    *pUserData = ((t_cm_chunk*)memHandle)->userData;
    if (alloc)
        *alloc = ((t_cm_chunk*)memHandle)->alloc;
}

/*
 * check free list is ordered
 * check all chunks are correctly linked
 * check adjacent chunks are not FREE
 */
static void cm_MM_RA_checkAllocator(t_cm_allocator_desc* alloc)
{
    t_cm_chunk *chunk = alloc->chunks;
    t_uint32 size = 0;
    int i;

    CM_ASSERT(alloc->sbrkSize <= alloc->maxSize);

    while(chunk != 0) {
        if(chunk == alloc->chunks)
            CM_ASSERT(chunk->prev == 0);
        if(chunk == alloc->lastChunk)
            CM_ASSERT(chunk->next == 0);

        CM_ASSERT(chunk->alloc == alloc);

        if (chunk->next != 0) {
            CM_ASSERT(!((chunk->status == MEM_FREE) && (chunk->next->status == MEM_FREE))); //two free adjacent blocks
            CM_ASSERT(chunk->offset < chunk->next->offset); //offsets reverted
            CM_ASSERT(chunk->offset + chunk->size == chunk->next->offset); // Not hole in allocator
        }
        size += chunk->size;
        chunk = chunk->next;
    }

    CM_ASSERT(size == alloc->sbrkSize);

    for(i = 0; i < BINS; i++)
    {
        chunk = alloc->free_mem_chunks[i];
        while(chunk != 0) {
            if (chunk->next_free_mem != 0) {
                CM_ASSERT(chunk->size <= chunk->next_free_mem->size); //free list not ordered
            }
            chunk = chunk->next_free_mem;
        }
    }
}

/*
 * Log every chunk that overlaps the [start, end] window.
 * NOTE(review): the ternary prints "FREE" for a non-zero status and "USED"
 * for zero - verify against the t_mem_status enum ordering; the labels look
 * inverted if MEM_FREE is 0.
 */
PUBLIC void cm_MM_DumpMemory(t_cm_allocator_desc* alloc, t_uint32 start, t_uint32 end)
{
    t_cm_chunk *chunk = alloc->chunks;

    LOG_INTERNAL(0, "ALLOCATOR Dumping allocator \"%s\" [0x%08x:0x%08x]\n", alloc->pAllocName, start, end, 0, 0, 0);
    while(chunk != 0) {
        if (((chunk->offset < start) && (chunk->offset + chunk->size > start))
                || ((chunk->offset < end) && (chunk->offset + chunk->size > end))
                || ((chunk->offset > start) && (chunk->offset + chunk->size < end))
                || ((chunk->offset < start) && (chunk->offset + chunk->size > end)))
        {
            LOG_INTERNAL(0, "ALLOCATOR chunk [0x%08x -> 0x%08x[: status:%s, domainId: 0x%x\n",
                    chunk->offset,
                    chunk->offset + chunk->size,
                    chunk->status?"FREE":"USED",
                    chunk->domainId, 0, 0);
        }
        chunk = chunk->next;
    }
}

/* Overwrite the domain a chunk is accounted to. */
PUBLIC void cm_MM_SetDefaultDomain(t_memory_handle memHandle, t_uint32 domainId)
{
    ((t_cm_chunk *) memHandle)->domainId = domainId;
}
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c
new file mode 100644
index 00000000000..4e800376dbb
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c
@@ -0,0 +1,250 @@
/*
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2.
+ */ +#include <cm/engine/memory/inc/remote_allocator_utils.h> +#include <cm/engine/trace/inc/trace.h> + +/***************************************************************************/ +/* + * linkChunk + * param prev : Pointer on previous chunk where the chunk will be added + * param add : Pointer on chunk to add + * + * Add a chunk in the memory list + * + */ +/***************************************************************************/ +PUBLIC void linkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* prev, t_cm_chunk* add) +{ + // Link previous + if(prev == 0) + { + add->next = alloc->chunks; + alloc->chunks = add; + } + else + { + add->prev = prev; + add->next = prev->next; + prev->next = add; + } + + // Link next + if(add->next == 0) + { + // Link at the end + alloc->lastChunk = add; + } + else + add->next->prev = add; +} + +/***************************************************************************/ +/* + * unlinkChunk + * param allocHandle : Allocator handle + * param current : Pointer on chunk to remove + * + * Remove a chunk in the memory list and update first pointer + * + */ +/***************************************************************************/ +PUBLIC void unlinkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* current) +{ + /* Link previous with next */ + if (current->prev != 0) + current->prev->next = current->next; + else + { + CM_ASSERT(alloc->chunks == current); + + // We remove the first, update chunks + alloc->chunks = current->next; + } + + /* Link next with previous */ + if(current->next != 0) + current->next->prev= current->prev; + else + { + CM_ASSERT(alloc->lastChunk == current); + + // We remove the last, update lastChunk + alloc->lastChunk = current->prev; + } +} + + +/***************************************************************************/ +/* + * unlinkFreeMem() unlinks chunk from free memory double-linked list + * makes the previous and next chunk in the list point to each other.. 
 * param allocHandle : Allocator handle
 * param current : Pointer on chunk to remove
 *
 * Remove a chunk in the free memory list and update pointer
 *
 */
/***************************************************************************/
PUBLIC void unlinkFreeMem(t_cm_allocator_desc* alloc ,t_cm_chunk* current)
{
    /* The free list is binned by size: locate the bin this chunk lives in. */
    int bin = bin_index(current->size);

    /* unlink previous */
    if (current->prev_free_mem != 0)
    {
        current->prev_free_mem->next_free_mem = current->next_free_mem;
    }

    /* Unlink next */
    if (current->next_free_mem !=0 )
    {
        current->next_free_mem->prev_free_mem = current->prev_free_mem;
    }

    /* update first free pointer */
    if (alloc->free_mem_chunks[bin] == current)
    {
        alloc->free_mem_chunks[bin] = current->next_free_mem;
    }

    /* Leave the chunk fully detached from the free list. */
    current->prev_free_mem = 0;
    current->next_free_mem = 0;
}

/***************************************************************************/
/*
 * linkFreeMemBefore
 * param add : Pointer on chunk to add
 * param next : Pointer on next chunk where the chunk will be added before
 *
 * Add a chunk in the free memory list (insertion before 'next';
 * the caller updates the bin head if needed)
 *
 */
/***************************************************************************/
PUBLIC void linkFreeMemBefore(t_cm_chunk* add, t_cm_chunk* next)
{
    /* Link next */
    add->prev_free_mem = next->prev_free_mem;
    add->next_free_mem = next;

    /* Link previous */
    if (next->prev_free_mem != 0)
    {
        next->prev_free_mem->next_free_mem = add;
    }
    next->prev_free_mem = add;
}

/***************************************************************************/
/*
 * linkFreeMemAfter
 * param add : Pointer on chunk to add
 * param prev : Pointer on previous chunk where the chunk will be added after
 *
 * Add a chunk in the free memory list (insertion after 'prev')
 *
 */
/***************************************************************************/
PUBLIC void linkFreeMemAfter(t_cm_chunk* prev,t_cm_chunk* add)
{
    /* Link previous */
    add->prev_free_mem = prev;
    add->next_free_mem =
    prev->next_free_mem;

    /* Link next */
    if (prev->next_free_mem != 0)
    {
        prev->next_free_mem->prev_free_mem = add;
    }
    prev->next_free_mem = add;
}


/***************************************************************************/
/*
 * updateFreeList
 * param allocHandle : Allocator handle
 * param offset : Pointer on chunk
 *
 * Update free memory list, ordered by size
 * (inserts 'chunk' into its size bin, keeping the bin sorted ascending)
 *
 */
/***************************************************************************/
PUBLIC void updateFreeList(t_cm_allocator_desc* alloc , t_cm_chunk* chunk)
{
    t_cm_chunk* free_chunk;
    int bin = bin_index(chunk->size);

    /* check case with no more free block */
    if (alloc->free_mem_chunks[bin] == 0)
    {
        alloc->free_mem_chunks[bin] = chunk;
        return ;
    }

    /* order list: walk until the insertion point by ascending size */
    free_chunk = alloc->free_mem_chunks[bin];
    while ((free_chunk->next_free_mem != 0) && (chunk->size > free_chunk->size))
    {
        free_chunk = free_chunk->next_free_mem;
    }

    /* Add after free chunk if smaller -> we are the last */
    if(free_chunk->size <= chunk->size)
    {
        linkFreeMemAfter(free_chunk,chunk);
    }
    else // This mean that we are smaller
    {
        linkFreeMemBefore(chunk,free_chunk);

        /* Update first free chunk */
        if (alloc->free_mem_chunks[bin] == free_chunk)
        {
            alloc->free_mem_chunks[bin] = chunk;
        }
    }
}


/***************************************************************************/
/*
 * splitChunk
 * param allocHandle : Allocator handle
 * param chunk : Current chunk (modified in place)
 * param offset : Offset address of the start memory
 * return : New chunk handle or 0 if an error occurs
 *
 * Create new chunk before/after the current chunk with the size
 *
 * NOTE(review): allocChunk() result is not NULL-checked here; callers
 * (e.g. cm_MM_Alloc) call fillChunkPool() beforehand - confirm that this
 * guarantees availability on every path.
 */
/***************************************************************************/
PUBLIC t_cm_chunk* splitChunk(t_cm_allocator_desc* alloc ,t_cm_chunk *chunk,
        t_uint32 offset, t_mem_split_position position)
{
    t_cm_chunk *free;
    t_cm_chunk *returned;

    t_cm_chunk* new_chunk = allocChunk();

    /* Decide which half is handed back to the caller and which half goes
     * back to the free list. */
    if (position == FREE_CHUNK_AFTER) {
        returned = chunk;
        free = new_chunk;
    } else { //FREE_CHUNK_BEFORE
        returned = new_chunk;
        free = chunk;
    }

    /* The new chunk covers [offset, end-of-original); the original chunk
     * shrinks to [original-offset, offset). */
    new_chunk->offset = offset;
    new_chunk->size = chunk->offset + chunk->size - offset;
    new_chunk->alloc = alloc;
    chunk->size = offset - chunk->offset;

    linkChunk(alloc, chunk, new_chunk);
    unlinkFreeMem(alloc, free);
    updateFreeList(alloc, free);

    return returned;
}