6 changes: 6 additions & 0 deletions zephyr/CMakeLists.txt
@@ -455,6 +455,12 @@ zephyr_library_sources(
schedule.c
)

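# Select the platform-specific allocator implementation.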
if(CONFIG_IMX)
zephyr_library_sources(alloc_imx.c)
else()
zephyr_library_sources(alloc_cavs.c)
endif()

zephyr_library_sources_ifdef(CONFIG_TRACE
${SOF_SRC_PATH}/trace/dma-trace.c
${SOF_SRC_PATH}/trace/trace.c
205 changes: 205 additions & 0 deletions zephyr/alloc_cavs.c
@@ -0,0 +1,205 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright(c) 2020 Intel Corporation. All rights reserved.
*
* Author: Liam Girdwood <[email protected]>
*/

#include <sof/lib/alloc.h>

/* Zephyr includes */
#include <device.h>
#include <init.h>

#ifndef CONFIG_KERNEL_COHERENCE
#include <arch/xtensa/cache.h>
#endif

/*
* Memory - Create Zephyr HEAP for SOF.
*
* Currently functional but some items still WIP.
*/

#ifndef HEAP_RUNTIME_SIZE
#define HEAP_RUNTIME_SIZE 0
#endif

/* system size not declared on some platforms */
#ifndef HEAP_SYSTEM_SIZE
#define HEAP_SYSTEM_SIZE 0
#endif

/* The Zephyr heap */
#define HEAPMEM_SIZE HEAP_BUFFER_SIZE
#define HEAPMEM_SHARED_SIZE (HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE + \
HEAP_RUNTIME_SHARED_SIZE + HEAP_SYSTEM_SHARED_SIZE)
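/*
 * heapmem backs SOF_MEM_ZONE_BUFFER allocations (pointers are converted to
 * the cached alias in heap_alloc_aligned_cached()), while heapmem_shared
 * backs the system/runtime zones and is handed out uncached so it can be
 * shared safely between cores.
 */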

static uint8_t heapmem[HEAPMEM_SIZE] __aligned(PLATFORM_DCACHE_ALIGN);
static uint8_t heapmem_shared[HEAPMEM_SHARED_SIZE] __aligned(PLATFORM_DCACHE_ALIGN);

/* Use k_heap structure */
static struct k_heap sof_heap;
static struct k_heap sof_heap_shared;

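/* Runs early in boot (PRE_KERNEL_1), before any SOF allocation can occur. */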
static int statics_init(const struct device *unused)
{
ARG_UNUSED(unused);

sys_heap_init(&sof_heap.heap, heapmem, HEAPMEM_SIZE);
sys_heap_init(&sof_heap_shared.heap, heapmem_shared, HEAPMEM_SHARED_SIZE);

return 0;
}

SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

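/* Spinlock-protected wrapper around sys_heap_aligned_alloc(). */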
static void *heap_alloc_aligned(struct k_heap *h, size_t align, size_t bytes)
{
void *ret = NULL;

k_spinlock_key_t key = k_spin_lock(&h->lock);

ret = sys_heap_aligned_alloc(&h->heap, align, bytes);

k_spin_unlock(&h->lock, key);

return ret;
}

static void *heap_alloc_aligned_cached(struct k_heap *h, size_t min_align, size_t bytes)
{
unsigned int align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
unsigned int aligned_size = ALIGN_UP(bytes, align);
void *ptr;

/*
* Zephyr sys_heap stores metadata at start of each
* heap allocation. To ensure no allocated cached buffer
* overlaps the same cacheline with the metadata chunk,
* align both allocation start and size of allocation
* to cacheline.
*/
ptr = heap_alloc_aligned(h, align, aligned_size);

if (ptr) {
ptr = uncache_to_cache(ptr);

/*
* Heap can be used by different cores, so cache
* needs to be invalidated before next user
*/
z_xtensa_cache_inv(ptr, aligned_size);
}

return ptr;
}

static void heap_free(struct k_heap *h, void *mem)
{
k_spinlock_key_t key = k_spin_lock(&h->lock);

sys_heap_free(&h->heap, mem);

k_spin_unlock(&h->lock, key);
}

static inline bool zone_is_cached(enum mem_zone zone)
{
if (zone == SOF_MEM_ZONE_BUFFER)
return true;

return false;
}

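/*
 * Cached zones (currently only SOF_MEM_ZONE_BUFFER) are served from the
 * cacheline-aligned sof_heap; all other zones come from the shared uncached
 * heap with plain 8-byte alignment.
 */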
void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
if (zone_is_cached(zone))
return heap_alloc_aligned_cached(&sof_heap, 0, bytes);

return heap_alloc_aligned(&sof_heap_shared, 8, bytes);
}

/* Use SOF_MEM_ZONE_BUFFER at the moment */
void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes,
size_t old_bytes, uint32_t alignment)
{
void *new_ptr;

if (!ptr) {
/* TODO: Use correct zone */
return rballoc_align(flags, caps, bytes, alignment);
}

/* Original version returns NULL without freeing this memory */
if (!bytes) {
/* TODO: Should we call rfree(ptr); */
tr_err(&zephyr_tr, "realloc failed for 0 bytes");
return NULL;
}

new_ptr = rballoc_align(flags, caps, bytes, alignment);
if (!new_ptr)
return NULL;

if (!(flags & SOF_MEM_FLAG_NO_COPY))
memcpy(new_ptr, ptr, MIN(bytes, old_bytes));

rfree(ptr);

tr_info(&zephyr_tr, "rbrealloc: new ptr %p", new_ptr);

return new_ptr;
}

/**
 * Similar to rmalloc(), but guarantees that the returned block is zeroed.
 *
 * @note Do not use for buffers (SOF_MEM_ZONE_BUFFER zone).
 * Use rballoc() or rballoc_align() to allocate memory for buffers.
*/
void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
void *ptr = rmalloc(zone, flags, caps, bytes);

if (ptr)
	memset(ptr, 0, bytes);

return ptr;
}

/**
* Allocates memory block from SOF_MEM_ZONE_BUFFER.
* @param flags Flags, see SOF_MEM_FLAG_...
* @param caps Capabilities, see SOF_MEM_CAPS_...
* @param bytes Size in bytes.
* @param alignment Alignment in bytes.
* @return Pointer to the allocated memory or NULL if failed.
*/
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
uint32_t alignment)
{
return heap_alloc_aligned_cached(&sof_heap, alignment, bytes);
}

/*
 * Frees memory allocated by the alloc calls above.
*/
void rfree(void *ptr)
{
if (!ptr)
return;

/* select heap based on address range */
if (is_uncached(ptr)) {
heap_free(&sof_heap_shared, ptr);
return;
}

ptr = cache_to_uncache(ptr);
heap_free(&sof_heap, ptr);
}

/* debug only - only needed for linking */
void heap_trace_all(int force)
{
}
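
For reference, here is a minimal sketch of how SOF code might exercise this allocator API. The struct, sizes, error handling and the use of SOF_MEM_ZONE_RUNTIME / SOF_MEM_CAPS_RAM are illustrative assumptions, not taken from this PR:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sof/lib/alloc.h>

struct comp_state {
	int frames;
	uint8_t *buf;
};

static int comp_state_new(struct comp_state **out, size_t buf_bytes)
{
	/* zeroed allocation from the shared (uncached) runtime zone */
	struct comp_state *s = rzalloc(SOF_MEM_ZONE_RUNTIME, 0,
				       SOF_MEM_CAPS_RAM, sizeof(*s));
	if (!s)
		return -ENOMEM;

	/* cached, cacheline-aligned audio buffer from SOF_MEM_ZONE_BUFFER */
	s->buf = rballoc_align(0, SOF_MEM_CAPS_RAM, buf_bytes,
			       PLATFORM_DCACHE_ALIGN);
	if (!s->buf) {
		rfree(s);
		return -ENOMEM;
	}

	*out = s;
	return 0;
}

static void comp_state_free(struct comp_state *s)
{
	/* rfree() picks the right heap from the pointer's address range */
	rfree(s->buf);
	rfree(s);
}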