/* HeapMem configuration macros.
   NOTE(review): this extraction is garbled -- the upstream file's line
   numbers are fused onto each line and the matching #else/#endif
   directives of these conditional blocks were dropped. Restore the
   conditional structure from the upstream file before compiling. */
47#ifdef HEAPMEM_CONF_ARENA_SIZE
/* Logging module tag and default level for this module. */
51#define LOG_MODULE "HeapMem"
52#define LOG_LEVEL LOG_LEVEL_WARN
/* Printf-like function used for the debug printout; overridable
   through HEAPMEM_CONF_PRINTF. */
56#ifndef HEAPMEM_CONF_PRINTF
58#define HEAPMEM_PRINTF printf
60#define HEAPMEM_PRINTF HEAPMEM_CONF_PRINTF
/* Total size of the heap arena, supplied by the platform config. */
65#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
/* Upper bound on free-list entries examined per search; default 16. */
73#ifdef HEAPMEM_CONF_SEARCH_MAX
74#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
76#define CHUNK_SEARCH_MAX 16
/* Compile-time switch for heapmem_realloc() support; default on. */
83#ifdef HEAPMEM_CONF_REALLOC
84#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
86#define HEAPMEM_REALLOC 1
/* Default payload alignment: alignof(max_align_t) on C11 and newer,
   otherwise fall back to sizeof(size_t). */
89#if __STDC_VERSION__ >= 201112L
91#define HEAPMEM_DEFAULT_ALIGNMENT alignof(max_align_t)
93#define HEAPMEM_DEFAULT_ALIGNMENT sizeof(size_t)
/* Alignment applied to every allocated payload; overridable. */
98#ifdef HEAPMEM_CONF_ALIGNMENT
99#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
101#define HEAPMEM_ALIGNMENT HEAPMEM_DEFAULT_ALIGNMENT
/* Round SIZE up to the configured alignment. Correct only when
   HEAPMEM_ALIGNMENT is a power of two (the bit-mask trick relies on it).
   NOTE(review): the "#define ALIGN(size)" header line was lost in
   extraction; restored here since ALIGN() is used by split_chunk(). */
#define ALIGN(size) \
  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))

/* Header of the chunk physically following CHUNK in the arena. */
#define NEXT_CHUNK(chunk) \
  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
/* True iff CHUNK is the topmost chunk below the arena watermark. */
#define IS_LAST_CHUNK(chunk) \
  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])

/* Translate between a payload pointer and its chunk header; the
   header is stored immediately before the payload. */
#define GET_CHUNK(ptr) \
  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
#define GET_PTR(chunk) \
  ((char *)((chunk) + 1))

/* Flag bit marking a chunk as allocated. */
#define CHUNK_FLAG_ALLOCATED 0x1

/* Predicates on the allocation state of a chunk. */
#define CHUNK_ALLOCATED(chunk) \
  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
#define CHUNK_FREE(chunk) \
  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
144#ifdef HEAPMEM_CONF_MAX_ZONES
145#define HEAPMEM_MAX_ZONES HEAPMEM_CONF_MAX_ZONES
147#define HEAPMEM_MAX_ZONES 1
150#if HEAPMEM_MAX_ZONES < 1
151#error At least one HeapMem zone must be configured.
154static struct heapmem_zone zones[HEAPMEM_MAX_ZONES] = {
155 {.name =
"GENERAL", .zone_size = HEAPMEM_ARENA_SIZE}
/* Per-allocation metadata header stored directly before each payload.
   NOTE(review): the member list was lost in extraction; later code
   dereferences chunk->size, ->flags, ->next, ->prev, ->zone, and (in
   debug builds) ->file and ->line -- restore from the upstream file. */
163typedef struct chunk {
/* The heap arena itself, aligned for the configured payload alignment. */
177static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
/* Bytes of the arena currently in use (watermark) ... */
178static size_t heap_usage;
/* ... and the historical maximum of that watermark, for statistics. */
179static size_t max_heap_usage;
/* Head of the doubly-linked list of free chunks below the watermark. */
181static chunk_t *free_list;
/* True iff PTR is non-NULL and points into the currently used part of
   the heap arena. Fully parenthesized: the original expansion was
   "(A && B) && (C)" with no outer parentheses, so "!IN_HEAP(p)" (as
   used by heapmem_realloc_debug) negated only the first group instead
   of the whole predicate. */
#define IN_HEAP(ptr) (((ptr) != NULL) && \
                      ((char *)(ptr) >= (char *)heap_base) && \
                      ((char *)(ptr) < (char *)heap_base + heap_usage))
190extend_space(
size_t size)
192 if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
196 char *old_usage = &heap_base[heap_usage];
198 if(heap_usage > max_heap_usage) {
199 max_heap_usage = heap_usage;
207free_chunk(chunk_t *
const chunk)
209 chunk->flags &= ~CHUNK_FLAG_ALLOCATED;
211 if(IS_LAST_CHUNK(chunk)) {
213 heap_usage -=
sizeof(chunk_t) + chunk->size;
217 chunk->next = free_list;
218 if(free_list != NULL) {
219 free_list->prev = chunk;
228remove_chunk_from_free_list(chunk_t *
const chunk)
230 if(chunk == free_list) {
231 free_list = chunk->next;
232 if(free_list != NULL) {
233 free_list->prev = NULL;
236 chunk->prev->next = chunk->next;
239 if(chunk->next != NULL) {
240 chunk->next->prev = chunk->prev;
250split_chunk(chunk_t *
const chunk,
size_t offset)
252 offset = ALIGN(offset);
254 if(offset +
sizeof(chunk_t) < chunk->size) {
255 chunk_t *new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
256 new_chunk->size = chunk->size -
sizeof(chunk_t) - offset;
257 new_chunk->flags = 0;
258 free_chunk(new_chunk);
260 chunk->size = offset;
261 chunk->next = chunk->prev = NULL;
268coalesce_chunks(chunk_t *chunk)
270 for(chunk_t *next = NEXT_CHUNK(chunk);
271 (
char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
272 next = NEXT_CHUNK(next)) {
273 chunk->size +=
sizeof(chunk_t) + next->size;
274 LOG_DBG(
"Coalesce chunk of %zu bytes\n", next->size);
275 remove_chunk_from_free_list(next);
/* NOTE(review): fragment of a helper whose signature fell into an
   extraction gap (likely a defragmentation pass): it walks at most
   CHUNK_SEARCH_MAX free-list entries, coalescing each with its free
   physical neighbors. Restore the enclosing function from the
   upstream file. */
285 int i = CHUNK_SEARCH_MAX;
286 for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
290 coalesce_chunks(chunk);
297get_free_chunk(
const size_t size)
302 chunk_t *best = NULL;
304 int i = CHUNK_SEARCH_MAX;
305 for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
312 if(size <= chunk->size) {
313 if(best == NULL || chunk->size < best->size) {
316 if(best->size == size) {
326 remove_chunk_from_free_list(best);
327 split_chunk(best, size);
/* heapmem_zone_register(name, zone_size) -- body fragment.
   NOTE(review): the function signature and several statements fell
   into extraction gaps. Visible contract: reject a reservation larger
   than what the general zone still owns, a NULL name, or a zero size;
   claim the first free slot in the zone table (carving the size out of
   the general zone); reject duplicate names; return
   HEAPMEM_ZONE_INVALID when the table is full. */
/* A zone's reservation cannot exceed what the general zone has left. */
343 if(zone_size > zones[HEAPMEM_ZONE_GENERAL].zone_size) {
344 LOG_ERR(
"Too large zone allocation limit: %zu\n", zone_size);
345 return HEAPMEM_ZONE_INVALID;
346 }
else if(name == NULL || zone_size == 0) {
347 return HEAPMEM_ZONE_INVALID;
/* Scan the zone table, skipping the reserved general zone (index 0). */
350 for(heapmem_zone_t i = HEAPMEM_ZONE_GENERAL + 1; i < HEAPMEM_MAX_ZONES; i++) {
/* A NULL name marks an unused slot: claim it. */
351 if(zones[i].name == NULL) {
353 zones[i].name = name;
354 zones[i].zone_size = zone_size;
/* The reservation is carved out of the general zone's budget. */
357 zones[HEAPMEM_ZONE_GENERAL].zone_size -= zone_size;
358 LOG_INFO(
"Registered zone \"%s\" with ID %u\n", name, i);
360 }
/* Zone names must be unique. */
else if(strcmp(zones[i].name, name) == 0) {
361 LOG_ERR(
"Duplicate zone registration: %s\n", name);
362 return HEAPMEM_ZONE_INVALID;
/* No free slot found: the zone table is full. */
366 LOG_ERR(
"Cannot allocate more zones\n");
368 return HEAPMEM_ZONE_INVALID;
/* Allocate SIZE bytes from ZONE (debug variant carrying the caller's
   file/line). Tries the free list first, then extends the arena.
   NOTE(review): extraction gaps -- the early "return NULL" statements,
   the NULL check after extend_space(), the chunk->size/zone/file/line
   assignments, and the closing braces are missing; restore from the
   upstream file. */
387heapmem_zone_alloc_debug(heapmem_zone_t zone,
size_t size,
388 const char *file,
const unsigned line)
/* Reject unknown or unregistered zones. */
393 if(zone >= HEAPMEM_MAX_ZONES || zones[zone].name == NULL) {
394 LOG_WARN(
"Attempt to allocate from invalid zone: %u\n", zone);
/* Reject degenerate sizes: zero or larger than the whole arena. */
398 if(size > HEAPMEM_ARENA_SIZE || size == 0) {
/* Enforce the zone's accounting limit, counting header overhead. */
404 if(
sizeof(chunk_t) + size >
405 zones[zone].zone_size - zones[zone].allocated) {
406 LOG_ERR(
"Cannot allocate %zu bytes because of the zone limit\n", size);
/* First choice: reuse a suitable chunk from the free list. */
410 chunk_t *chunk = get_free_chunk(size);
/* Otherwise claim fresh space (header + payload) from the arena top. */
412 chunk = extend_space(
sizeof(chunk_t) + size);
419 chunk->flags = CHUNK_FLAG_ALLOCATED;
426 LOG_DBG(
"%s ptr %p size %zu\n", __func__, GET_PTR(chunk), size);
/* Charge the zone for payload plus header overhead. */
429 zones[zone].allocated +=
sizeof(chunk_t) + size;
431 return GET_PTR(chunk);
/* Deallocate PTR (debug variant). Validates that PTR lies in the heap
   and that its chunk is currently allocated before freeing.
   NOTE(review): extraction gaps -- the IN_HEAP() check guarding the
   first warning, the failure returns, the free_chunk() call, and the
   success return are missing; restore from the upstream file. */
447heapmem_free_debug(
void *ptr,
const char *file,
const unsigned line)
454 LOG_WARN(
"%s: ptr %p is not in the heap\n", __func__, ptr);
/* Recover the chunk header stored just before the payload. */
459 chunk_t *chunk = GET_CHUNK(ptr);
/* Guard against double free: the chunk must still be allocated. */
460 if(!CHUNK_ALLOCATED(chunk)) {
461 LOG_WARN(
"%s: ptr %p has already been deallocated\n", __func__, ptr);
/* chunk->file / chunk->line record the original allocation site. */
466 LOG_DBG(
"%s: ptr %p, allocated at %s:%u\n", __func__, ptr,
467 chunk->file, chunk->line);
/* Refund payload plus header overhead to the owning zone's budget. */
470 zones[chunk->zone].allocated -=
sizeof(chunk_t) + chunk->size;
/* Resize the allocation at PTR to SIZE bytes (debug variant).
   Visible strategy: shrink in place via split_chunk(); grow in place
   when the chunk is last below the watermark (extend_space) or when
   coalescing adjacent free chunks yields enough room; otherwise fall
   back to allocate-copy-free (the memcpy at the end).
   NOTE(review): extraction gaps -- the failure returns, the
   malloc-like fallback allocation of "newptr", the free of the old
   chunk, and most closing braces are missing; restore upstream. */
495heapmem_realloc_debug(
void *ptr,
size_t size,
496 const char *file,
const unsigned line)
/* A non-NULL pointer must lie inside the heap to be realloc'ed. */
503 if(ptr != NULL && !IN_HEAP(ptr)) {
504 LOG_WARN(
"%s: ptr %p is not in the heap\n", __func__, ptr);
509 LOG_DBG(
"%s: ptr %p size %zu at %s:%u\n",
510 __func__, ptr, size, file, line);
/* Oversized request: fail. Zero size: behaves like free (per the
   classic realloc contract) -- the elided branch presumably frees. */
514 if(size > HEAPMEM_ARENA_SIZE) {
521 }
else if(size == 0) {
526 chunk_t *chunk = GET_CHUNK(ptr);
527 if(!CHUNK_ALLOCATED(chunk)) {
528 LOG_WARN(
"%s: ptr %p is not allocated\n", __func__, ptr);
/* Signed delta between new and current payload size.
   NOTE(review): size_t operands narrowed into int -- assumes
   allocations stay below INT_MAX; plausible on embedded targets but
   worth confirming against upstream. */
538 int size_adj = size - chunk->size;
/* Shrink in place: split off the tail and refund the difference. */
543 split_chunk(chunk, size);
544 zones[chunk->zone].allocated += size_adj;
/* Grow in place, case 1: last chunk -- extend the arena watermark. */
549 if(IS_LAST_CHUNK(chunk)) {
555 if(extend_space(size_adj) != NULL) {
557 zones[chunk->zone].allocated += size_adj;
/* Grow in place, case 2: absorb adjacent free chunks. */
566 coalesce_chunks(chunk);
567 if(chunk->size >= size) {
/* Coalescing may overshoot; trim back down to SIZE. */
570 split_chunk(chunk, size);
571 zones[chunk->zone].allocated += size_adj;
/* Fallback: copy the old payload into a freshly allocated chunk. */
587 memcpy(newptr, ptr, chunk->size);
/* Allocate a zero-initialized array of NMEMB elements of SIZE bytes
   each (debug variant).
   NOTE(review): extraction gaps -- the underlying allocation call that
   produces "ptr", its NULL check, and the returns are missing. */
597heapmem_calloc_debug(
size_t nmemb,
size_t size,
598 const char *file,
const unsigned line)
603 size_t total_size = nmemb * size;
/* Multiplication-overflow check: if nmemb * size wrapped, the
   division no longer recovers nmemb. Also rejects size == 0 before
   dividing by it. */
606 if(size == 0 || total_size / size != nmemb) {
/* Zero the whole payload, per the calloc contract. */
612 memset(ptr, 0, total_size);
/* heapmem_stats(stats) -- body fragment (signature lost in extraction).
   Walks every chunk below the watermark, tallying allocated bytes and
   header overhead, coalescing and counting free chunks as available,
   then adds the untouched arena above the watermark. */
621 memset(stats, 0,
sizeof(*stats));
/* Physical walk of the arena: chunks are contiguous up to the
   watermark, so NEXT_CHUNK() steps through all of them. */
623 for(chunk_t *chunk = (chunk_t *)heap_base;
624 (
char *)chunk < &heap_base[heap_usage];
625 chunk = NEXT_CHUNK(chunk)) {
626 if(CHUNK_ALLOCATED(chunk)) {
627 stats->allocated += chunk->size;
628 stats->overhead +=
sizeof(chunk_t);
/* Free chunk: merge free neighbors first so the walk stays sound,
   then count the (possibly grown) payload as available. */
630 coalesce_chunks(chunk);
631 stats->available += chunk->size;
/* Space above the watermark has never been claimed and is available. */
634 stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
635 stats->footprint = heap_usage;
636 stats->max_footprint = max_heap_usage;
/* Each allocated chunk contributed exactly one header to overhead. */
637 stats->chunks = stats->overhead /
sizeof(chunk_t);
/* heapmem_print_debug_info(print_chunks) -- body fragment (signature
   lost in extraction). Prints the heapmem_stats() summary via
   HEAPMEM_PRINTF, then optionally lists every allocated chunk. The
   (%s:%u) variant presumably prints chunk->file / chunk->line in debug
   builds only -- the surrounding #if guards fell into a gap. */
645 heapmem_stats_t stats;
648 HEAPMEM_PRINTF(
"* HeapMem statistics\n");
649 HEAPMEM_PRINTF(
"* Allocated memory: %zu\n", stats.allocated);
650 HEAPMEM_PRINTF(
"* Available memory: %zu\n", stats.available);
651 HEAPMEM_PRINTF(
"* Heap usage: %zu\n", stats.footprint);
652 HEAPMEM_PRINTF(
"* Max heap usage: %zu\n", stats.max_footprint);
653 HEAPMEM_PRINTF(
"* Allocated chunks: %zu\n", stats.chunks);
654 HEAPMEM_PRINTF(
"* Chunk size: %zu\n",
sizeof(chunk_t));
655 HEAPMEM_PRINTF(
"* Total chunk overhead: %zu\n", stats.overhead);
658 HEAPMEM_PRINTF(
"* Allocated chunks:\n");
/* Physical walk of all chunks below the watermark. */
659 for(chunk_t *chunk = (chunk_t *)heap_base;
660 (
char *)chunk < &heap_base[heap_usage];
661 chunk = NEXT_CHUNK(chunk)) {
662 if(CHUNK_ALLOCATED(chunk)) {
/* Debug-build variant: includes the allocation site. */
664 HEAPMEM_PRINTF(
"* Chunk: heap offset %"PRIuPTR
", obj %p, flags 0x%x (%s:%u)\n",
665 (uintptr_t)((
char *)chunk - (
char *)heap_base),
666 GET_PTR(chunk), chunk->flags, chunk->file, chunk->line);
/* Non-debug variant: offset, payload pointer, and flags only. */
668 HEAPMEM_PRINTF(
"* Chunk: heap offset %"PRIuPTR
", obj %p, flags 0x%x\n",
669 (uintptr_t)((
char *)chunk - (
char *)heap_base),
670 GET_PTR(chunk), chunk->flags);
/* heapmem_alignment() -- body fragment (signature lost in extraction):
   reports the minimum alignment of allocated payload addresses. */
681 return HEAPMEM_ALIGNMENT;
Default definitions of C compiler quirk work-arounds.
void * heapmem_zone_alloc(heapmem_zone_t zone, size_t size)
Allocate a chunk of memory in the heap.
void * heapmem_realloc(void *ptr, size_t size)
Reallocate a chunk of memory in the heap.
#define heapmem_alloc(size)
Allocate a chunk of memory in the general zone of the heap.
heapmem_zone_t heapmem_zone_register(const char *name, size_t zone_size)
Register a zone with a reserved subdivision of the heap.
void heapmem_stats(heapmem_stats_t *stats)
Obtain internal heapmem statistics regarding the allocated chunks.
size_t heapmem_alignment(void)
Obtain the minimum alignment of allocated addresses.
bool heapmem_free(void *ptr)
Deallocate a chunk of memory.
void heapmem_print_debug_info(bool print_chunks)
Print debugging information for the heap memory management.
void * heapmem_calloc(size_t nmemb, size_t size)
Allocate memory for a zero-initialized array.
Header file for the dynamic heap memory allocator.
Header file for the logging system.