#include "lib/heapmem.h"

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "sys/cc.h"
#include "sys/log.h"

#define LOG_MODULE "HeapMem"
#define LOG_LEVEL LOG_LEVEL_WARN

/* HEAPMEM_CONF_ARENA_SIZE determines the size of the arena that is
   statically allocated for the heap in this module. */
#ifdef HEAPMEM_CONF_ARENA_SIZE
#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
#endif
/* HEAPMEM_CONF_SEARCH_MAX limits the number of chunks that are examined
   when searching the free list, which bounds the time spent per request. */
#ifdef HEAPMEM_CONF_SEARCH_MAX
#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
#else
#define CHUNK_SEARCH_MAX 16
#endif
/* HEAPMEM_CONF_REALLOC enables or disables the heapmem_realloc() API. */
#ifdef HEAPMEM_CONF_REALLOC
#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
#else
#define HEAPMEM_REALLOC 1
#endif
/* Use the strictest scalar alignment on C11 and later; otherwise fall
   back to the alignment of size_t. */
#if __STDC_VERSION__ >= 201112L
#define HEAPMEM_DEFAULT_ALIGNMENT alignof(max_align_t)
#else
#define HEAPMEM_DEFAULT_ALIGNMENT sizeof(size_t)
#endif
/* HEAPMEM_CONF_ALIGNMENT overrides the default alignment of allocated
   addresses. */
#ifdef HEAPMEM_CONF_ALIGNMENT
#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
#else
#define HEAPMEM_ALIGNMENT HEAPMEM_DEFAULT_ALIGNMENT
#endif
/* Round a size up to the next multiple of HEAPMEM_ALIGNMENT. */
#define ALIGN(size) \
  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))
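/*
 * For example, assuming HEAPMEM_ALIGNMENT is 8:
 *   ALIGN(1)  -> 8
 *   ALIGN(8)  -> 8
 *   ALIGN(13) -> 16
 */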
#define NEXT_CHUNK(chunk) \
  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
#define IS_LAST_CHUNK(chunk) \
  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])
#define GET_CHUNK(ptr) \
  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
#define GET_PTR(chunk) \
  (char *)((chunk) + 1)
#define CHUNK_FLAG_ALLOCATED 0x1

#define CHUNK_ALLOCATED(chunk) \
  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
#define CHUNK_FREE(chunk) \
  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
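/*
 * Memory layout of a chunk: a chunk_t header is immediately followed
 * by the payload that is handed to the caller.
 *
 *   [ chunk_t header | payload of chunk->size bytes ][ next chunk ... ]
 *    ^                 ^
 *    GET_CHUNK(ptr)    GET_PTR(chunk)
 *
 * NEXT_CHUNK() steps over the header and the payload to reach the
 * chunk that follows in the arena.
 */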
/* HEAPMEM_CONF_MAX_ZONES configures how many zones the heap can be
   divided into. Zone 0 is the general zone, which is always present. */
#ifdef HEAPMEM_CONF_MAX_ZONES
#define HEAPMEM_MAX_ZONES HEAPMEM_CONF_MAX_ZONES
#else
#define HEAPMEM_MAX_ZONES 1
#endif

#if HEAPMEM_MAX_ZONES < 1
#error At least one HeapMem zone must be configured.
#endif

/* Bookkeeping for each registered zone. */
struct heapmem_zone {
  const char *name;
  size_t zone_size;
  size_t allocated;
};

static struct heapmem_zone zones[HEAPMEM_MAX_ZONES] = {
  {.name = "GENERAL", .zone_size = HEAPMEM_ARENA_SIZE}
};
/* A chunk is a header followed directly by the allocated data. */
typedef struct chunk {
  struct chunk *prev;   /* Previous chunk in the free list. */
  struct chunk *next;   /* Next chunk in the free list. */
  size_t size;          /* Payload size in bytes. */
  uint8_t flags;        /* CHUNK_FLAG_ALLOCATED when in use. */
  heapmem_zone_t zone;  /* The zone accounting for this chunk. */
  const char *file;     /* Allocation site, set by the *_debug functions. */
  unsigned line;
} chunk_t;
/* The heap arena from which all chunks are allocated. */
static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
static size_t heap_usage;

static chunk_t *first_chunk = (chunk_t *)heap_base;
static chunk_t *free_list;
/* Check whether a pointer refers to memory within the used part of
   the heap arena. */
#define IN_HEAP(ptr) (((char *)(ptr) >= (char *)heap_base) && \
                      ((char *)(ptr) < (char *)heap_base + heap_usage))
/* Claim more space at the end of the heap arena for a new chunk, or
   return NULL if the arena is exhausted. */
static chunk_t *
extend_space(size_t size)
{
  if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
    return NULL;
  }

  char *old_usage = &heap_base[heap_usage];
  heap_usage += size;

  return (chunk_t *)old_usage;
}
/* Mark a chunk as free: either shrink the heap if the chunk is the
   last one, or put it on the free list. */
static void
free_chunk(chunk_t *const chunk)
{
  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;

  if(IS_LAST_CHUNK(chunk)) {
    /* Release the chunk by shrinking the heap footprint. */
    heap_usage -= sizeof(chunk_t) + chunk->size;
  } else {
    /* Insert the chunk at the head of the free list. */
    chunk->prev = NULL;
    chunk->next = free_list;
    if(free_list != NULL) {
      free_list->prev = chunk;
    }
    free_list = chunk;
  }
}
/* Unlink a chunk from the doubly-linked free list. */
static void
remove_chunk_from_free_list(chunk_t *const chunk)
{
  if(chunk == free_list) {
    free_list = chunk->next;
    if(free_list != NULL) {
      free_list->prev = NULL;
    }
  } else {
    chunk->prev->next = chunk->next;
  }

  if(chunk->next != NULL) {
    chunk->next->prev = chunk->prev;
  }
}
/* Split a chunk at the given payload offset if the remainder is large
   enough to hold a new chunk, and put the remainder on the free list. */
static void
split_chunk(chunk_t *const chunk, size_t offset)
{
  offset = ALIGN(offset);

  if(offset + sizeof(chunk_t) < chunk->size) {
    chunk_t *new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
    new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
    new_chunk->flags = 0;
    free_chunk(new_chunk);

    chunk->size = offset;
    chunk->next = chunk->prev = NULL;
  }
}
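/*
 * Worked example (for illustration only, assuming sizeof(chunk_t) is 24
 * and HEAPMEM_ALIGNMENT is 8): calling split_chunk(chunk, 40) on a chunk
 * with a 128-byte payload keeps a 40-byte payload in the original chunk
 * and creates a new free chunk at payload offset 40 with a 24-byte
 * header and a 64-byte payload (128 - 24 - 40).
 */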
/* Merge the given chunk with any directly following chunks that are
   free, removing the absorbed chunks from the free list. */
static void
coalesce_chunks(chunk_t *chunk)
{
  for(chunk_t *next = NEXT_CHUNK(chunk);
      (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
      next = NEXT_CHUNK(next)) {
    chunk->size += sizeof(chunk_t) + next->size;
    LOG_DBG("Coalesce chunk of %zu bytes\n", next->size);
    remove_chunk_from_free_list(next);
  }
}
/* Defragment the free list by coalescing each free chunk with any
   free neighbors that follow it. */
static void
defrag_chunks(void)
{
  /* Limit the time spent traversing the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }
    coalesce_chunks(chunk);
  }
}
/* Search the free list for the smallest free chunk that can hold the
   requested size (best fit), bounded by CHUNK_SEARCH_MAX steps. */
static chunk_t *
get_free_chunk(const size_t size)
{
  defrag_chunks();

  chunk_t *best = NULL;

  /* Limit the time spent searching the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }
    if(size <= chunk->size) {
      if(best == NULL || chunk->size < best->size) {
        best = chunk;
      }
      if(best->size == size) {
        /* A perfect fit -- stop the search. */
        break;
      }
    }
  }

  if(best != NULL) {
    /* Take the chunk off the free list and split off any excess space. */
    remove_chunk_from_free_list(best);
    split_chunk(best, size);
  }

  return best;
}
heapmem_zone_t
heapmem_zone_register(const char *name, size_t zone_size)
{
  if(zone_size > zones[HEAPMEM_ZONE_GENERAL].zone_size) {
    LOG_ERR("Too large zone allocation limit: %zu\n", zone_size);
    return HEAPMEM_ZONE_INVALID;
  } else if(name == NULL || zone_size == 0) {
    return HEAPMEM_ZONE_INVALID;
  }

  /* Find a free slot in the zone table, while rejecting duplicate names. */
  for(heapmem_zone_t i = HEAPMEM_ZONE_GENERAL + 1; i < HEAPMEM_MAX_ZONES; i++) {
    if(zones[i].name == NULL) {
      zones[i].name = name;
      zones[i].zone_size = zone_size;
      /* The reserved space is taken from the general zone. */
      zones[HEAPMEM_ZONE_GENERAL].zone_size -= zone_size;
      LOG_INFO("Registered zone \"%s\" with ID %u\n", name, i);
      return i;
    } else if(strcmp(zones[i].name, name) == 0) {
      LOG_ERR("Duplicate zone registration: %s\n", name);
      return HEAPMEM_ZONE_INVALID;
    }
  }

  LOG_ERR("Cannot allocate more zones\n");
  return HEAPMEM_ZONE_INVALID;
}
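/*
 * Example use of the zone API (an illustrative sketch; the zone name
 * and sizes are arbitrary):
 *
 *   heapmem_zone_t net_zone = heapmem_zone_register("NET", 2048);
 *   if(net_zone != HEAPMEM_ZONE_INVALID) {
 *     void *buf = heapmem_zone_alloc(net_zone, 256);
 *     if(buf != NULL) {
 *       heapmem_free(buf);
 *     }
 *   }
 */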
void *
heapmem_zone_alloc_debug(heapmem_zone_t zone, size_t size,
                         const char *file, const unsigned line)
{
  /* Fail early on oversized requests and invalid zones. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  if(zone >= HEAPMEM_MAX_ZONES || zones[zone].name == NULL) {
    LOG_WARN("Attempt to allocate from invalid zone: %u\n", zone);
    return NULL;
  }

  /* Enforce the zone's allocation limit. */
  if(sizeof(chunk_t) + size >
     zones[zone].zone_size - zones[zone].allocated) {
    LOG_ERR("Cannot allocate %zu bytes because of the zone limit\n", size);
    return NULL;
  }

  /* Reuse a free chunk if possible; otherwise extend the heap. */
  chunk_t *chunk = get_free_chunk(size);
  if(chunk == NULL) {
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;
  chunk->zone = zone;
  chunk->file = file;
  chunk->line = line;

  LOG_DBG("%s ptr %p size %zu\n", __func__, GET_PTR(chunk), size);

  zones[zone].allocated += sizeof(chunk_t) + size;

  return GET_PTR(chunk);
}
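/*
 * Example use of the general-zone convenience macro (illustrative):
 *
 *   int *values = heapmem_alloc(10 * sizeof(int));
 *   if(values != NULL) {
 *     values[0] = 42;
 *     heapmem_free(values);
 *   }
 */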
bool
heapmem_free_debug(void *ptr, const char *file, const unsigned line)
{
  if(!IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return false;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p has already been deallocated\n", __func__, ptr);
    return false;
  }

  LOG_DBG("%s: ptr %p, allocated at %s:%u\n", __func__, ptr,
          chunk->file, chunk->line);

  zones[chunk->zone].allocated -= sizeof(chunk_t) + chunk->size;

  free_chunk(chunk);
  return true;
}
#if HEAPMEM_REALLOC
void *
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
{
  if(ptr != NULL && !IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return NULL;
  }

  LOG_DBG("%s: ptr %p size %zu at %s:%u\n",
          __func__, ptr, size, file, line);

  /* Fail early on oversized requests, and hand off the special cases
     of a NULL pointer or a zero size to alloc and free. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  } else if(ptr == NULL) {
    return heapmem_zone_alloc_debug(HEAPMEM_ZONE_GENERAL, size, file, line);
  } else if(size == 0) {
    heapmem_free_debug(ptr, file, line);
    return NULL;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p is not allocated\n", __func__, ptr);
    return NULL;
  }

  /* Calculate the size difference between the current chunk and the
     requested size. */
  int size_adj = size - chunk->size;

  if(size_adj <= 0) {
    /* The object shrinks or keeps its size: split off any excess space
       and adjust the zone accounting. */
    split_chunk(chunk, size);
    zones[chunk->zone].allocated += size_adj;
    return ptr;
  }

  /* The object grows. */
  if(IS_LAST_CHUNK(chunk)) {
    /* The chunk is at the end of the heap footprint, so try to extend
       the arena usage in place. */
    if(extend_space(size_adj) != NULL) {
      chunk->size = size;
      zones[chunk->zone].allocated += size_adj;
      return ptr;
    }
  } else {
    /* Try to absorb adjacent free chunks to make room in place. */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      split_chunk(chunk, size);
      zones[chunk->zone].allocated += size_adj;
      return ptr;
    }
  }

  /* The chunk cannot grow in place: allocate a new chunk in the same
     zone, copy the old contents, and free the old chunk. */
  void *newptr = heapmem_zone_alloc_debug(chunk->zone, size, file, line);
  if(newptr == NULL) {
    return NULL;
  }

  memcpy(newptr, ptr, chunk->size);
  heapmem_free_debug(ptr, file, line);

  return newptr;
}
#endif /* HEAPMEM_REALLOC */
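/*
 * Example use of heapmem_realloc() (illustrative): grow a buffer, and
 * keep using the old pointer only if the reallocation fails.
 *
 *   char *buf = heapmem_alloc(64);
 *   char *bigger = heapmem_realloc(buf, 128);
 *   if(bigger != NULL) {
 *     buf = bigger;
 *   }
 */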
void
heapmem_stats(heapmem_stats_t *stats)
{
  memset(stats, 0, sizeof(*stats));

  for(chunk_t *chunk = first_chunk;
      (char *)chunk < &heap_base[heap_usage];
      chunk = NEXT_CHUNK(chunk)) {
    if(CHUNK_ALLOCATED(chunk)) {
      stats->allocated += chunk->size;
      stats->overhead += sizeof(chunk_t);
    } else {
      coalesce_chunks(chunk);
      stats->available += chunk->size;
    }
  }

  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
  stats->footprint = heap_usage;
  stats->chunks = stats->overhead / sizeof(chunk_t);
}
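/*
 * Example of reading the allocator statistics (illustrative):
 *
 *   heapmem_stats_t stats;
 *   heapmem_stats(&stats);
 *   LOG_INFO("allocated %zu, available %zu, footprint %zu\n",
 *            stats.allocated, stats.available, stats.footprint);
 */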
size_t
heapmem_alignment(void)
{
  return HEAPMEM_ALIGNMENT;
}