Contiki-NG: heapmem.c — source listing from the generated API documentation.
1/*
2 * Copyright (c) 2005, Nicolas Tsiftes
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of the contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
20 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31/**
32 * \file
33 * HeapMem: a dynamic memory allocation module for
34 * resource-constrained devices.
35 * \author
36 * Nicolas Tsiftes <nvt@acm.org>
37 */
38
39#include <stddef.h>
40#include <stdint.h>
41#include <string.h>
42
43#include "contiki.h"
44#include "lib/heapmem.h"
45#include "sys/cc.h"
46
47#ifdef HEAPMEM_CONF_ARENA_SIZE
48
49/* Log configuration */
50#include "sys/log.h"
51#define LOG_MODULE "HeapMem"
52#define LOG_LEVEL LOG_LEVEL_WARN
53
54/* The HEAPMEM_CONF_PRINTF function determines which function to use for
55 printing debug information. */
56#ifndef HEAPMEM_CONF_PRINTF
57#include <stdio.h>
58#define HEAPMEM_PRINTF printf
59#else
60#define HEAPMEM_PRINTF HEAPMEM_CONF_PRINTF
61#endif /* !HEAPMEM_CONF_PRINTF */
62
63/* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
64 space that will be statically allocated in this module. */
65#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
66
67/*
68 * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
69 * chunk allocation and defragmentation. The lower this number is, the
70 * faster the operations become. The cost of this speedup, however, is
71 * that the space overhead might increase.
72 */
73#ifdef HEAPMEM_CONF_SEARCH_MAX
74#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
75#else
76#define CHUNK_SEARCH_MAX 16
77#endif /* HEAPMEM_CONF_SEARCH_MAX */
78
79/*
80 * The HEAPMEM_CONF_REALLOC parameter determines whether
81 * heapmem_realloc() is enabled (non-zero value) or not (zero value).
82 */
83#ifdef HEAPMEM_CONF_REALLOC
84#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
85#else
86#define HEAPMEM_REALLOC 1
87#endif /* HEAPMEM_CONF_REALLOC */
88
89#if __STDC_VERSION__ >= 201112L
90#include <stdalign.h>
91#define HEAPMEM_DEFAULT_ALIGNMENT alignof(max_align_t)
92#else
93#define HEAPMEM_DEFAULT_ALIGNMENT sizeof(size_t)
94#endif
95
96/* The HEAPMEM_CONF_ALIGNMENT parameter determines the minimum
97 alignment for allocated data. */
98#ifdef HEAPMEM_CONF_ALIGNMENT
99#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
100#else
101#define HEAPMEM_ALIGNMENT HEAPMEM_DEFAULT_ALIGNMENT
102#endif /* HEAPMEM_CONF_ALIGNMENT */
103
104#define ALIGN(size) \
105 (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))
106
107/* Macros for chunk iteration. */
108#define NEXT_CHUNK(chunk) \
109 ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
110#define IS_LAST_CHUNK(chunk) \
111 ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])
112
113/* Macros for retrieving the data pointer from a chunk, and the other
114 way around. */
115#define GET_CHUNK(ptr) \
116 ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
117#define GET_PTR(chunk) \
118 (char *)((chunk) + 1)
119
120/* Macros for determining the status of a chunk. */
121#define CHUNK_FLAG_ALLOCATED 0x1
122
123#define CHUNK_ALLOCATED(chunk) \
124 ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
125#define CHUNK_FREE(chunk) \
126 (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
127
/*
 * A heapmem zone denotes a logical subdivision of the heap that is
 * dedicated to a specific purpose. The concept of zones can help
 * developers to maintain various memory management strategies for
 * embedded systems (e.g., by having a fixed memory space for packet
 * buffers), yet maintains a level of dynamism offered by a
 * malloc-like API. The rest of the heap area that is not dedicated to
 * a specific zone belongs to the "GENERAL" zone, which can be used by
 * any module.
 */
struct heapmem_zone {
  const char *name;  /* Zone name; NULL marks an unused slot. */
  size_t zone_size;  /* Heap space reserved for this zone, in bytes. */
  size_t allocated;  /* Bytes currently allocated, including chunk headers. */
};
143
144#ifdef HEAPMEM_CONF_MAX_ZONES
145#define HEAPMEM_MAX_ZONES HEAPMEM_CONF_MAX_ZONES
146#else
147#define HEAPMEM_MAX_ZONES 1
148#endif
149
150#if HEAPMEM_MAX_ZONES < 1
151#error At least one HeapMem zone must be configured.
152#endif
153
154static struct heapmem_zone zones[HEAPMEM_MAX_ZONES] = {
155 {.name = "GENERAL", .zone_size = HEAPMEM_ARENA_SIZE}
156};
157
/*
 * We use a double-linked list of chunks, with a slight space overhead
 * compared to a single-linked list, but with the advantage of having
 * much faster list removals.
 */
typedef struct chunk {
  struct chunk *prev;  /* Previous chunk on the free list. */
  struct chunk *next;  /* Next chunk on the free list. */
  size_t size;         /* Payload size in bytes, excluding this header. */
  uint8_t flags;       /* Status bits; see CHUNK_FLAG_ALLOCATED. */
  heapmem_zone_t zone; /* Zone that this chunk's space is accounted to. */
#if HEAPMEM_DEBUG
  const char *file;    /* Source file of the allocation site. */
  unsigned line;       /* Source line of the allocation site. */
#endif
} chunk_t;

/* All allocated space is located within a heap, which is
   statically allocated with a configurable size. */
static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
static size_t heap_usage;     /* Bytes of the arena currently in use. */
static size_t max_heap_usage; /* High-water mark of heap_usage. */

/* Head of the list of free chunks inside the used heap footprint. */
static chunk_t *free_list;
182
/* IN_HEAP(ptr): True if ptr refers to an address inside the currently
   used part of the heap arena. The whole expression is wrapped in a
   single pair of parentheses so the macro composes correctly with
   surrounding operators (e.g., !IN_HEAP(p) negates the full test,
   not just the first conjunct). */
#define IN_HEAP(ptr) ((ptr) != NULL && \
                      (char *)(ptr) >= (char *)heap_base && \
                      (char *)(ptr) < (char *)heap_base + heap_usage)
186
187/* extend_space: Increases the current footprint used in the heap, and
188 returns a pointer to the old end. */
189static void *
190extend_space(size_t size)
191{
192 if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
193 return NULL;
194 }
195
196 char *old_usage = &heap_base[heap_usage];
197 heap_usage += size;
198 if(heap_usage > max_heap_usage) {
199 max_heap_usage = heap_usage;
200 }
201
202 return old_usage;
203}
204
205/* free_chunk: Mark a chunk as being free, and put it on the free list. */
206static void
207free_chunk(chunk_t * const chunk)
208{
209 chunk->flags &= ~CHUNK_FLAG_ALLOCATED;
210
211 if(IS_LAST_CHUNK(chunk)) {
212 /* Release the chunk back into the wilderness. */
213 heap_usage -= sizeof(chunk_t) + chunk->size;
214 } else {
215 /* Put the chunk on the free list. */
216 chunk->prev = NULL;
217 chunk->next = free_list;
218 if(free_list != NULL) {
219 free_list->prev = chunk;
220 }
221 free_list = chunk;
222 }
223}
224
225/* remove_chunk_from_free_list: Mark a chunk as being allocated, and
226 remove it from the free list. */
227static void
228remove_chunk_from_free_list(chunk_t * const chunk)
229{
230 if(chunk == free_list) {
231 free_list = chunk->next;
232 if(free_list != NULL) {
233 free_list->prev = NULL;
234 }
235 } else {
236 chunk->prev->next = chunk->next;
237 }
238
239 if(chunk->next != NULL) {
240 chunk->next->prev = chunk->prev;
241 }
242}
243
244/*
245 * split_chunk: When allocating a chunk, we may have found one that is
246 * larger than needed, so this function is called to keep the rest of
247 * the original chunk free.
248 */
249static void
250split_chunk(chunk_t * const chunk, size_t offset)
251{
252 offset = ALIGN(offset);
253
254 if(offset + sizeof(chunk_t) < chunk->size) {
255 chunk_t *new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
256 new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
257 new_chunk->flags = 0;
258 free_chunk(new_chunk);
259
260 chunk->size = offset;
261 chunk->next = chunk->prev = NULL;
262 }
263}
264
265/* coalesce_chunks: Coalesce a specific free chunk with as many
266 adjacent free chunks as possible. */
267static void
268coalesce_chunks(chunk_t *chunk)
269{
270 for(chunk_t *next = NEXT_CHUNK(chunk);
271 (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
272 next = NEXT_CHUNK(next)) {
273 chunk->size += sizeof(chunk_t) + next->size;
274 LOG_DBG("Coalesce chunk of %zu bytes\n", next->size);
275 remove_chunk_from_free_list(next);
276 }
277}
278
279/* defrag_chunks: Scan the free list for chunks that can be coalesced,
280 and stop within a bounded time. */
281static void
282defrag_chunks(void)
283{
284 /* Limit the time we spend on searching the free list. */
285 int i = CHUNK_SEARCH_MAX;
286 for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
287 if(i-- == 0) {
288 break;
289 }
290 coalesce_chunks(chunk);
291 }
292}
293
294/* get_free_chunk: Search the free list for the most suitable chunk,
295 as determined by its size, to satisfy an allocation request. */
296static chunk_t *
297get_free_chunk(const size_t size)
298{
299 /* Defragment chunks only right before they are needed for allocation. */
300 defrag_chunks();
301
302 chunk_t *best = NULL;
303 /* Limit the time we spend on searching the free list. */
304 int i = CHUNK_SEARCH_MAX;
305 for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
306 if(i-- == 0) {
307 break;
308 }
309
310 /* To avoid fragmenting large chunks, we select the chunk with the
311 smallest size that is larger than or equal to the requested size. */
312 if(size <= chunk->size) {
313 if(best == NULL || chunk->size < best->size) {
314 best = chunk;
315 }
316 if(best->size == size) {
317 /* We found a perfect chunk -- stop the search. */
318 break;
319 }
320 }
321 }
322
323 if(best != NULL) {
324 /* We found a chunk that can hold an object of the requested
325 allocation size. Split it if possible. */
326 remove_chunk_from_free_list(best);
327 split_chunk(best, size);
328 }
329
330 return best;
331}
332
333/*
334 * heapmem_zone_register: Register a new zone, which is essentially a
335 * subdivision of the heap with a reserved allocation space. This
336 * feature ensures that certain modules can get a dedicated heap for
337 * prioritized memory -- unlike what can be attained when allocating
338 * from the general zone.
339 */
340heapmem_zone_t
341heapmem_zone_register(const char *name, size_t zone_size)
342{
343 if(zone_size > zones[HEAPMEM_ZONE_GENERAL].zone_size) {
344 LOG_ERR("Too large zone allocation limit: %zu\n", zone_size);
345 return HEAPMEM_ZONE_INVALID;
346 } else if(name == NULL || zone_size == 0) {
347 return HEAPMEM_ZONE_INVALID;
348 }
349
350 for(heapmem_zone_t i = HEAPMEM_ZONE_GENERAL + 1; i < HEAPMEM_MAX_ZONES; i++) {
351 if(zones[i].name == NULL) {
352 /* Found a free slot. */
353 zones[i].name = name;
354 zones[i].zone_size = zone_size;
355 /* The general zone has a lower priority than registered zones,
356 so we transfer a part of the general zone to this one. */
357 zones[HEAPMEM_ZONE_GENERAL].zone_size -= zone_size;
358 LOG_INFO("Registered zone \"%s\" with ID %u\n", name, i);
359 return i;
360 } else if(strcmp(zones[i].name, name) == 0) {
361 LOG_ERR("Duplicate zone registration: %s\n", name);
362 return HEAPMEM_ZONE_INVALID;
363 }
364 }
365
366 LOG_ERR("Cannot allocate more zones\n");
367
368 return HEAPMEM_ZONE_INVALID;
369}
370
/*
 * heapmem_alloc: Allocate an object of the specified size, returning
 * a pointer to it in case of success, and NULL in case of failure.
 *
 * When allocating memory, heapmem_alloc() will first try to find a
 * free chunk of the same size as the requested one. If none can be
 * found, we pick a larger chunk that is as close in size as possible,
 * and possibly split it so that the remaining part becomes a chunk
 * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
 * free list will be examined.
 *
 * As a last resort, heapmem_alloc() will try to extend the heap
 * space, and thereby create a new chunk available for use.
 */
void *
#if HEAPMEM_DEBUG
heapmem_zone_alloc_debug(heapmem_zone_t zone, size_t size,
                         const char *file, const unsigned line)
#else
heapmem_zone_alloc(heapmem_zone_t zone, size_t size)
#endif
{
  /* Reject out-of-range zone IDs and unregistered (NULL-named) slots. */
  if(zone >= HEAPMEM_MAX_ZONES || zones[zone].name == NULL) {
    LOG_WARN("Attempt to allocate from invalid zone: %u\n", zone);
    return NULL;
  }

  /* Fail early on zero-sized or impossibly large requests. */
  if(size > HEAPMEM_ARENA_SIZE || size == 0) {
    return NULL;
  }

  size = ALIGN(size);

  /* Enforce the zone's reserved budget, including the chunk header
     overhead that the allocation will consume. */
  if(sizeof(chunk_t) + size >
     zones[zone].zone_size - zones[zone].allocated) {
    LOG_ERR("Cannot allocate %zu bytes because of the zone limit\n", size);
    return NULL;
  }

  /* Prefer reusing a free chunk; extend the heap footprint only as a
     last resort. A freshly extended chunk needs its size set. */
  chunk_t *chunk = get_free_chunk(size);
  if(chunk == NULL) {
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  LOG_DBG("%s ptr %p size %zu\n", __func__, GET_PTR(chunk), size);

  /* Record the allocation against the zone's accounting. */
  chunk->zone = zone;
  zones[zone].allocated += sizeof(chunk_t) + size;

  return GET_PTR(chunk);
}
433
434/*
435 * heapmem_free: Deallocate a previously allocated object.
436 *
437 * The pointer must exactly match one returned from an earlier call
438 * from heapmem_alloc or heapmem_realloc, without any call to
439 * heapmem_free in between.
440 *
441 * When deallocating a chunk, the chunk will be inserted into the free
442 * list. Moreover, all free chunks that are adjacent in memory will be
443 * merged into a single chunk in order to mitigate fragmentation.
444 */
445bool
446#if HEAPMEM_DEBUG
447heapmem_free_debug(void *ptr, const char *file, const unsigned line)
448#else
449heapmem_free(void *ptr)
450#endif
451{
452 if(!IN_HEAP(ptr)) {
453 if(ptr) {
454 LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
455 }
456 return false;
457 }
458
459 chunk_t *chunk = GET_CHUNK(ptr);
460 if(!CHUNK_ALLOCATED(chunk)) {
461 LOG_WARN("%s: ptr %p has already been deallocated\n", __func__, ptr);
462 return false;
463 }
464
465#if HEAPMEM_DEBUG
466 LOG_DBG("%s: ptr %p, allocated at %s:%u\n", __func__, ptr,
467 chunk->file, chunk->line);
468#endif
469
470 zones[chunk->zone].allocated -= sizeof(chunk_t) + chunk->size;
471
472 free_chunk(chunk);
473 return true;
474}
475
#if HEAPMEM_REALLOC
/*
 * heapmem_realloc: Reallocate an object with a different size,
 * possibly moving it in memory. In case of success, the function
 * returns a pointer to the object's new location. In case of failure,
 * it returns NULL.
 *
 * If the size of the new chunk is larger than that of the allocated
 * chunk, heapmem_realloc() will first attempt to extend the currently
 * allocated chunk. If the adjacent memory is not free,
 * heapmem_realloc() will attempt to allocate a completely new chunk,
 * copy the old data to the new chunk, and deallocate the old chunk.
 *
 * If the size of the new chunk is smaller than the allocated one, we
 * split the allocated chunk if the remaining chunk would be large
 * enough to justify the overhead of creating a new chunk.
 */
void *
#if HEAPMEM_DEBUG
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
#else
heapmem_realloc(void *ptr, size_t size)
#endif
{
  /* Allow the special case of ptr being NULL as an alias
     for heapmem_alloc(). */
  if(ptr != NULL && !IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  LOG_DBG("%s: ptr %p size %zu at %s:%u\n",
          __func__, ptr, size, file, line);
#endif

  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  /* Special cases in which we can hand off the execution to other functions. */
  if(ptr == NULL) {
    return heapmem_alloc(size);
  } else if(size == 0) {
    heapmem_free(ptr);
    return NULL;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p is not allocated\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  size = ALIGN(size);
  const size_t old_size = chunk->size;

  if(size <= old_size) {
    /* Request to make the object smaller or to keep its size.
       In the former case, the chunk will be split if possible.
       Charge the zone only for the space actually released:
       split_chunk() leaves chunk->size unchanged when the surplus is
       too small to form a new chunk, and subtracting the requested
       difference in that case would corrupt the zone accounting. */
    split_chunk(chunk, size);
    zones[chunk->zone].allocated -= old_size - chunk->size;
    return ptr;
  }

  /* Request to make the object larger (size > old_size). */
  const size_t size_adj = size - old_size;

  if(IS_LAST_CHUNK(chunk)) {
    /*
     * If the object belongs to the last allocated chunk (i.e., the
     * one before the end of the heap footprint), we just attempt to
     * extend the heap.
     */
    if(extend_space(size_adj) != NULL) {
      chunk->size = size;
      zones[chunk->zone].allocated += size_adj;
      return ptr;
    }
  } else {
    /*
     * Here we attempt to enlarge an allocated object, whose
     * adjacent space may already be allocated. We attempt to
     * coalesce chunks in order to make as much room as possible.
     */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      /* There was enough free adjacent space to extend the chunk in
         its current place. Charge the net growth that remains after a
         possible split. */
      split_chunk(chunk, size);
      zones[chunk->zone].allocated += chunk->size - old_size;
      return ptr;
    }
  }

  /*
   * Failed to enlarge the object in its current place, since the
   * adjacent chunk is allocated. Hence, we try to place the new
   * object elsewhere in the heap, and remove the old chunk that was
   * holding it.
   */
  void *newptr = heapmem_zone_alloc(chunk->zone, size);
  if(newptr == NULL) {
    return NULL;
  }

  memcpy(newptr, ptr, old_size);
  /* Unlike heapmem_free(), free_chunk() does not touch the zone
     accounting, so release the old chunk's budget explicitly here;
     otherwise every moving realloc would permanently leak budget and
     eventually trip the zone-limit check in heapmem_zone_alloc(). */
  zones[chunk->zone].allocated -= sizeof(chunk_t) + old_size;
  free_chunk(chunk);

  return newptr;
}
#endif /* HEAPMEM_REALLOC */
593
/* heapmem_calloc: Allocate zero-initialized memory for an array of
   nmemb elements of `size` bytes each. Returns NULL if the element
   size is zero, if nmemb * size would overflow, or if the underlying
   allocation fails. */
void *
#if HEAPMEM_DEBUG
heapmem_calloc_debug(size_t nmemb, size_t size,
                     const char *file, const unsigned line)
#else
heapmem_calloc(size_t nmemb, size_t size)
#endif
{
  size_t total_size = nmemb * size;

  /* Reject zero-sized elements and multiplications that wrapped. */
  if(size == 0 || total_size / size != nmemb) {
    return NULL;
  }

  void *ptr = heapmem_alloc(total_size);
  if(ptr == NULL) {
    return NULL;
  }

  memset(ptr, 0, total_size);
  return ptr;
}
616
617/* heapmem_stats: Provides statistics regarding heap memory usage. */
618void
619heapmem_stats(heapmem_stats_t *stats)
620{
621 memset(stats, 0, sizeof(*stats));
622
623 for(chunk_t *chunk = (chunk_t *)heap_base;
624 (char *)chunk < &heap_base[heap_usage];
625 chunk = NEXT_CHUNK(chunk)) {
626 if(CHUNK_ALLOCATED(chunk)) {
627 stats->allocated += chunk->size;
628 stats->overhead += sizeof(chunk_t);
629 } else {
630 coalesce_chunks(chunk);
631 stats->available += chunk->size;
632 }
633 }
634 stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
635 stats->footprint = heap_usage;
636 stats->max_footprint = max_heap_usage;
637 stats->chunks = stats->overhead / sizeof(chunk_t);
638}
639
/* heapmem_print_debug_info: Print all the statistics collected through
   the heapmem_stats function and, if print_chunks is set, a listing of
   every allocated chunk in the heap. */
void
heapmem_print_debug_info(bool print_chunks)
{
  heapmem_stats_t stats;
  heapmem_stats(&stats);

  HEAPMEM_PRINTF("* HeapMem statistics\n");
  HEAPMEM_PRINTF("* Allocated memory: %zu\n", stats.allocated);
  HEAPMEM_PRINTF("* Available memory: %zu\n", stats.available);
  HEAPMEM_PRINTF("* Heap usage: %zu\n", stats.footprint);
  HEAPMEM_PRINTF("* Max heap usage: %zu\n", stats.max_footprint);
  HEAPMEM_PRINTF("* Allocated chunks: %zu\n", stats.chunks);
  HEAPMEM_PRINTF("* Chunk size: %zu\n", sizeof(chunk_t));
  HEAPMEM_PRINTF("* Total chunk overhead: %zu\n", stats.overhead);

  if(print_chunks) {
    HEAPMEM_PRINTF("* Allocated chunks:\n");
    /* Walk every chunk in the used footprint in address order.
       NOTE(review): PRIuPTR needs <inttypes.h>, which this file does
       not include directly -- presumably pulled in via contiki.h;
       verify. */
    for(chunk_t *chunk = (chunk_t *)heap_base;
        (char *)chunk < &heap_base[heap_usage];
        chunk = NEXT_CHUNK(chunk)) {
      if(CHUNK_ALLOCATED(chunk)) {
#if HEAPMEM_DEBUG
        HEAPMEM_PRINTF("* Chunk: heap offset %"PRIuPTR", obj %p, flags 0x%x (%s:%u)\n",
                       (uintptr_t)((char *)chunk - (char *)heap_base),
                       GET_PTR(chunk), chunk->flags, chunk->file, chunk->line);
#else
        HEAPMEM_PRINTF("* Chunk: heap offset %"PRIuPTR", obj %p, flags 0x%x\n",
                       (uintptr_t)((char *)chunk - (char *)heap_base),
                       GET_PTR(chunk), chunk->flags);
#endif /* HEAPMEM_DEBUG */
      }
    }
  }
}
676
677/* heapmem_alignment: Returns the minimum alignment of allocated addresses. */
678size_t
680{
681 return HEAPMEM_ALIGNMENT;
682}
683
684#endif /* HEAPMEM_CONF_ARENA_SIZE */
cc.h: Default definitions of C compiler quirk work-arounds.
void * heapmem_zone_alloc(heapmem_zone_t zone, size_t size)
Allocate a chunk of memory in the heap.
void * heapmem_realloc(void *ptr, size_t size)
Reallocate a chunk of memory in the heap.
#define heapmem_alloc(size)
Allocate a chunk of memory in the general zone of the heap.
Definition heapmem.h:147
heapmem_zone_t heapmem_zone_register(const char *name, size_t zone_size)
Register a zone with a reserved subdivision of the heap.
void heapmem_stats(heapmem_stats_t *stats)
Obtain internal heapmem statistics regarding the allocated chunks.
size_t heapmem_alignment(void)
Obtain the minimum alignment of allocated addresses.
bool heapmem_free(void *ptr)
Deallocate a chunk of memory.
void heapmem_print_debug_info(bool print_chunks)
Print debugging information for the heap memory management.
void * heapmem_calloc(size_t nmemb, size_t size)
Allocate memory for a zero-initialized array.
Header file for the dynamic heap memory allocator.
Header file for the logging system.