/*
 * Copyright (c) 2005, Nicolas Tsiftes
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * \file
 *         HeapMem: a dynamic memory allocation module for
 *         resource-constrained devices.
 * \author
 *         Nicolas Tsiftes <nvt@acm.org>
 */

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "contiki.h"
#include "lib/heapmem.h"
#include "sys/cc.h"

#ifdef HEAPMEM_CONF_ARENA_SIZE

/* Log configuration */
#include "sys/log.h"
#define LOG_MODULE "HeapMem"
#define LOG_LEVEL LOG_LEVEL_WARN

/* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
   space that will be statically allocated in this module. */
#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE

/*
 * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
 * chunk allocation and defragmentation. The lower this number is, the
 * faster the operations become. The cost of this speedup, however, is
 * that the space overhead might increase.
 */
#ifdef HEAPMEM_CONF_SEARCH_MAX
#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
#else
#define CHUNK_SEARCH_MAX 16
#endif /* HEAPMEM_CONF_SEARCH_MAX */

/*
 * The HEAPMEM_CONF_REALLOC parameter determines whether
 * heapmem_realloc() is enabled (non-zero value) or not (zero value).
 */
#ifdef HEAPMEM_CONF_REALLOC
#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
#else
#define HEAPMEM_REALLOC 1
#endif /* HEAPMEM_CONF_REALLOC */

#if __STDC_VERSION__ >= 201112L
#include <stdalign.h>
#define HEAPMEM_DEFAULT_ALIGNMENT alignof(max_align_t)
#else
#define HEAPMEM_DEFAULT_ALIGNMENT sizeof(size_t)
#endif

/*
 * The HEAPMEM_CONF_ALIGNMENT parameter decides what the minimum
 * alignment for allocated data should be.
 */
#ifdef HEAPMEM_CONF_ALIGNMENT
#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
#else
#define HEAPMEM_ALIGNMENT HEAPMEM_DEFAULT_ALIGNMENT
#endif /* HEAPMEM_CONF_ALIGNMENT */

#define ALIGN(size) \
  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))
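
/*
 * Illustrative example (editorial, not part of the Contiki-NG
 * sources): assuming HEAPMEM_ALIGNMENT is 8, ALIGN() rounds a request
 * up to the next multiple of 8 with the usual power-of-two mask trick:
 *
 *   ALIGN(1)  -> (1 + 7) & ~7  ->  8
 *   ALIGN(8)  -> (8 + 7) & ~7  ->  8
 *   ALIGN(13) -> (13 + 7) & ~7 -> 16
 */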

/* Macros for chunk iteration. */
#define NEXT_CHUNK(chunk) \
  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
#define IS_LAST_CHUNK(chunk) \
  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])

/* Macros for retrieving the data pointer from a chunk, and the other
   way around. */
#define GET_CHUNK(ptr) \
  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
#define GET_PTR(chunk) \
  (char *)((chunk) + 1)

/* Macros for determining the status of a chunk. */
#define CHUNK_FLAG_ALLOCATED 0x1

#define CHUNK_ALLOCATED(chunk) \
  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
#define CHUNK_FREE(chunk) \
  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
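
/*
 * Illustrative layout (editorial, not part of the Contiki-NG
 * sources): a chunk is a chunk_t header immediately followed by the
 * user data, so GET_PTR() and GET_CHUNK() are exact inverses:
 *
 *   | chunk_t header | user data (chunk->size bytes) | next header | ...
 *   ^                ^
 *   chunk            GET_PTR(chunk)
 *
 * NEXT_CHUNK() steps over the header and the data in one go, which is
 * why IS_LAST_CHUNK() can compare its result against the current end
 * of the used heap area.
 */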

/*
 * A heapmem zone denotes a logical subdivision of the heap that is
 * dedicated for a specific purpose. This mimics the behavior of
 * various memory management strategies for embedded systems (e.g., by
 * having a fixed memory space for packet buffers), yet maintains a
 * level of dynamism offered by a malloc-like API. The rest of the
 * heap area that is not dedicated to a specific zone belongs to the
 * "GENERAL" zone, which can be used by any module.
 */
struct heapmem_zone {
  const char *name;
  size_t zone_size;
  size_t allocated;
};

#ifdef HEAPMEM_CONF_MAX_ZONES
#define HEAPMEM_MAX_ZONES HEAPMEM_CONF_MAX_ZONES
#else
#define HEAPMEM_MAX_ZONES 1
#endif

#if HEAPMEM_MAX_ZONES < 1
#error At least one HeapMem zone must be configured.
#endif

static struct heapmem_zone zones[HEAPMEM_MAX_ZONES] = {
  {.name = "GENERAL", .zone_size = HEAPMEM_ARENA_SIZE}
};

/*
 * We use a doubly linked list of chunks, with a slight space overhead
 * compared to a singly linked list, but with the advantage of having
 * much faster list removals.
 */
typedef struct chunk {
  struct chunk *prev;
  struct chunk *next;
  size_t size;
  uint8_t flags;
  heapmem_zone_t zone;
#if HEAPMEM_DEBUG
  const char *file;
  unsigned line;
#endif
} chunk_t;

/* All allocated space is located within a heap, which is
   statically allocated with a configurable size. */
static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
static size_t heap_usage;

static chunk_t *first_chunk = (chunk_t *)heap_base;
static chunk_t *free_list;

#define IN_HEAP(ptr) (((char *)(ptr) >= (char *)heap_base) && \
                      ((char *)(ptr) < (char *)heap_base + heap_usage))

/* extend_space: Increases the current footprint used in the heap, and
   returns a pointer to the old end. */
static void *
extend_space(size_t size)
{
  if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
    return NULL;
  }

  char *old_usage = &heap_base[heap_usage];
  heap_usage += size;

  return old_usage;
}

/* free_chunk: Mark a chunk as being free, and put it on the free list. */
static void
free_chunk(chunk_t * const chunk)
{
  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;

  if(IS_LAST_CHUNK(chunk)) {
    /* Release the chunk back into the wilderness. */
    heap_usage -= sizeof(chunk_t) + chunk->size;
  } else {
    /* Put the chunk on the free list. */
    chunk->prev = NULL;
    chunk->next = free_list;
    if(free_list != NULL) {
      free_list->prev = chunk;
    }
    free_list = chunk;
  }
}

/* remove_chunk_from_free_list: Mark a chunk as being allocated, and
   remove it from the free list. */
static void
remove_chunk_from_free_list(chunk_t * const chunk)
{
  if(chunk == free_list) {
    free_list = chunk->next;
    if(free_list != NULL) {
      free_list->prev = NULL;
    }
  } else {
    chunk->prev->next = chunk->next;
  }

  if(chunk->next != NULL) {
    chunk->next->prev = chunk->prev;
  }
}

/*
 * split_chunk: When allocating a chunk, we may have found one that is
 * larger than needed, so this function is called to keep the rest of
 * the original chunk free.
 */
static void
split_chunk(chunk_t * const chunk, size_t offset)
{
  offset = ALIGN(offset);

  if(offset + sizeof(chunk_t) < chunk->size) {
    chunk_t *new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
    new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
    new_chunk->flags = 0;
    free_chunk(new_chunk);

    chunk->size = offset;
    chunk->next = chunk->prev = NULL;
  }
}
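
/*
 * Worked example (editorial, not part of the Contiki-NG sources;
 * assumes a 16-byte chunk_t and 8-byte alignment): splitting a
 * 96-byte chunk at offset 40 shrinks it to 40 data bytes and creates
 * a free chunk of 96 - 16 - 40 = 40 bytes right behind it. Had the
 * remainder been too small to hold a chunk_t header, the condition
 * above would fail and the chunk would be left unsplit.
 */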

/* coalesce_chunks: Coalesce a specific free chunk with as many
   adjacent free chunks as possible. */
static void
coalesce_chunks(chunk_t *chunk)
{
  for(chunk_t *next = NEXT_CHUNK(chunk);
      (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
      next = NEXT_CHUNK(next)) {
    chunk->size += sizeof(chunk_t) + next->size;
    LOG_DBG("Coalesce chunk of %zu bytes\n", next->size);
    remove_chunk_from_free_list(next);
  }
}

/* defrag_chunks: Scan the free list for chunks that can be coalesced,
   and stop within a bounded time. */
static void
defrag_chunks(void)
{
  /* Limit the time we spend on searching the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }
    coalesce_chunks(chunk);
  }
}

/* get_free_chunk: Search the free list for the most suitable chunk,
   as determined by its size, to satisfy an allocation request. */
static chunk_t *
get_free_chunk(const size_t size)
{
  /* Defragment chunks only right before they are needed for allocation. */
  defrag_chunks();

  chunk_t *best = NULL;
  /* Limit the time we spend on searching the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }

    /*
     * To avoid fragmenting large chunks, we select the chunk with the
     * smallest size that is larger than or equal to the requested size.
     */
    if(size <= chunk->size) {
      if(best == NULL || chunk->size < best->size) {
        best = chunk;
      }
      if(best->size == size) {
        /* We found a perfect chunk -- stop the search. */
        break;
      }
    }
  }

  if(best != NULL) {
    /* We found a chunk for the allocation. Split it if necessary. */
    remove_chunk_from_free_list(best);
    split_chunk(best, size);
  }

  return best;
}
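
/*
 * Illustrative example (editorial, not part of the Contiki-NG
 * sources; assumes a 16-byte chunk_t): with free chunks of 32, 64,
 * and 128 bytes on the list, a request for 48 bytes selects the
 * 64-byte chunk as the best fit. split_chunk() then leaves it whole,
 * because the 16-byte remainder cannot hold a chunk_t header, and the
 * 128-byte chunk stays intact for larger requests.
 */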

/*
 * heapmem_zone_register: Register a new zone, which is essentially a
 * subdivision of the heap with a reserved allocation space. This
 * feature ensures that certain modules can get a dedicated heap for
 * prioritized memory -- unlike what can be attained when allocating
 * from the general zone.
 */
heapmem_zone_t
heapmem_zone_register(const char *name, size_t zone_size)
{
  if(zone_size > zones[HEAPMEM_ZONE_GENERAL].zone_size) {
    LOG_ERR("Too large zone allocation limit: %zu\n", zone_size);
    return HEAPMEM_ZONE_INVALID;
  } else if(name == NULL || zone_size == 0) {
    return HEAPMEM_ZONE_INVALID;
  }

  for(heapmem_zone_t i = HEAPMEM_ZONE_GENERAL + 1; i < HEAPMEM_MAX_ZONES; i++) {
    if(zones[i].name == NULL) {
      /* Found a free slot. */
      zones[i].name = name;
      zones[i].zone_size = zone_size;
      /* The general zone has a lower priority than registered zones,
         so we transfer a part of the general zone to this one. */
      zones[HEAPMEM_ZONE_GENERAL].zone_size -= zone_size;
      LOG_INFO("Registered zone \"%s\" with ID %u\n", name, i);
      return i;
    } else if(strcmp(zones[i].name, name) == 0) {
      LOG_ERR("Duplicate zone registration: %s\n", name);
      return HEAPMEM_ZONE_INVALID;
    }
  }

  LOG_ERR("Cannot allocate more zones\n");

  return HEAPMEM_ZONE_INVALID;
}
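
/*
 * Usage sketch (editorial, not part of the Contiki-NG sources; the
 * zone name and sizes are hypothetical, and HEAPMEM_CONF_MAX_ZONES
 * must be at least 2 for the registration to succeed):
 *
 *   heapmem_zone_t pkt_zone = heapmem_zone_register("packets", 2048);
 *   if(pkt_zone != HEAPMEM_ZONE_INVALID) {
 *     void *frame = heapmem_zone_alloc(pkt_zone, 127);
 *     ...
 *   }
 *
 * Registering the zone transfers 2048 bytes of allocation budget from
 * the GENERAL zone to "packets"; the arena itself is not partitioned.
 */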

/*
 * heapmem_alloc: Allocate an object of the specified size, returning
 * a pointer to it in case of success, and NULL in case of failure.
 *
 * When allocating memory, heapmem_alloc() will first try to find a
 * free chunk of the same size as the requested one. If none can be
 * found, we pick a larger chunk that is as close in size as possible,
 * and possibly split it so that the remaining part becomes a chunk
 * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
 * free list will be examined.
 *
 * As a last resort, heapmem_alloc() will try to extend the heap
 * space, and thereby create a new chunk available for use.
 */
void *
#if HEAPMEM_DEBUG
heapmem_zone_alloc_debug(heapmem_zone_t zone, size_t size,
                         const char *file, const unsigned line)
#else
heapmem_zone_alloc(heapmem_zone_t zone, size_t size)
#endif
{
  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  if(zone >= HEAPMEM_MAX_ZONES || zones[zone].name == NULL) {
    LOG_WARN("Attempt to allocate from invalid zone: %u\n", zone);
    return NULL;
  }

  size = ALIGN(size);

  if(sizeof(chunk_t) + size >
     zones[zone].zone_size - zones[zone].allocated) {
    LOG_ERR("Cannot allocate %zu bytes because of the zone limit\n", size);
    return NULL;
  }

  chunk_t *chunk = get_free_chunk(size);
  if(chunk == NULL) {
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  LOG_DBG("%s ptr %p size %zu\n", __func__, GET_PTR(chunk), size);

  chunk->zone = zone;
  zones[zone].allocated += sizeof(chunk_t) + size;

  return GET_PTR(chunk);
}
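
/*
 * Usage sketch (editorial, not part of the Contiki-NG sources): a
 * plain allocation from the general zone through the heapmem_alloc()
 * macro declared in lib/heapmem.h, with the mandatory NULL check.
 *
 *   int *samples = heapmem_alloc(4 * sizeof(int));
 *   if(samples != NULL) {
 *     samples[0] = 42;
 *     ...
 *     heapmem_free(samples);
 *   }
 */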

/*
 * heapmem_free: Deallocate a previously allocated object.
 *
 * The pointer must exactly match one returned by an earlier call to
 * heapmem_alloc() or heapmem_realloc(), without any intervening call
 * to heapmem_free() on that pointer.
 *
 * When deallocating a chunk, the chunk is put on an internal list of
 * free chunks. All free chunks that are adjacent in memory will be
 * merged into a single chunk in order to mitigate fragmentation.
 */
bool
#if HEAPMEM_DEBUG
heapmem_free_debug(void *ptr, const char *file, const unsigned line)
#else
heapmem_free(void *ptr)
#endif
{
  if(!IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return false;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p has already been deallocated\n", __func__, ptr);
    return false;
  }

#if HEAPMEM_DEBUG
  LOG_DBG("%s: ptr %p, allocated at %s:%u\n", __func__, ptr,
          chunk->file, chunk->line);
#endif

  zones[chunk->zone].allocated -= sizeof(chunk_t) + chunk->size;

  free_chunk(chunk);
  return true;
}
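
/*
 * Editorial note (not part of the Contiki-NG sources): since the
 * allocation flag is checked above, a double free on a chunk that is
 * still free is detected and reported through the return value:
 *
 *   heapmem_free(obj);  -> true: the chunk is released
 *   heapmem_free(obj);  -> false: a warning is logged instead
 */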

#if HEAPMEM_REALLOC
/*
 * heapmem_realloc: Reallocate an object with a different size,
 * possibly moving it in memory. In case of success, the function
 * returns a pointer to the object's new location. In case of failure,
 * it returns NULL.
 *
 * If the size of the new chunk is larger than that of the allocated
 * chunk, heapmem_realloc() will first attempt to extend the currently
 * allocated chunk. If that memory is not free, heapmem_realloc() will
 * attempt to allocate a completely new chunk, copy the old data to
 * the new chunk, and deallocate the old chunk.
 *
 * If the size of the new chunk is smaller than the allocated one, we
 * split the allocated chunk if the remaining chunk would be large
 * enough to justify the overhead of creating a new chunk.
 */
void *
#if HEAPMEM_DEBUG
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
#else
heapmem_realloc(void *ptr, size_t size)
#endif
{
  /* Allow the special case of ptr being NULL as an alias
     for heapmem_alloc(). */
  if(ptr != NULL && !IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  LOG_DBG("%s: ptr %p size %zu at %s:%u\n",
          __func__, ptr, size, file, line);
#endif

  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  /* Special cases in which we can hand off the execution to other functions. */
  if(ptr == NULL) {
    return heapmem_alloc(size);
  } else if(size == 0) {
    heapmem_free(ptr);
    return NULL;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p is not allocated\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  size = ALIGN(size);
  int size_adj = size - chunk->size;

  if(size_adj <= 0) {
    /* Request to make the object smaller or to keep its size.
       In the former case, the chunk will be split if possible. */
    split_chunk(chunk, size);
    zones[chunk->zone].allocated += size_adj;
    return ptr;
  }

  /* Request to make the object larger. (size_adj > 0) */
  if(IS_LAST_CHUNK(chunk)) {
    /*
     * If the object is within the last allocated chunk (i.e., the
     * one before the end of the heap footprint), we just attempt to
     * extend the heap.
     */
    if(extend_space(size_adj) != NULL) {
      chunk->size = size;
      zones[chunk->zone].allocated += size_adj;
      return ptr;
    }
  } else {
    /*
     * Here we attempt to enlarge an allocated object, whose
     * adjacent space may already be allocated. We attempt to
     * coalesce chunks in order to make as much room as possible.
     */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      /* There was enough free adjacent space to extend the chunk in
         its current place. */
      split_chunk(chunk, size);
      zones[chunk->zone].allocated += size_adj;
      return ptr;
    }
  }

  /*
   * Failed to enlarge the object in its current place, since the
   * adjacent chunk is allocated. Hence, we try to place the new
   * object elsewhere in the heap, and remove the old chunk that was
   * holding it.
   */
  void *newptr = heapmem_zone_alloc(chunk->zone, size);
  if(newptr == NULL) {
    return NULL;
  }

  memcpy(newptr, ptr, chunk->size);
  free_chunk(chunk);

  return newptr;
}
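
/*
 * Usage sketch (editorial, not part of the Contiki-NG sources):
 * growing a buffer without losing the original pointer if the
 * reallocation fails.
 *
 *   char *buf = heapmem_alloc(64);
 *   ...
 *   char *bigger = heapmem_realloc(buf, 128);
 *   if(bigger != NULL) {
 *     buf = bigger;
 *   }
 */
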
#endif /* HEAPMEM_REALLOC */

/* heapmem_stats: Calculate statistics regarding memory usage. */
void
heapmem_stats(heapmem_stats_t *stats)
{
  memset(stats, 0, sizeof(*stats));

  for(chunk_t *chunk = first_chunk;
      (char *)chunk < &heap_base[heap_usage];
      chunk = NEXT_CHUNK(chunk)) {
    if(CHUNK_ALLOCATED(chunk)) {
      stats->allocated += chunk->size;
      stats->overhead += sizeof(chunk_t);
    } else {
      coalesce_chunks(chunk);
      stats->available += chunk->size;
    }
  }
  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
  stats->footprint = heap_usage;
  stats->chunks = stats->overhead / sizeof(chunk_t);
}
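
/*
 * Usage sketch (editorial, not part of the Contiki-NG sources):
 * taking a snapshot of the allocator state, e.g., for periodic
 * logging.
 *
 *   heapmem_stats_t stats;
 *   heapmem_stats(&stats);
 *   LOG_INFO("allocated %zu, available %zu, footprint %zu\n",
 *            stats.allocated, stats.available, stats.footprint);
 */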

/* heapmem_alignment: Return the minimum alignment of allocated addresses. */
size_t
heapmem_alignment(void)
{
  return HEAPMEM_ALIGNMENT;
}

#endif /* HEAPMEM_CONF_ARENA_SIZE */