Contiki-NG
heapmem.c
/*
 * Copyright (c) 2005, Nicolas Tsiftes
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * \file
 *         Dynamic memory allocation module.
 * \author
 *         Nicolas Tsiftes <nvt@acm.org>
 */

#ifndef DEBUG
#define DEBUG 0
#endif

#if DEBUG
#include <stdio.h>
#define PRINTF(...) printf(__VA_ARGS__)
#undef HEAPMEM_DEBUG
#define HEAPMEM_DEBUG 1
#else
#define PRINTF(...)
#endif

#ifdef PROJECT_CONF_PATH
/* Load the heapmem configuration from a project configuration file. */
#include PROJECT_CONF_PATH
#endif

#include <stdint.h>
#include <string.h>

#include "heapmem.h"

#include "sys/cc.h"

/* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
   space that will be statically allocated in this module. */
#ifdef HEAPMEM_CONF_ARENA_SIZE
#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
#else
/* If the heap size is not set, we use a minimal size that will ensure
   that all allocation attempts fail. */
#define HEAPMEM_ARENA_SIZE 1
#endif /* HEAPMEM_CONF_ARENA_SIZE */

/*
 * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
 * chunk allocation and defragmentation. The lower this number is, the
 * faster the operations become. The cost of this speedup, however, is
 * that the space overhead might increase.
 */
#ifdef HEAPMEM_CONF_SEARCH_MAX
#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
#else
#define CHUNK_SEARCH_MAX 16
#endif /* HEAPMEM_CONF_SEARCH_MAX */

/*
 * The HEAPMEM_CONF_REALLOC parameter determines whether heapmem_realloc() is
 * enabled (non-zero value) or not (zero value).
 */
#ifdef HEAPMEM_CONF_REALLOC
#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
#else
#define HEAPMEM_REALLOC 1
#endif /* HEAPMEM_CONF_REALLOC */

/*
 * The HEAPMEM_CONF_ALIGNMENT parameter decides what the minimum alignment
 * for allocated data should be.
 */
#ifdef HEAPMEM_CONF_ALIGNMENT
#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
#else
#define HEAPMEM_ALIGNMENT sizeof(int)
#endif /* HEAPMEM_CONF_ALIGNMENT */
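
/*
 * Configuration sketch (illustrative): a project that wants a usable heap
 * would typically override these parameters in the project configuration
 * file that is included via PROJECT_CONF_PATH. The macro names below are
 * the real configuration knobs handled above; the values are examples only:
 *
 *   #define HEAPMEM_CONF_ARENA_SIZE 4096           // 4 KiB static arena
 *   #define HEAPMEM_CONF_SEARCH_MAX 32             // search deeper in the free list
 *   #define HEAPMEM_CONF_ALIGNMENT  sizeof(void *) // align data for pointers
 */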

#define ALIGN(size) \
  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))

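/*
 * Worked example: the rounding trick above assumes HEAPMEM_ALIGNMENT is a
 * power of two. With HEAPMEM_ALIGNMENT == 4, ALIGN(5) == 8 and
 * ALIGN(8) == 8, i.e. the requested size is rounded up to the next
 * multiple of the alignment.
 */
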
/* Macros for chunk iteration. */
#define NEXT_CHUNK(chunk) \
  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
#define IS_LAST_CHUNK(chunk) \
  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])

/* Macros for retrieving the data pointer from a chunk,
   and the other way around. */
#define GET_CHUNK(ptr) \
  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
#define GET_PTR(chunk) \
  (char *)((chunk) + 1)

/* Macros for determining the status of a chunk. */
#define CHUNK_FLAG_ALLOCATED 0x1

#define CHUNK_ALLOCATED(chunk) \
  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
#define CHUNK_FREE(chunk) \
  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)

/*
 * We use a doubly linked list of chunks, with a slight space overhead
 * compared to a singly linked list, but with the advantage of having much
 * faster list removals.
 */
typedef struct chunk {
  struct chunk *prev;
  struct chunk *next;
  size_t size;
  uint8_t flags;
#if HEAPMEM_DEBUG
  const char *file;
  unsigned line;
#endif
} chunk_t;
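
/*
 * Informal layout sketch: each chunk is a chunk_t header immediately
 * followed by its payload, and chunks are packed back to back in the
 * arena, which is why the macros above only step over sizeof(chunk_t):
 *
 *   heap_base
 *   |
 *   v
 *   +---------+----------------+---------+----------------+ ...
 *   | chunk_t | payload (size) | chunk_t | payload (size) | ...
 *   +---------+----------------+---------+----------------+ ...
 *             ^
 *             GET_PTR(chunk) points here; GET_CHUNK(ptr) goes back
 *             to the header, and NEXT_CHUNK(chunk) to the next header.
 */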

/* All allocated space is located within a "heap", which is statically
   allocated with a pre-configured size. */
static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
static size_t heap_usage;

static chunk_t *first_chunk = (chunk_t *)heap_base;
static chunk_t *free_list;

/* extend_space: Increases the current footprint used in the heap, and
   returns a pointer to the old end. */
static void *
extend_space(size_t size)
{
  char *old_usage;

  if(heap_usage + size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  old_usage = &heap_base[heap_usage];
  heap_usage += size;

  return old_usage;
}

/* free_chunk: Mark a chunk as being free, and put it on the free list. */
static void
free_chunk(chunk_t * const chunk)
{
  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;

  if(IS_LAST_CHUNK(chunk)) {
    /* Release the chunk back into the wilderness. */
    heap_usage -= sizeof(chunk_t) + chunk->size;
  } else {
    /* Put the chunk on the free list. */
    chunk->prev = NULL;
    chunk->next = free_list;
    if(free_list != NULL) {
      free_list->prev = chunk;
    }
    free_list = chunk;
  }
}

/* allocate_chunk: Mark a chunk as being allocated, and remove it
   from the free list. */
static void
allocate_chunk(chunk_t * const chunk)
{
  chunk->flags |= CHUNK_FLAG_ALLOCATED;

  if(chunk == free_list) {
    free_list = chunk->next;
    if(free_list != NULL) {
      free_list->prev = NULL;
    }
  } else {
    chunk->prev->next = chunk->next;
  }

  if(chunk->next != NULL) {
    chunk->next->prev = chunk->prev;
  }
}

/*
 * split_chunk: When allocating a chunk, we may have found one that is
 * larger than needed, so this function is called to keep the rest of
 * the original chunk free.
 */
static void
split_chunk(chunk_t * const chunk, size_t offset)
{
  chunk_t *new_chunk;

  offset = ALIGN(offset);

  if(offset + sizeof(chunk_t) < chunk->size) {
    new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
    new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
    new_chunk->flags = 0;
    free_chunk(new_chunk);

    chunk->size = offset;
    chunk->next = chunk->prev = NULL;
  }
}
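
/*
 * Worked example (assuming sizeof(chunk_t) == 16 and 4-byte alignment,
 * which are plausible values on a 32-bit MCU): splitting a 100-byte free
 * chunk for a 32-byte request leaves 100 - 16 - 32 = 52 bytes in a new
 * free chunk placed right after the allocated payload. If the remainder
 * would not fit a chunk_t header, the chunk is left unsplit and the extra
 * bytes simply stay with the allocation.
 */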

/* coalesce_chunks: Coalesce a specific free chunk with as many adjacent
   free chunks as possible. */
static void
coalesce_chunks(chunk_t *chunk)
{
  chunk_t *next;

  for(next = NEXT_CHUNK(chunk);
      (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
      next = NEXT_CHUNK(next)) {
    chunk->size += sizeof(chunk_t) + next->size;
    allocate_chunk(next);
  }
}

/* defrag_chunks: Scan the free list for chunks that can be coalesced,
   and stop within a bounded time. */
static void
defrag_chunks(void)
{
  int i;
  chunk_t *chunk;

  /* Limit the time we spend on searching the free list. */
  i = CHUNK_SEARCH_MAX;
  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }
    coalesce_chunks(chunk);
  }
}

/* get_free_chunk: Search the free list for the most suitable chunk, as
   determined by its size, to satisfy an allocation request. */
static chunk_t *
get_free_chunk(const size_t size)
{
  int i;
  chunk_t *chunk, *best;

  /* Defragment chunks only right before they are needed for allocation. */
  defrag_chunks();

  best = NULL;
  /* Limit the time we spend on searching the free list. */
  i = CHUNK_SEARCH_MAX;
  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }

    /*
     * To avoid fragmenting large chunks, we select the chunk with the
     * smallest size that is larger than or equal to the requested size.
     */
    if(size <= chunk->size) {
      if(best == NULL || chunk->size < best->size) {
        best = chunk;
      }
      if(best->size == size) {
        /* We found a perfect chunk -- stop the search. */
        break;
      }
    }
  }

  if(best != NULL) {
    /* We found a chunk for the allocation. Split it if necessary. */
    allocate_chunk(best);
    split_chunk(best, size);
  }

  return best;
}
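
/*
 * For instance, with free chunks of 64, 24, and 40 bytes on the free
 * list, a 32-byte request selects the 40-byte chunk: it is the smallest
 * one that still fits, which keeps the 64-byte chunk intact for larger
 * requests (a best-fit policy bounded by CHUNK_SEARCH_MAX).
 */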

/*
 * heapmem_alloc: Allocate an object of the specified size, returning
 * a pointer to it in case of success, and NULL in case of failure.
 *
 * When allocating memory, heapmem_alloc() will first try to find a
 * free chunk of the same size as the requested one. If none can be
 * found, we pick a larger chunk that is as close in size as possible,
 * and possibly split it so that the remaining part becomes a chunk
 * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
 * free list will be examined.
 *
 * As a last resort, heapmem_alloc() will try to extend the heap
 * space, and thereby create a new chunk available for use.
 */
void *
#if HEAPMEM_DEBUG
heapmem_alloc_debug(size_t size, const char *file, const unsigned line)
#else
heapmem_alloc(size_t size)
#endif
{
  chunk_t *chunk;

  size = ALIGN(size);

  chunk = get_free_chunk(size);
  if(chunk == NULL) {
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  PRINTF("%s ptr %p size %lu\n", __func__, GET_PTR(chunk), (unsigned long)size);

  return GET_PTR(chunk);
}
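
/*
 * Usage sketch (illustrative; the buffer name and size are arbitrary):
 * callers pair heapmem_alloc() with heapmem_free() and must check for
 * NULL, since the arena may be full or too fragmented:
 *
 *   uint8_t *buf = heapmem_alloc(64);
 *   if(buf != NULL) {
 *     memset(buf, 0, 64);
 *     // ... use buf ...
 *     heapmem_free(buf);
 *   }
 */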

/*
 * heapmem_free: Deallocate a previously allocated object.
 *
 * The pointer must exactly match one returned from an earlier call
 * to heapmem_alloc or heapmem_realloc, without any call to
 * heapmem_free in between.
 *
 * When performing a deallocation of a chunk, the chunk will be put on
 * a list of free chunks internally. All free chunks that are adjacent
 * in memory will be merged into a single chunk in order to mitigate
 * fragmentation.
 */
void
#if HEAPMEM_DEBUG
heapmem_free_debug(void *ptr, const char *file, const unsigned line)
#else
heapmem_free(void *ptr)
#endif
{
  chunk_t *chunk;

  if(ptr) {
    chunk = GET_CHUNK(ptr);

    PRINTF("%s ptr %p, allocated at %s:%u\n", __func__, ptr,
           chunk->file, chunk->line);

    free_chunk(chunk);
  }
}

#if HEAPMEM_REALLOC
/*
 * heapmem_realloc: Reallocate an object with a different size,
 * possibly moving it in memory. In case of success, the function
 * returns a pointer to the object's new location. In case of failure,
 * it returns NULL.
 *
 * If the size of the new chunk is larger than that of the allocated
 * chunk, heapmem_realloc() will first attempt to extend the currently
 * allocated chunk. If that memory is not free, heapmem_realloc() will
 * attempt to allocate a completely new chunk, copy the old data to
 * the new chunk, and deallocate the old chunk.
 *
 * If the size of the new chunk is smaller than the allocated one, we
 * split the allocated chunk if the remaining chunk would be large
 * enough to justify the overhead of creating a new chunk.
 */
void *
#if HEAPMEM_DEBUG
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
#else
heapmem_realloc(void *ptr, size_t size)
#endif
{
  void *newptr;
  chunk_t *chunk;
  int size_adj;

  PRINTF("%s ptr %p size %u at %s:%u\n",
         __func__, ptr, (unsigned)size, file, line);

  /* Special cases in which we can hand off the execution to other functions. */
  if(ptr == NULL) {
    return heapmem_alloc(size);
  } else if(size == 0) {
    heapmem_free(ptr);
    return NULL;
  }

  chunk = GET_CHUNK(ptr);
#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  size = ALIGN(size);
  size_adj = size - chunk->size;

  if(size_adj <= 0) {
    /* Request to make the object smaller or to keep its size.
       In the former case, the chunk will be split if possible. */
    split_chunk(chunk, size);
    return ptr;
  }

  /* Request to make the object larger. (size_adj > 0) */
  if(IS_LAST_CHUNK(chunk)) {
    /*
     * If the object is within the last allocated chunk (i.e., the
     * one before the end of the heap footprint), we just attempt to
     * extend the heap.
     */
    if(extend_space(size_adj) != NULL) {
      chunk->size = size;
      return ptr;
    }
  } else {
    /*
     * Here we attempt to enlarge an allocated object, whose
     * adjacent space may already be allocated. We attempt to
     * coalesce chunks in order to make as much room as possible.
     */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      /* There was enough free adjacent space to extend the chunk in
         its current place. */
      split_chunk(chunk, size);
      return ptr;
    }
  }

  /*
   * Failed to enlarge the object in its current place, since the
   * adjacent chunk is allocated. Hence, we try to place the new
   * object elsewhere in the heap, and remove the old chunk that was
   * holding it.
   */
  newptr = heapmem_alloc(size);
  if(newptr == NULL) {
    return NULL;
  }

  memcpy(newptr, ptr, chunk->size);
  free_chunk(chunk);

  return newptr;
}
#endif /* HEAPMEM_REALLOC */
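
/*
 * Usage sketch (illustrative; names and sizes are arbitrary): growing a
 * buffer with heapmem_realloc() follows the same pattern as the C
 * library's realloc(), keeping the old pointer until success is known,
 * since a failed reallocation leaves the original chunk untouched:
 *
 *   char *p = heapmem_alloc(32);
 *   if(p != NULL) {
 *     char *q = heapmem_realloc(p, 128);
 *     if(q == NULL) {
 *       heapmem_free(p);   // p is still valid after a failed reallocation
 *     } else {
 *       p = q;             // the object may have moved to a new location
 *     }
 *   }
 */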

/* heapmem_stats: Calculate statistics regarding memory usage. */
void
heapmem_stats(heapmem_stats_t *stats)
{
  chunk_t *chunk;

  memset(stats, 0, sizeof(*stats));

  for(chunk = first_chunk;
      (char *)chunk < &heap_base[heap_usage];
      chunk = NEXT_CHUNK(chunk)) {
    if(CHUNK_ALLOCATED(chunk)) {
      stats->allocated += chunk->size;
    } else {
      coalesce_chunks(chunk);
      stats->available += chunk->size;
    }
    stats->overhead += sizeof(chunk_t);
  }
  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
  stats->footprint = heap_usage;
  stats->chunks = stats->overhead / sizeof(chunk_t);
}
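
/*
 * Usage sketch (illustrative): a caller can inspect heap health at run
 * time, for example to log it periodically. The field names are the ones
 * filled in by heapmem_stats() above; the printing is only an example.
 *
 *   heapmem_stats_t stats;
 *   heapmem_stats(&stats);
 *   printf("allocated %lu available %lu overhead %lu footprint %lu chunks %lu\n",
 *          (unsigned long)stats.allocated, (unsigned long)stats.available,
 *          (unsigned long)stats.overhead, (unsigned long)stats.footprint,
 *          (unsigned long)stats.chunks);
 */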