Contiki-NG
index-maxheap.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2010, Swedish Institute of Computer Science
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the Institute nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/**
31 * \file
32 * MaxHeap - A binary maximum heap index for flash memory.
33 *
34 * The idea behind the MaxHeap index is to write entries sequentially
35 * into small buckets, which are indexed in a binary maximum heap.
36 * Although sequential writes make the entries unsorted within a
37 * bucket, the time to load and scan a single bucket is small. The
38 * sequential write is important for flash memories, which are
39 * unable to handle multiple rewrites of the same page without doing
40 * an expensive erase operation between the rewrites.
41 *
42 * Each bucket specifies a range (a,b) of values that it accepts.
43 * Once a bucket fills up, two buckets are created with the ranges
44 * (a,mean) and (mean+1, b), respectively. The entries from the
45 * original bucket are then copied into the appropriate new bucket
46 * before the old bucket gets deleted.
47 * \author
48 * Nicolas Tsiftes <nvt@sics.se>
49 */
50
51#include <limits.h>
52#include <stdio.h>
53#include <stdlib.h>
54#include <string.h>
55
56#include "cfs/cfs.h"
57#include "cfs/cfs-coffee.h"
58#include "lib/memb.h"
59#include "lib/random.h"
60
61#include "db-options.h"
62#include "index.h"
63#include "result.h"
64#include "storage.h"
65
66#define DEBUG DEBUG_NONE
67#include "net/ipv6/uip-debug.h"
68
69#define BRANCH_FACTOR 2
70#define BUCKET_SIZE 128
71#define NODE_LIMIT 511
72#define NODE_DEPTH 9
73
74#if (1 << NODE_DEPTH) != (NODE_LIMIT + 1)
75#error "NODE_DEPTH is set incorrectly."
76#endif
77
78#define EMPTY_NODE(node) ((node)->min == 0 && (node)->max == 0)
79#define EMPTY_PAIR(pair) ((pair)->key == 0 && (pair)->value == 0)
80
81typedef uint16_t maxheap_key_t;
82typedef uint16_t maxheap_value_t;
83
84#define KEY_MIN 0
85#define KEY_MAX 65535
86
/* A heap node covers a contiguous, inclusive range of hashed keys.
   A node whose min and max are both zero is considered unused. */
struct heap_node {
  maxheap_key_t min;
  maxheap_key_t max;
};
typedef struct heap_node heap_node_t;

/* One index entry: a (raw) key and its associated tuple value. */
struct key_value_pair {
  maxheap_key_t key;
  maxheap_value_t value;
};

/* A bucket holds the key-value pairs belonging to one heap node. */
struct bucket {
  struct key_value_pair pairs[BUCKET_SIZE];
};
typedef struct bucket bucket_t;

/* Per-index state: handles to the heap file and the bucket file. */
struct heap {
  db_storage_id_t heap_storage;
  db_storage_id_t bucket_storage;
  /* Remember where the next free slot for each bucket is located. */
  uint8_t next_free_slot[NODE_LIMIT];
};
typedef struct heap heap_t;

/* An in-RAM copy of a bucket read from storage. The heap pointer is
   NULL when the cache entry is unused. */
struct bucket_cache {
  heap_t *heap;
  uint16_t bucket_id;
  bucket_t bucket;
};

/* Keep a cache of buckets read from storage. */
static struct bucket_cache bucket_cache[DB_HEAP_CACHE_LIMIT];
MEMB(heaps, heap_t, DB_HEAP_INDEX_LIMIT);
120
121static struct bucket_cache *get_cache(heap_t *, int);
122static struct bucket_cache *get_cache_free(void);
123static void invalidate_cache(void);
124static maxheap_key_t transform_key(maxheap_key_t);
125static int heap_read(heap_t *, int, heap_node_t *);
126static int heap_write(heap_t *, int, heap_node_t *);
127static int heap_insert(heap_t *, maxheap_key_t, maxheap_key_t);
128static int heap_find(heap_t *, maxheap_key_t key, int *iterator);
129#if HEAP_DEBUG
130static void heap_print(heap_t *);
131#endif
132static int bucket_read(heap_t *, int, bucket_t *);
133static struct bucket_cache *bucket_load(heap_t *, int);
134static int bucket_append(heap_t *, int, struct key_value_pair *);
135static int bucket_split(heap_t *, int);
136
137static db_result_t create(index_t *);
138static db_result_t destroy(index_t *);
139static db_result_t load(index_t *);
140static db_result_t release(index_t *);
141static db_result_t insert(index_t *, attribute_value_t *, tuple_id_t);
142static db_result_t delete(index_t *, attribute_value_t *);
143static tuple_id_t get_next(index_iterator_t *);
144
/* The exported API of the max-heap index, as registered with the DB
   index handler. The index keeps its data in external (flash)
   storage rather than in RAM. */
index_api_t index_maxheap = {
  INDEX_MAXHEAP,
  INDEX_API_EXTERNAL,
  create,
  destroy,
  load,
  release,
  insert,
  delete,
  get_next
};
156
157static struct bucket_cache *
158get_cache(heap_t *heap, int bucket_id)
159{
160 int i;
161
162 for(i = 0; i < DB_HEAP_CACHE_LIMIT; i++) {
163 if(bucket_cache[i].heap == heap && bucket_cache[i].bucket_id == bucket_id) {
164 return &bucket_cache[i];
165 }
166 }
167 return NULL;
168}
169
170static struct bucket_cache *
171get_cache_free(void)
172{
173 int i;
174
175 for(i = 0; i < DB_HEAP_CACHE_LIMIT; i++) {
176 if(bucket_cache[i].heap == NULL) {
177 return &bucket_cache[i];
178 }
179 }
180 return NULL;
181}
182
183static void
184invalidate_cache(void)
185{
186 int i;
187
188 for(i = 0; i < DB_HEAP_CACHE_LIMIT; i++) {
189 if(bucket_cache[i].heap != NULL) {
190 bucket_cache[i].heap = NULL;
191 break;
192 }
193 }
194}
195
/*
 * Hash a raw key into the key space covered by the heap nodes: the
 * key seeds the PRNG and the generator's first output is used as the
 * hashed key, giving a deterministic key-to-hash mapping.
 *
 * NOTE(review): this reseeds the global random number generator on
 * every call, which perturbs any other user of random_rand() --
 * confirm that this is acceptable in this configuration.
 */
static maxheap_key_t
transform_key(maxheap_key_t key)
{
  random_init(key);
  return random_rand();
}
202
203static int
204heap_read(heap_t *heap, int bucket_id, heap_node_t *node)
205{
206 if(DB_ERROR(storage_read(heap->heap_storage, node,
207 DB_MAX_FILENAME_LENGTH + (unsigned long)bucket_id * sizeof(*node), sizeof(*node)))) {
208 return 0;
209 }
210
211 return 1;
212}
213
214static int
215heap_write(heap_t *heap, int bucket_id, heap_node_t *node)
216{
217 if(DB_ERROR(storage_write(heap->heap_storage, node,
218 DB_MAX_FILENAME_LENGTH + (unsigned long)bucket_id * sizeof(*node), sizeof(*node)))) {
219 return 0;
220 }
221
222 return 1;
223}
224
225static int
226heap_insert(heap_t *heap, maxheap_key_t min, maxheap_key_t max)
227{
228 int i;
229 heap_node_t node;
230
231 PRINTF("DB: Insert node (%ld,%ld) into the heap\n", (long)min, (long)max);
232
233 if(min > max) {
234 return -1;
235 }
236
237 for(i = 0; i < NODE_LIMIT;) {
238 if(heap_read(heap, i, &node) == 0) {
239 PRINTF("DB: Failed to read heap node %d\n", i);
240 return -1;
241 }
242
243 if(EMPTY_NODE(&node)) {
244 node.min = min;
245 node.max = max;
246 if(heap_write(heap, i, &node) == 0) {
247 PRINTF("DB: Failed to write heap node %d\n", i);
248 return -1;
249 }
250 return i;
251 } else if(node.min <= min && max <= node.max) {
252 i = BRANCH_FACTOR * i + 1;
253 } else {
254 i++;
255 }
256 }
257
258 PRINTF("DB: No more nodes available\n");
259 return -1;
260}
261
/*
 * Find the next heap node whose key range covers the hash of the
 * given key, starting the search at node index *iterator. On a
 * match, *iterator is advanced to the index of the node's first
 * child, so repeated calls walk the path of matching nodes from the
 * root towards the leaves. Returns the matching node index, or -1
 * when no further node matches (or on a read error).
 */
static int
heap_find(heap_t *heap, maxheap_key_t key, int *iterator)
{
  maxheap_key_t hashed_key;
  int i;
  int first_child;
  /* Static to keep the node off the stack; this makes the function
     non-reentrant. */
  static heap_node_t node;

  hashed_key = transform_key(key);

  for(i = *iterator; i < NODE_LIMIT;) {
    if(heap_read(heap, i, &node) == 0) {
      break;
    }
    if(EMPTY_NODE(&node)) {
      /* Nodes are allocated densely, so an empty node ends the scan. */
      break;
    } else if(node.min <= hashed_key && hashed_key <= node.max) {
      first_child = BRANCH_FACTOR * i + 1;

      if(first_child >= NODE_LIMIT) {
        break;
      }
      *iterator = first_child;
      return i;
    } else {
      i++;
    }
  }

  return -1;
}
293
294#if HEAP_DEBUG
/*
 * Print the heap tree for debugging, one node per line, indented by
 * one tab per tree level.
 */
static void
heap_print(heap_t *heap)
{
  int level_count;
  int branch_count;
  int branch_amount;
  int i, j;
  heap_node_t node;

  level_count = 0;
  branch_count = 0;
  branch_amount = BRANCH_FACTOR;

  /* Nodes are stored in breadth-first order; stop at the first
     unreadable or empty node. */
  for(i = 0;; i++) {
    if(heap_read(heap, i, &node) == 0 || EMPTY_NODE(&node)) {
      break;
    }

    for(j = 0; j < level_count; j++) {
      PRINTF("\t");
    }
    PRINTF("(%ld,%ld)\n", (long)node.min, (long)node.max);
    /* Advance the level counter once a full level has been printed. */
    if(level_count == 0) {
      level_count++;
    } else if(branch_count + 1 == branch_amount) {
      level_count++;
      branch_count = 0;
      branch_amount = branch_amount * BRANCH_FACTOR;
    } else {
      branch_count++;
    }
  }
}
328#endif /* HEAP_DEBUG */
329
330static int
331bucket_read(heap_t *heap, int bucket_id, bucket_t *bucket)
332{
333 size_t size;
334
335 if(heap->next_free_slot[bucket_id] == 0) {
336 size = BUCKET_SIZE;
337 } else {
338 size = heap->next_free_slot[bucket_id];
339 }
340
341 size *= sizeof(struct key_value_pair);
342
343 if(DB_ERROR(storage_read(heap->bucket_storage, bucket,
344 (unsigned long)bucket_id * sizeof(*bucket), size))) {
345 return 0;
346 }
347
348 return 1;
349}
350
351static struct bucket_cache *
352bucket_load(heap_t *heap, int bucket_id)
353{
354 int i;
355 struct bucket_cache *cache;
356
357 cache = get_cache(heap, bucket_id);
358 if(cache != NULL) {
359 return cache;
360 }
361
362 cache = get_cache_free();
363 if(cache == NULL) {
364 invalidate_cache();
365 cache = get_cache_free();
366 if(cache == NULL) {
367 return NULL;
368 }
369 }
370
371 if(bucket_read(heap, bucket_id, &cache->bucket) == 0) {
372 return NULL;
373 }
374
375 cache->heap = heap;
376 cache->bucket_id = bucket_id;
377
378 if(heap->next_free_slot[bucket_id] == 0) {
379 for(i = 0; i < BUCKET_SIZE; i++) {
380 if(EMPTY_PAIR(&cache->bucket.pairs[i])) {
381 break;
382 }
383 }
384
385 heap->next_free_slot[bucket_id] = i;
386 }
387
388 PRINTF("DB: Loaded bucket %d, the next free slot is %u\n", bucket_id,
389 (unsigned)heap->next_free_slot[bucket_id]);
390
391 return cache;
392}
393
394static int
395bucket_append(heap_t *heap, int bucket_id, struct key_value_pair *pair)
396{
397 unsigned long offset;
398
399 if(heap->next_free_slot[bucket_id] >= BUCKET_SIZE) {
400 PRINTF("DB: Invalid write attempt to the full bucket %d\n", bucket_id);
401 return 0;
402 }
403
404 offset = (unsigned long)bucket_id * sizeof(bucket_t);
405 offset += heap->next_free_slot[bucket_id] * sizeof(struct key_value_pair);
406
407 if(DB_ERROR(storage_write(heap->bucket_storage, pair, offset, sizeof(*pair)))) {
408 return 0;
409 }
410
411 heap->next_free_slot[bucket_id]++;
412
413 return 1;
414}
415
/*
 * Split a full bucket by creating two child heap nodes, each covering
 * one half of the parent's key range. Returns 1 on success, 0 on
 * failure.
 *
 * NOTE(review): contrary to the description in the file header, the
 * existing entries are not copied into the new buckets and the old
 * bucket is not deleted here. Lookups scan the entire heap path, so
 * the old entries remain reachable in the parent bucket -- confirm
 * that this is the intended design.
 */
static int
bucket_split(heap_t *heap, int bucket_id)
{
  heap_node_t node;
  maxheap_key_t mean;
  int small_bucket_index;
  int large_bucket_index;

  if(heap_read(heap, bucket_id, &node) == 0) {
    return 0;
  }

  /* Overflow-safe midpoint of the node's key range. */
  mean = node.min + ((node.max - node.min) / 2);

  PRINTF("DB: Split bucket %d (%ld, %ld) at mean value %ld\n", bucket_id,
         (long)node.min, (long)node.max, (long)mean);

  small_bucket_index = heap_insert(heap, node.min, mean);
  if(small_bucket_index < 0) {
    return 0;
  }

  large_bucket_index = heap_insert(heap, mean + 1, node.max);
  if(large_bucket_index < 0) {
    /*heap_remove(small_bucket);*/
    return 0;
  }

  return 1;
}
446
447int
448insert_item(heap_t *heap, maxheap_key_t key, maxheap_value_t value)
449{
450 int heap_iterator;
451 int bucket_id, last_good_bucket_id;
452 struct key_value_pair pair;
453
454 for(heap_iterator = 0, last_good_bucket_id = -1;;) {
455 bucket_id = heap_find(heap, key, &heap_iterator);
456 if(bucket_id < 0) {
457 break;
458 }
459 last_good_bucket_id = bucket_id;
460 }
461 bucket_id = last_good_bucket_id;
462
463 if(bucket_id < 0) {
464 PRINTF("DB: No bucket for key %ld\n", (long)key);
465 return 0;
466 }
467
468 pair.key = key;
469 pair.value = value;
470
471 if(heap->next_free_slot[bucket_id] == BUCKET_SIZE) {
472 PRINTF("DB: Bucket %d is full\n", bucket_id);
473 if(bucket_split(heap, bucket_id) == 0) {
474 return 0;
475 }
476
477 /* Select one of the newly created buckets. */
478 bucket_id = heap_find(heap, key, &heap_iterator);
479 if(bucket_id < 0) {
480 return 0;
481 }
482 }
483
484 if(bucket_append(heap, bucket_id, &pair) == 0) {
485 return 0;
486 }
487
488 PRINTF("DB: Inserted key %ld (hash %ld) into the heap at bucket_id %d\n",
489 (long)key, (long)transform_key(key), bucket_id);
490
491 return 1;
492}
493
494static db_result_t
495create(index_t *index)
496{
497 char heap_filename[DB_MAX_FILENAME_LENGTH];
498 char bucket_filename[DB_MAX_FILENAME_LENGTH];
499 char *filename;
500 db_result_t result;
501 heap_t *heap;
502
503 heap = NULL;
504 filename = NULL;
505 bucket_filename[0] = '\0';
506
507 /* Generate the heap file, which is the main index file that is
508 referenced from the metadata of the relation. */
509 filename = storage_generate_file("heap",
510 (unsigned long)NODE_LIMIT * sizeof(heap_node_t));
511 if(filename == NULL) {
512 PRINTF("DB: Failed to generate a heap file\n");
513 return DB_INDEX_ERROR;
514 }
515
516 memcpy(index->descriptor_file, filename,
517 sizeof(index->descriptor_file));
518
519 PRINTF("DB: Generated the heap file \"%s\" using %lu bytes of space\n",
520 index->descriptor_file, (unsigned long)NODE_LIMIT * sizeof(heap_node_t));
521
522 index->opaque_data = heap = memb_alloc(&heaps);
523 if(heap == NULL) {
524 PRINTF("DB: Failed to allocate a heap\n");
525 result = DB_ALLOCATION_ERROR;
526 goto end;
527 }
528 heap->heap_storage = -1;
529 heap->bucket_storage = -1;
530
531 /* Generate the bucket file, which stores the (key, value) pairs. */
532 filename = storage_generate_file("bucket",
533 (unsigned long)NODE_LIMIT * sizeof(bucket_t));
534 if(filename == NULL) {
535 PRINTF("DB: Failed to generate a bucket file\n");
536 result = DB_INDEX_ERROR;
537 goto end;
538 }
539 memcpy(bucket_filename, filename, sizeof(bucket_filename));
540
541 PRINTF("DB: Generated the bucket file \"%s\" using %lu bytes of space\n",
542 bucket_filename, (unsigned long)NODE_LIMIT * sizeof(bucket_t));
543
544 /* Initialize the heap. */
545 memset(&heap->next_free_slot, 0, sizeof(heap->next_free_slot));
546
547 heap->heap_storage = storage_open(index->descriptor_file);
548 heap->bucket_storage = storage_open(bucket_filename);
549 if(heap->heap_storage < 0 || heap->bucket_storage < 0) {
550 result = DB_STORAGE_ERROR;
551 goto end;
552 }
553
554 if(DB_ERROR(storage_write(heap->heap_storage, &bucket_filename, 0,
555 sizeof(bucket_filename)))) {
556 result = DB_STORAGE_ERROR;
557 goto end;
558 }
559
560 if(heap_insert(heap, KEY_MIN, KEY_MAX) < 0) {
561 PRINTF("DB: Heap insertion error\n");
562 result = DB_INDEX_ERROR;
563 goto end;
564 }
565
566 PRINTF("DB: Created a heap index\n");
567 result = DB_OK;
568
569 end:
570 if(result != DB_OK) {
571 if(heap != NULL) {
572 storage_close(heap->bucket_storage);
573 storage_close(heap->heap_storage);
574 memb_free(&heaps, heap);
575 }
576 if(index->descriptor_file[0] != '\0') {
577 cfs_remove(heap_filename);
578 index->descriptor_file[0] = '\0';
579 }
580 if(bucket_filename[0] != '\0') {
581 cfs_remove(bucket_filename);
582 }
583 }
584 return result;
585}
586
/*
 * Destroy the index by releasing its in-RAM resources.
 *
 * NOTE(review): the index files themselves are not removed here, and
 * the function unconditionally returns DB_INDEX_ERROR (as does
 * release()) -- confirm that callers treat this value as expected.
 */
static db_result_t
destroy(index_t *index)
{
  release(index);
  return DB_INDEX_ERROR;
}
593
594static db_result_t
595load(index_t *index)
596{
597 heap_t *heap;
598 db_storage_id_t fd;
599 char bucket_file[DB_MAX_FILENAME_LENGTH];
600
601 index->opaque_data = heap = memb_alloc(&heaps);
602 if(heap == NULL) {
603 PRINTF("DB: Failed to allocate a heap\n");
604 return DB_ALLOCATION_ERROR;
605 }
606
607 fd = storage_open(index->descriptor_file);
608 if(fd < 0) {
609 return DB_STORAGE_ERROR;
610 }
611
612 if(storage_read(fd, bucket_file, 0, sizeof(bucket_file)) !=
613 sizeof(bucket_file)) {
614 storage_close(fd);
615 return DB_STORAGE_ERROR;
616 }
617
618 storage_close(fd);
619
620 heap->heap_storage = storage_open(index->descriptor_file);
621 heap->bucket_storage = storage_open(bucket_file);
622
623 memset(&heap->next_free_slot, 0, sizeof(heap->next_free_slot));
624
625 PRINTF("DB: Loaded max-heap index from file %s and bucket file %s\n",
626 index->descriptor_file, bucket_file);
627
628 return DB_OK;
629}
630
/*
 * Release the in-RAM resources of a loaded index: close both storage
 * files and free the heap descriptor.
 *
 * NOTE(review): the function unconditionally returns DB_INDEX_ERROR
 * even when the release succeeds -- confirm that callers ignore or
 * expect this value.
 */
static db_result_t
release(index_t *index)
{
  heap_t *heap;

  heap = index->opaque_data;

  storage_close(heap->bucket_storage);
  storage_close(heap->heap_storage);
  memb_free(&heaps, index->opaque_data);
  return DB_INDEX_ERROR;
}
643
644static db_result_t
645insert(index_t *index, attribute_value_t *key, tuple_id_t value)
646{
647 heap_t *heap;
648 long long_key;
649
650 heap = (heap_t *)index->opaque_data;
651
652 long_key = db_value_to_long(key);
653
654 if(insert_item(heap, (maxheap_key_t)long_key,
655 (maxheap_value_t)value) == 0) {
656 PRINTF("DB: Failed to insert key %ld into a max-heap index\n", long_key);
657 return DB_INDEX_ERROR;
658 }
659 return DB_OK;
660}
661
/*
 * Deleting individual keys is not supported by this index type; this
 * stub always reports an error.
 */
static db_result_t
delete(index_t *index, attribute_value_t *value)
{
  return DB_INDEX_ERROR;
}
667
/*
 * Return the next tuple id matching the iterator's key range, or
 * INVALID_TUPLE when no more matches exist. Search state is kept in
 * a static cache across calls for the same iterator, so the function
 * is not reentrant and supports only one search at a time.
 */
static tuple_id_t
get_next(index_iterator_t *iterator)
{
  struct iteration_cache {
    index_iterator_t *index_iterator; /* Owner of this cached search. */
    int heap_iterator;                /* Current level on the stored path. */
    tuple_id_t found_items;           /* Matches delivered so far. */
    uint8_t start;                    /* Bucket slot to resume scanning at. */
    int visited_buckets[NODE_DEPTH];  /* Root-to-leaf path of bucket ids. */
    int end;                          /* Deepest valid entry in the path. */
  };
  static struct iteration_cache cache;
  heap_t *heap;
  maxheap_key_t key;
  int bucket_id;
  int tmp_heap_iterator;
  int i;
  struct bucket_cache *bcache;
  uint8_t next_free_slot;

  heap = (heap_t *)iterator->index->opaque_data;
  key = VALUE_INT(&iterator->min_value);

  if(cache.index_iterator != iterator || iterator->next_item_no == 0) {
    /* Initialize the cache for a new search. */
    cache.end = NODE_DEPTH - 1;
    cache.found_items = cache.start = 0;
    cache.index_iterator = iterator;

    /* Find the downward path through the heap consisting of all nodes
       that could possibly contain the key. */
    for(i = tmp_heap_iterator = 0; i < NODE_DEPTH; i++) {
      cache.visited_buckets[i] = heap_find(heap, key, &tmp_heap_iterator);
      if(cache.visited_buckets[i] < 0) {
        cache.end = i - 1;
        break;
      }
    }
    cache.heap_iterator = cache.end;
  }

  /*
   * Search for the key in each heap node, starting from the bottom
   * of the heap. Because the bottom nodes cover a very narrow
   * range of keys, there is a much higher chance that the key will be
   * there rather than at the top.
   */
  for(; cache.heap_iterator >= 0; cache.heap_iterator--) {
    bucket_id = cache.visited_buckets[cache.heap_iterator];

    PRINTF("DB: Find key %lu in bucket %d\n", (unsigned long)key, bucket_id);

    if((bcache = bucket_load(heap, bucket_id)) == NULL) {
      PRINTF("DB: Failed to load bucket %d\n", bucket_id);
      return INVALID_TUPLE;
    }

    /* Because keys are stored in an unsorted order in the bucket, we
     * need to search the bucket sequentially.
     * NOTE(review): cache.start is not reset when moving up to the
     * next bucket on the path, so the first cache.start slots of
     * shallower buckets are skipped -- confirm this is intended. */
    next_free_slot = heap->next_free_slot[bucket_id];
    for(i = cache.start; i < next_free_slot; i++) {
      if(bcache->bucket.pairs[i].key == key) {
        if(cache.found_items++ == iterator->next_item_no) {
          iterator->next_item_no++;
          cache.start = i + 1;
          PRINTF("DB: Found key %ld with value %lu\n", (long)key,
                 (unsigned long)bcache->bucket.pairs[i].value);
          return (tuple_id_t)bcache->bucket.pairs[i].value;
        }
      }
    }
  }

  /* A single-key query ends here; a range query advances to the next
     key in the range and restarts the search recursively. */
  if(VALUE_INT(&iterator->min_value) == VALUE_INT(&iterator->max_value)) {
    PRINTF("DB: Could not find key %ld in the index\n", (long)key);
    return INVALID_TUPLE;
  }

  iterator->next_item_no = 0;
  VALUE_INT(&iterator->min_value)++;

  return get_next(iterator);
}
Header for the Coffee file system.
CFS header file.
Database configuration options.
void random_init(unsigned short seed)
Seed the cc2538 random number generator.
Definition: random.c:84
unsigned short random_rand(void)
Generates a new random number using the cc2538 RNG.
Definition: random.c:58
int cfs_remove(const char *name)
Remove a file.
Definition: cfs-cooja.c:150
int memb_free(struct memb *m, void *ptr)
Deallocate a memory block from a memory block previously declared with MEMB().
Definition: memb.c:78
void * memb_alloc(struct memb *m)
Allocate a memory block from a block of memory declared with MEMB().
Definition: memb.c:59
#define MEMB(name, structure, num)
Declare a memory block.
Definition: memb.h:91
static void start(void)
Start measurement.
Memory block allocation routines.
Declarations for the result acquisition API.
The storage interface used by the database.
A set of debugging macros for the IP stack.