2 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdio.h>
#include <assert.h>
#include <ipxe/io.h>
#include <ipxe/list.h>
#include <ipxe/init.h>
#include <ipxe/refcnt.h>
#include <ipxe/malloc.h>
#include <valgrind/memcheck.h>
39 * Dynamic memory allocation
43 /** A free block of memory */
45 /** Size of this block */
49 * This padding exists to cover the "count" field of a
50 * reference counter, in the common case where a reference
51 * counter is the first element of a dynamically-allocated
52 * object. It avoids clobbering the "count" field as soon as
53 * the memory is freed, and so allows for the possibility of
54 * detecting reference counting errors.
56 char pad[ offsetof ( struct refcnt, count ) +
57 sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
58 /** List of free blocks */
59 struct list_head list;
62 #define MIN_MEMBLOCK_SIZE \
63 ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data (returned to the caller of malloc()) */
	char data[0];
};
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/** Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself
 *
 * Aligned for any pointer-sized object; finer physical alignment is
 * provided by alloc_memblock() itself.
 */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
107 * Mark all blocks in free list as defined
110 static inline void valgrind_make_blocks_defined ( void ) {
111 struct memory_block *block;
113 /* Do nothing unless running under Valgrind */
114 if ( RUNNING_ON_VALGRIND <= 0 )
117 /* Traverse free block list, marking each block structure as
118 * defined. Some contortions are necessary to avoid errors
122 /* Mark block list itself as defined */
123 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );
125 /* Mark areas accessed by list_check() as defined */
126 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
127 sizeof ( free_blocks.prev->next ) );
128 VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
129 sizeof ( *free_blocks.next ) );
130 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
131 sizeof ( free_blocks.next->next->prev ) );
133 /* Mark each block in list as defined */
134 list_for_each_entry ( block, &free_blocks, list ) {
136 /* Mark block as defined */
137 VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
139 /* Mark areas accessed by list_check() as defined */
140 VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
141 sizeof ( *block->list.next ) );
142 VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
143 sizeof ( block->list.next->next->prev ) );
148 * Mark all blocks in free list as inaccessible
151 static inline void valgrind_make_blocks_noaccess ( void ) {
152 struct memory_block *block;
153 struct memory_block *prev = NULL;
155 /* Do nothing unless running under Valgrind */
156 if ( RUNNING_ON_VALGRIND <= 0 )
159 /* Traverse free block list, marking each block structure as
160 * inaccessible. Some contortions are necessary to avoid
161 * errors from list_check().
164 /* Mark each block in list as inaccessible */
165 list_for_each_entry ( block, &free_blocks, list ) {
167 /* Mark previous block (if any) as inaccessible. (Current
168 * block will be accessed by list_check().)
171 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
174 /* At the end of the list, list_check() will end up
175 * accessing the first list item. Temporarily mark
176 * this area as defined.
178 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
179 sizeof ( free_blocks.next->prev ) );
181 /* Mark last block (if any) as inaccessible */
183 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
185 /* Mark as inaccessible the area that was temporarily marked
186 * as defined to avoid errors from list_check().
188 VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
189 sizeof ( free_blocks.next->prev ) );
191 /* Mark block list itself as inaccessible */
192 VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
196 * Check integrity of the blocks in the free list
199 static inline void check_blocks ( void ) {
200 struct memory_block *block;
201 struct memory_block *prev = NULL;
206 list_for_each_entry ( block, &free_blocks, list ) {
208 /* Check that list structure is intact */
209 list_check ( &block->list );
211 /* Check that block size is not too small */
212 assert ( block->size >= sizeof ( *block ) );
213 assert ( block->size >= MIN_MEMBLOCK_SIZE );
215 /* Check that block does not wrap beyond end of address space */
216 assert ( ( ( void * ) block + block->size ) >
217 ( ( void * ) block ) );
219 /* Check that blocks remain in ascending order, and
220 * that adjacent blocks have been merged.
223 assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
224 assert ( ( ( void * ) block ) >
225 ( ( ( void * ) prev ) + prev->size ) );
232 * Discard some cached data
234 * @ret discarded Number of cached items discarded
236 static unsigned int discard_cache ( void ) {
237 struct cache_discarder *discarder;
238 unsigned int discarded;
240 for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
241 discarded = discarder->discard();
/**
 * Discard all cached data
 *
 * Repeatedly invokes discard_cache() until no discarder releases
 * anything further.
 */
static void discard_all_cache ( void ) {
	unsigned int discarded;

	do {
		discarded = discard_cache();
	} while ( discarded );
}
261 * Allocate a memory block
263 * @v size Requested size
264 * @v align Physical alignment
265 * @v offset Offset from physical alignment
266 * @ret ptr Memory block, or NULL
268 * Allocates a memory block @b physically aligned as requested. No
269 * guarantees are provided for the alignment of the virtual address.
271 * @c align must be a power of two. @c size may not be zero.
273 void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
274 struct memory_block *block;
279 struct memory_block *pre;
280 struct memory_block *post;
284 assert ( size != 0 );
285 assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
286 valgrind_make_blocks_defined();
289 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
290 * calculate alignment mask.
292 actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
293 ~( MIN_MEMBLOCK_SIZE - 1 ) );
294 align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );
296 DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
297 size, align, offset );
299 /* Search through blocks for the first one with enough space */
300 list_for_each_entry ( block, &free_blocks, list ) {
301 pre_size = ( ( offset - virt_to_phys ( block ) )
303 post_size = ( block->size - pre_size - actual_size );
304 if ( post_size >= 0 ) {
305 /* Split block into pre-block, block, and
306 * post-block. After this split, the "pre"
307 * block is the one currently linked into the
311 block = ( ( ( void * ) pre ) + pre_size );
312 post = ( ( ( void * ) block ) + actual_size );
313 DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n",
314 pre, ( ( ( void * ) pre ) + pre->size ),
316 ( ( ( void * ) pre ) + pre->size ) );
317 /* If there is a "post" block, add it in to
318 * the free list. Leak it if it is too small
319 * (which can happen only at the very end of
322 if ( (size_t) post_size >= MIN_MEMBLOCK_SIZE ) {
323 VALGRIND_MAKE_MEM_UNDEFINED
324 ( post, sizeof ( *post ) );
325 post->size = post_size;
326 list_add ( &post->list, &pre->list );
328 /* Shrink "pre" block, leaving the main block
329 * isolated and no longer part of the free
332 pre->size = pre_size;
333 /* If there is no "pre" block, remove it from
334 * the list. Also remove it (i.e. leak it) if
335 * it is too small, which can happen only at
336 * the very start of the heap.
338 if ( pre_size < MIN_MEMBLOCK_SIZE ) {
339 list_del ( &pre->list );
340 VALGRIND_MAKE_MEM_NOACCESS
341 ( pre, sizeof ( *pre ) );
343 /* Update total free memory */
344 freemem -= actual_size;
345 /* Return allocated block */
346 DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
347 ( ( ( void * ) block ) + size ) );
349 VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
354 /* Try discarding some cached data to free up memory */
355 if ( ! discard_cache() ) {
356 /* Nothing available to discard */
357 DBGC ( &heap, "Failed to allocate %#zx (aligned "
358 "%#zx)\n", size, align );
366 valgrind_make_blocks_noaccess();
371 * Free a memory block
373 * @v ptr Memory allocated by alloc_memblock(), or NULL
374 * @v size Size of the memory
376 * If @c ptr is NULL, no action is taken.
378 void free_memblock ( void *ptr, size_t size ) {
379 struct memory_block *freeing;
380 struct memory_block *block;
381 struct memory_block *tmp;
384 ssize_t gap_after = -1;
386 /* Allow for ptr==NULL */
389 VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );
392 valgrind_make_blocks_defined();
395 /* Round up size to match actual size that alloc_memblock()
398 assert ( size != 0 );
399 actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
400 ~( MIN_MEMBLOCK_SIZE - 1 ) );
402 VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
403 DBGC2 ( &heap, "Freeing [%p,%p)\n",
404 freeing, ( ( ( void * ) freeing ) + size ) );
406 /* Check that this block does not overlap the free list */
408 list_for_each_entry ( block, &free_blocks, list ) {
409 if ( ( ( ( void * ) block ) <
410 ( ( void * ) freeing + actual_size ) ) &&
411 ( ( void * ) freeing <
412 ( ( void * ) block + block->size ) ) ) {
414 DBGC ( &heap, "Double free of [%p,%p) "
415 "overlapping [%p,%p) detected from %p\n",
417 ( ( ( void * ) freeing ) + size ), block,
418 ( ( void * ) block + block->size ),
419 __builtin_return_address ( 0 ) );
424 /* Insert/merge into free list */
425 freeing->size = actual_size;
426 list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
427 /* Calculate gaps before and after the "freeing" block */
428 gap_before = ( ( ( void * ) freeing ) -
429 ( ( ( void * ) block ) + block->size ) );
430 gap_after = ( ( ( void * ) block ) -
431 ( ( ( void * ) freeing ) + freeing->size ) );
432 /* Merge with immediately preceding block, if possible */
433 if ( gap_before == 0 ) {
434 DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
435 ( ( ( void * ) block ) + block->size ), freeing,
436 ( ( ( void * ) freeing ) + freeing->size ),
438 ( ( ( void * ) freeing ) + freeing->size ) );
439 block->size += actual_size;
440 list_del ( &block->list );
441 VALGRIND_MAKE_MEM_NOACCESS ( freeing,
442 sizeof ( *freeing ) );
445 /* Stop processing as soon as we reach a following block */
446 if ( gap_after >= 0 )
450 /* Insert before the immediately following block. If
451 * possible, merge the following block into the "freeing"
454 DBGC2 ( &heap, "[%p,%p)\n",
455 freeing, ( ( ( void * ) freeing ) + freeing->size ) );
456 list_add_tail ( &freeing->list, &block->list );
457 if ( gap_after == 0 ) {
458 DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
459 ( ( ( void * ) freeing ) + freeing->size ), block,
460 ( ( ( void * ) block ) + block->size ), freeing,
461 ( ( ( void * ) block ) + block->size ) );
462 freeing->size += block->size;
463 list_del ( &block->list );
464 VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
467 /* Update free memory counter */
468 freemem += actual_size;
471 valgrind_make_blocks_noaccess();
477 * @v old_ptr Memory previously allocated by malloc(), or NULL
478 * @v new_size Requested size
479 * @ret new_ptr Allocated memory, or NULL
481 * Allocates memory with no particular alignment requirement. @c
482 * new_ptr will be aligned to at least a multiple of sizeof(void*).
483 * If @c old_ptr is non-NULL, then the contents of the newly allocated
484 * memory will be the same as the contents of the previously allocated
485 * memory, up to the minimum of the old and new sizes. The old memory
488 * If allocation fails the previously allocated block is left
489 * untouched and NULL is returned.
491 * Calling realloc() with a new size of zero is a valid way to free a
494 void * realloc ( void *old_ptr, size_t new_size ) {
495 struct autosized_block *old_block;
496 struct autosized_block *new_block;
497 size_t old_total_size;
498 size_t new_total_size;
500 void *new_ptr = NOWHERE;
502 /* Allocate new memory if necessary. If allocation fails,
503 * return without touching the old block.
506 new_total_size = ( new_size +
507 offsetof ( struct autosized_block, data ) );
508 new_block = alloc_memblock ( new_total_size, 1, 0 );
511 new_block->size = new_total_size;
512 VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
513 sizeof ( new_block->size ) );
514 new_ptr = &new_block->data;
515 VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
518 /* Copy across relevant part of the old data region (if any),
519 * then free it. Note that at this point either (a) new_ptr
520 * is valid, or (b) new_size is 0; either way, the memcpy() is
523 if ( old_ptr && ( old_ptr != NOWHERE ) ) {
524 old_block = container_of ( old_ptr, struct autosized_block,
526 VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
527 sizeof ( old_block->size ) );
528 old_total_size = old_block->size;
529 assert ( old_total_size != 0 );
530 old_size = ( old_total_size -
531 offsetof ( struct autosized_block, data ) );
532 memcpy ( new_ptr, old_ptr,
533 ( ( old_size < new_size ) ? old_size : new_size ) );
534 VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
535 free_memblock ( old_block, old_total_size );
539 DBGC ( &heap, "Possible memory corruption detected from %p\n",
540 __builtin_return_address ( 0 ) );
548 * @v size Requested size
549 * @ret ptr Memory, or NULL
551 * Allocates memory with no particular alignment requirement. @c ptr
552 * will be aligned to at least a multiple of sizeof(void*).
554 void * malloc ( size_t size ) {
557 ptr = realloc ( NULL, size );
559 DBGC ( &heap, "Possible memory corruption detected from %p\n",
560 __builtin_return_address ( 0 ) );
568 * @v ptr Memory allocated by malloc(), or NULL
570 * Memory allocated with malloc_dma() cannot be freed with free(); it
571 * must be freed with free_dma() instead.
573 * If @c ptr is NULL, no action is taken.
575 void free ( void *ptr ) {
579 DBGC ( &heap, "Possible memory corruption detected from %p\n",
580 __builtin_return_address ( 0 ) );
585 * Allocate cleared memory
587 * @v size Requested size
588 * @ret ptr Allocated memory
590 * Allocate memory as per malloc(), and zero it.
592 * This function name is non-standard, but pretty intuitive.
593 * zalloc(size) is always equivalent to calloc(1,size)
595 void * zalloc ( size_t size ) {
598 data = malloc ( size );
600 memset ( data, 0, size );
602 DBGC ( &heap, "Possible memory corruption detected from %p\n",
603 __builtin_return_address ( 0 ) );
609 * Add memory to allocation pool
611 * @v start Start address
614 * Adds a block of memory [start,end) to the allocation pool. This is
615 * a one-way operation; there is no way to reclaim this memory.
617 * @c start must be aligned to at least a multiple of sizeof(void*).
619 void mpopulate ( void *start, size_t len ) {
620 /* Prevent free_memblock() from rounding up len beyond the end
621 * of what we were actually given...
623 free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
627 * Initialise the heap
630 static void init_heap ( void ) {
631 VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
632 VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
633 mpopulate ( heap, sizeof ( heap ) );
636 /** Memory allocator initialisation function */
637 struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
638 .initialise = init_heap,
642 * Discard all cached data on shutdown
645 static void shutdown_cache ( int booting __unused ) {
649 /** Memory allocator shutdown function */
650 struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
651 .shutdown = shutdown_cache,
657 * Dump free block list
660 void mdumpfree ( void ) {
661 struct memory_block *block;
663 printf ( "Free block list:\n" );
664 list_for_each_entry ( block, &free_blocks, list ) {
665 printf ( "[%p,%p] (size %#zx)\n", block,
666 ( ( ( void * ) block ) + block->size ), block->size );