/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );
27 #include <ipxe/list.h>
28 #include <ipxe/init.h>
29 #include <ipxe/refcnt.h>
30 #include <ipxe/malloc.h>
31 #include <valgrind/memcheck.h>
/** @file
 *
 * Dynamic memory allocation
 *
 */
39 /** A free block of memory */
41 /** Size of this block */
45 * This padding exists to cover the "count" field of a
46 * reference counter, in the common case where a reference
47 * counter is the first element of a dynamically-allocated
48 * object. It avoids clobbering the "count" field as soon as
49 * the memory is freed, and so allows for the possibility of
50 * detecting reference counting errors.
52 char pad[ offsetof ( struct refcnt, count ) +
53 sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
54 /** List of free blocks */
55 struct list_head list;
58 #define MIN_MEMBLOCK_SIZE \
59 ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data */
	char data[0];
};
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem = 0;

/** Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
103 * Mark all blocks in free list as defined
106 static inline void valgrind_make_blocks_defined ( void ) {
107 struct memory_block *block;
109 if ( RUNNING_ON_VALGRIND <= 0 )
112 /* Traverse free block list, marking each block structure as
113 * defined. Some contortions are necessary to avoid errors
117 /* Mark block list itself as defined */
118 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );
120 /* Mark areas accessed by list_check() as defined */
121 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
122 sizeof ( free_blocks.prev->next ) );
123 VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
124 sizeof ( *free_blocks.next ) );
125 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
126 sizeof ( free_blocks.next->next->prev ) );
128 /* Mark each block in list as defined */
129 list_for_each_entry ( block, &free_blocks, list ) {
131 /* Mark block as defined */
132 VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
134 /* Mark areas accessed by list_check() as defined */
135 VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
136 sizeof ( *block->list.next ) );
137 VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
138 sizeof ( block->list.next->next->prev ) );
143 * Mark all blocks in free list as inaccessible
146 static inline void valgrind_make_blocks_noaccess ( void ) {
147 struct memory_block *block;
148 struct memory_block *prev = NULL;
150 if ( RUNNING_ON_VALGRIND <= 0 )
153 /* Traverse free block list, marking each block structure as
154 * inaccessible. Some contortions are necessary to avoid
155 * errors from list_check().
158 /* Mark each block in list as inaccessible */
159 list_for_each_entry ( block, &free_blocks, list ) {
161 /* Mark previous block (if any) as inaccessible. (Current
162 * block will be accessed by list_check().)
165 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
168 /* At the end of the list, list_check() will end up
169 * accessing the first list item. Temporarily mark
170 * this area as defined.
172 VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
173 sizeof ( free_blocks.next->prev ) );
175 /* Mark last block (if any) as inaccessible */
177 VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
179 /* Mark as inaccessible the area that was temporarily marked
180 * as defined to avoid errors from list_check().
182 VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
183 sizeof ( free_blocks.next->prev ) );
185 /* Mark block list itself as inaccessible */
186 VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
190 * Check integrity of the blocks in the free list
193 static inline void check_blocks ( void ) {
194 struct memory_block *block;
195 struct memory_block *prev = NULL;
200 list_for_each_entry ( block, &free_blocks, list ) {
202 /* Check that list structure is intact */
203 list_check ( &block->list );
205 /* Check that block size is not too small */
206 assert ( block->size >= sizeof ( *block ) );
207 assert ( block->size >= MIN_MEMBLOCK_SIZE );
209 /* Check that block does not wrap beyond end of address space */
210 assert ( ( ( void * ) block + block->size ) >
211 ( ( void * ) block ) );
213 /* Check that blocks remain in ascending order, and
214 * that adjacent blocks have been merged.
217 assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
218 assert ( ( ( void * ) block ) >
219 ( ( ( void * ) prev ) + prev->size ) );
226 * Discard some cached data
228 * @ret discarded Number of cached items discarded
230 static unsigned int discard_cache ( void ) {
231 struct cache_discarder *discarder;
232 unsigned int discarded;
234 for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
235 discarded = discarder->discard();
/**
 * Discard all cached data
 *
 * Repeatedly invokes discard_cache() until no more cached items can
 * be discarded.
 */
static void discard_all_cache ( void ) {
	unsigned int discarded;

	do {
		discarded = discard_cache();
	} while ( discarded );
}
255 * Allocate a memory block
257 * @v size Requested size
258 * @v align Physical alignment
259 * @v offset Offset from physical alignment
260 * @ret ptr Memory block, or NULL
262 * Allocates a memory block @b physically aligned as requested. No
263 * guarantees are provided for the alignment of the virtual address.
265 * @c align must be a power of two. @c size may not be zero.
267 void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
268 struct memory_block *block;
272 struct memory_block *pre;
273 struct memory_block *post;
274 struct memory_block *ptr;
277 assert ( size != 0 );
278 assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
280 valgrind_make_blocks_defined();
283 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
284 * calculate alignment mask.
286 size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
287 align_mask = ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 );
289 DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
290 size, align, offset );
292 /* Search through blocks for the first one with enough space */
293 list_for_each_entry ( block, &free_blocks, list ) {
294 pre_size = ( ( offset - virt_to_phys ( block ) )
296 post_size = ( block->size - pre_size - size );
297 if ( post_size >= 0 ) {
298 /* Split block into pre-block, block, and
299 * post-block. After this split, the "pre"
300 * block is the one currently linked into the
304 block = ( ( ( void * ) pre ) + pre_size );
305 post = ( ( ( void * ) block ) + size );
306 DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n",
307 pre, ( ( ( void * ) pre ) + pre->size ),
309 ( ( ( void * ) pre ) + pre->size ) );
310 /* If there is a "post" block, add it in to
311 * the free list. Leak it if it is too small
312 * (which can happen only at the very end of
315 if ( (size_t) post_size >= MIN_MEMBLOCK_SIZE ) {
316 VALGRIND_MAKE_MEM_DEFINED ( post,
318 post->size = post_size;
319 list_add ( &post->list, &pre->list );
321 /* Shrink "pre" block, leaving the main block
322 * isolated and no longer part of the free
325 pre->size = pre_size;
326 /* If there is no "pre" block, remove it from
327 * the list. Also remove it (i.e. leak it) if
328 * it is too small, which can happen only at
329 * the very start of the heap.
331 if ( pre_size < MIN_MEMBLOCK_SIZE )
332 list_del ( &pre->list );
333 /* Update total free memory */
335 /* Return allocated block */
336 DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
337 ( ( ( void * ) block ) + size ) );
343 /* Try discarding some cached data to free up memory */
344 if ( ! discard_cache() ) {
345 /* Nothing available to discard */
346 DBGC ( &heap, "Failed to allocate %#zx (aligned "
347 "%#zx)\n", size, align );
355 valgrind_make_blocks_noaccess();
360 * Free a memory block
362 * @v ptr Memory allocated by alloc_memblock(), or NULL
363 * @v size Size of the memory
365 * If @c ptr is NULL, no action is taken.
367 void free_memblock ( void *ptr, size_t size ) {
368 struct memory_block *freeing;
369 struct memory_block *block;
370 struct memory_block *tmp;
372 ssize_t gap_after = -1;
374 /* Allow for ptr==NULL */
378 valgrind_make_blocks_defined();
381 /* Round up size to match actual size that alloc_memblock()
384 assert ( size != 0 );
385 size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
387 VALGRIND_MAKE_MEM_DEFINED ( freeing, sizeof ( *freeing ) );
388 DBGC2 ( &heap, "Freeing [%p,%p)\n",
389 freeing, ( ( ( void * ) freeing ) + size ) );
391 /* Check that this block does not overlap the free list */
393 list_for_each_entry ( block, &free_blocks, list ) {
394 if ( ( ( ( void * ) block ) <
395 ( ( void * ) freeing + size ) ) &&
396 ( ( void * ) freeing <
397 ( ( void * ) block + block->size ) ) ) {
399 DBGC ( &heap, "Double free of [%p,%p) "
400 "overlapping [%p,%p) detected from %p\n",
402 ( ( ( void * ) freeing ) + size ), block,
403 ( ( void * ) block + block->size ),
404 __builtin_return_address ( 0 ) );
409 /* Insert/merge into free list */
410 freeing->size = size;
411 list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
412 /* Calculate gaps before and after the "freeing" block */
413 gap_before = ( ( ( void * ) freeing ) -
414 ( ( ( void * ) block ) + block->size ) );
415 gap_after = ( ( ( void * ) block ) -
416 ( ( ( void * ) freeing ) + freeing->size ) );
417 /* Merge with immediately preceding block, if possible */
418 if ( gap_before == 0 ) {
419 DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
420 ( ( ( void * ) block ) + block->size ), freeing,
421 ( ( ( void * ) freeing ) + freeing->size ),
423 ( ( ( void * ) freeing ) + freeing->size ) );
425 list_del ( &block->list );
428 /* Stop processing as soon as we reach a following block */
429 if ( gap_after >= 0 )
433 /* Insert before the immediately following block. If
434 * possible, merge the following block into the "freeing"
437 DBGC2 ( &heap, "[%p,%p)\n",
438 freeing, ( ( ( void * ) freeing ) + freeing->size ) );
439 list_add_tail ( &freeing->list, &block->list );
440 if ( gap_after == 0 ) {
441 DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
442 ( ( ( void * ) freeing ) + freeing->size ), block,
443 ( ( ( void * ) block ) + block->size ), freeing,
444 ( ( ( void * ) block ) + block->size ) );
445 freeing->size += block->size;
446 list_del ( &block->list );
449 /* Update free memory counter */
453 valgrind_make_blocks_noaccess();
459 * @v old_ptr Memory previously allocated by malloc(), or NULL
460 * @v new_size Requested size
461 * @ret new_ptr Allocated memory, or NULL
463 * Allocates memory with no particular alignment requirement. @c
464 * new_ptr will be aligned to at least a multiple of sizeof(void*).
465 * If @c old_ptr is non-NULL, then the contents of the newly allocated
466 * memory will be the same as the contents of the previously allocated
467 * memory, up to the minimum of the old and new sizes. The old memory
470 * If allocation fails the previously allocated block is left
471 * untouched and NULL is returned.
473 * Calling realloc() with a new size of zero is a valid way to free a
476 void * realloc ( void *old_ptr, size_t new_size ) {
477 struct autosized_block *old_block;
478 struct autosized_block *new_block;
479 size_t old_total_size;
480 size_t new_total_size;
482 void *new_ptr = NOWHERE;
484 /* Allocate new memory if necessary. If allocation fails,
485 * return without touching the old block.
488 new_total_size = ( new_size +
489 offsetof ( struct autosized_block, data ) );
490 new_block = alloc_memblock ( new_total_size, 1, 0 );
493 VALGRIND_MAKE_MEM_UNDEFINED ( new_block, offsetof ( struct autosized_block, data ) );
494 new_block->size = new_total_size;
495 VALGRIND_MAKE_MEM_NOACCESS ( new_block, offsetof ( struct autosized_block, data ) );
496 new_ptr = &new_block->data;
497 VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
500 /* Copy across relevant part of the old data region (if any),
501 * then free it. Note that at this point either (a) new_ptr
502 * is valid, or (b) new_size is 0; either way, the memcpy() is
505 if ( old_ptr && ( old_ptr != NOWHERE ) ) {
506 old_block = container_of ( old_ptr, struct autosized_block,
508 VALGRIND_MAKE_MEM_DEFINED ( old_block, offsetof ( struct autosized_block, data ) );
509 old_total_size = old_block->size;
510 assert ( old_total_size != 0 );
511 old_size = ( old_total_size -
512 offsetof ( struct autosized_block, data ) );
513 memcpy ( new_ptr, old_ptr,
514 ( ( old_size < new_size ) ? old_size : new_size ) );
515 free_memblock ( old_block, old_total_size );
516 VALGRIND_MAKE_MEM_NOACCESS ( old_block, offsetof ( struct autosized_block, data ) );
517 VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
521 DBGC ( &heap, "Possible memory corruption detected from %p\n",
522 __builtin_return_address ( 0 ) );
530 * @v size Requested size
531 * @ret ptr Memory, or NULL
533 * Allocates memory with no particular alignment requirement. @c ptr
534 * will be aligned to at least a multiple of sizeof(void*).
536 void * malloc ( size_t size ) {
539 ptr = realloc ( NULL, size );
541 DBGC ( &heap, "Possible memory corruption detected from %p\n",
542 __builtin_return_address ( 0 ) );
550 * @v ptr Memory allocated by malloc(), or NULL
552 * Memory allocated with malloc_dma() cannot be freed with free(); it
553 * must be freed with free_dma() instead.
555 * If @c ptr is NULL, no action is taken.
557 void free ( void *ptr ) {
561 DBGC ( &heap, "Possible memory corruption detected from %p\n",
562 __builtin_return_address ( 0 ) );
567 * Allocate cleared memory
569 * @v size Requested size
570 * @ret ptr Allocated memory
572 * Allocate memory as per malloc(), and zero it.
574 * This function name is non-standard, but pretty intuitive.
575 * zalloc(size) is always equivalent to calloc(1,size)
577 void * zalloc ( size_t size ) {
580 data = malloc ( size );
582 memset ( data, 0, size );
584 DBGC ( &heap, "Possible memory corruption detected from %p\n",
585 __builtin_return_address ( 0 ) );
591 * Add memory to allocation pool
593 * @v start Start address
596 * Adds a block of memory [start,end) to the allocation pool. This is
597 * a one-way operation; there is no way to reclaim this memory.
599 * @c start must be aligned to at least a multiple of sizeof(void*).
601 void mpopulate ( void *start, size_t len ) {
602 /* Prevent free_memblock() from rounding up len beyond the end
603 * of what we were actually given...
605 free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
609 * Initialise the heap
612 static void init_heap ( void ) {
613 VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
614 mpopulate ( heap, sizeof ( heap ) );
617 /** Memory allocator initialisation function */
618 struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
619 .initialise = init_heap,
623 * Discard all cached data on shutdown
626 static void shutdown_cache ( int booting __unused ) {
630 /** Memory allocator shutdown function */
631 struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
632 .shutdown = shutdown_cache,
638 * Dump free block list
641 void mdumpfree ( void ) {
642 struct memory_block *block;
644 printf ( "Free block list:\n" );
645 list_for_each_entry ( block, &free_blocks, list ) {
646 printf ( "[%p,%p] (size %#zx)\n", block,
647 ( ( ( void * ) block ) + block->size ), block->size );