- Added first-fit free-list allocator with block splitting and coalescing. Provides kmalloc(), kfree(), and kcalloc() for kernel-space dynamic memory. - Each block carries an inline header with a magic value (0xCAFEBABE) for heap corruption detection, plus double-free checking. - Memory is obtained from the paging subsystem in 4 KiB page increments. All allocations are 8-byte aligned with a 16-byte minimum block size. - Created freestanding string.c with memset, memcpy, memmove, memcmp, strlen, strcmp, strncmp, strcpy, strncpy — replacing the unavailable libc implementations. - Added documentation in docs/kmalloc.md. Tested: kmalloc(64) returns 0xD0001010 (in kernel heap) and kfree succeeds. Works with both 4 MiB and 128 MiB RAM.
254 lines
7.0 KiB
C
254 lines
7.0 KiB
C
/**
|
|
* @file kmalloc.c
|
|
* @brief Kernel memory allocator implementation.
|
|
*
|
|
* A simple first-fit free-list allocator. Memory is obtained from the paging
|
|
* subsystem in 4 KiB page increments. The allocator maintains a linked list
|
|
* of free blocks. On allocation, the first block large enough is split if
|
|
* needed. On free, adjacent blocks are coalesced to reduce fragmentation.
|
|
*
|
|
* The allocator metadata (block headers) are stored inline at the beginning
|
|
* of each block, so the minimum allocation overhead is sizeof(block_header_t).
|
|
*/
|
|
|
|
#include "kmalloc.h"
|
|
#include "paging.h"
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
/* Debug print helpers defined in kernel.c */
/* offset_print: write a NUL-terminated string to the kernel console. */
extern void offset_print(const char *str);
/* print_hex: write a 32-bit value in hexadecimal (not used in this file yet). */
extern void print_hex(uint32_t val);
|
|
|
|
/**
 * Block header stored inline at the start of every allocated or free block.
 * The usable memory returned to callers starts immediately after this header.
 *
 * NOTE: the field order is part of the on-heap layout (headers are written
 * directly into raw pages) - do not reorder or repack these fields.
 */
typedef struct block_header {
    uint32_t size;             /**< Size of the usable area (excludes header). */
    uint32_t magic;            /**< Must equal BLOCK_MAGIC; checked on alloc and free. */
    struct block_header *next; /**< Next block in the free list (free blocks only). */
    uint8_t is_free;           /**< 1 if block is free, 0 if allocated. */
} block_header_t;
|
|
|
|
/** Magic value stamped into every header; a mismatch indicates heap corruption. */
#define BLOCK_MAGIC 0xCAFEBABE

/** Minimum usable size a free block may have, to avoid excessive fragmentation. */
#define MIN_BLOCK_SIZE 16

/** Alignment for all returned pointers (8-byte aligned). */
#define ALIGNMENT 8

/** Round x up to the next multiple of a. Only valid when a is a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/** Head of the free list, kept sorted by address so coalescing works. NULL when empty. */
static block_header_t *free_list = NULL;

/** Number of 4 KiB pages obtained from the paging subsystem so far. */
static uint32_t heap_pages = 0;
|
|
|
|
/**
|
|
* Request a new page from the paging subsystem and add it to the free list.
|
|
*
|
|
* @return Pointer to the new block header, or NULL on failure.
|
|
*/
|
|
static block_header_t *request_page(void) {
|
|
void *page = paging_alloc_page();
|
|
if (!page) {
|
|
return NULL;
|
|
}
|
|
heap_pages++;
|
|
|
|
block_header_t *block = (block_header_t *)page;
|
|
block->size = 4096 - sizeof(block_header_t);
|
|
block->magic = BLOCK_MAGIC;
|
|
block->next = NULL;
|
|
block->is_free = 1;
|
|
|
|
return block;
|
|
}
|
|
|
|
/**
|
|
* Insert a block into the free list, maintaining address order.
|
|
* Then attempt to coalesce with adjacent blocks.
|
|
*
|
|
* @param block The block to insert.
|
|
*/
|
|
static void insert_free_block(block_header_t *block) {
|
|
block->is_free = 1;
|
|
|
|
/* Find insertion point (maintain address order for coalescing) */
|
|
block_header_t *prev = NULL;
|
|
block_header_t *curr = free_list;
|
|
|
|
while (curr && curr < block) {
|
|
prev = curr;
|
|
curr = curr->next;
|
|
}
|
|
|
|
/* Insert between prev and curr */
|
|
block->next = curr;
|
|
if (prev) {
|
|
prev->next = block;
|
|
} else {
|
|
free_list = block;
|
|
}
|
|
|
|
/* Coalesce with next block if adjacent */
|
|
if (block->next) {
|
|
uint8_t *block_end = (uint8_t *)block + sizeof(block_header_t) + block->size;
|
|
if (block_end == (uint8_t *)block->next) {
|
|
block->size += sizeof(block_header_t) + block->next->size;
|
|
block->next = block->next->next;
|
|
}
|
|
}
|
|
|
|
/* Coalesce with previous block if adjacent */
|
|
if (prev) {
|
|
uint8_t *prev_end = (uint8_t *)prev + sizeof(block_header_t) + prev->size;
|
|
if (prev_end == (uint8_t *)block) {
|
|
prev->size += sizeof(block_header_t) + block->size;
|
|
prev->next = block->next;
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Split a block if it is large enough to hold the requested size
|
|
* plus a new block header and minimum block.
|
|
*
|
|
* @param block The block to potentially split.
|
|
* @param size The requested allocation size (aligned).
|
|
*/
|
|
static void split_block(block_header_t *block, uint32_t size) {
|
|
uint32_t remaining = block->size - size - sizeof(block_header_t);
|
|
|
|
if (remaining >= MIN_BLOCK_SIZE) {
|
|
block_header_t *new_block = (block_header_t *)((uint8_t *)block + sizeof(block_header_t) + size);
|
|
new_block->size = remaining;
|
|
new_block->magic = BLOCK_MAGIC;
|
|
new_block->is_free = 1;
|
|
new_block->next = block->next;
|
|
|
|
block->size = size;
|
|
block->next = new_block;
|
|
}
|
|
}
|
|
|
|
void init_kmalloc(void) {
|
|
/* Allocate the initial heap page */
|
|
block_header_t *initial = request_page();
|
|
if (!initial) {
|
|
offset_print(" KMALLOC: FATAL - could not allocate initial heap page\n");
|
|
return;
|
|
}
|
|
|
|
free_list = initial;
|
|
offset_print(" KMALLOC: initialized with 1 page\n");
|
|
}
|
|
|
|
void *kmalloc(size_t size) {
|
|
if (size == 0) {
|
|
return NULL;
|
|
}
|
|
|
|
/* Align the requested size */
|
|
size = ALIGN_UP(size, ALIGNMENT);
|
|
|
|
/* First-fit search through free list */
|
|
block_header_t *prev = NULL;
|
|
block_header_t *curr = free_list;
|
|
|
|
while (curr) {
|
|
if (curr->magic != BLOCK_MAGIC) {
|
|
offset_print(" KMALLOC: HEAP CORRUPTION detected!\n");
|
|
return NULL;
|
|
}
|
|
|
|
if (curr->is_free && curr->size >= size) {
|
|
/* Found a suitable block */
|
|
split_block(curr, size);
|
|
|
|
/* Remove from free list */
|
|
if (prev) {
|
|
prev->next = curr->next;
|
|
} else {
|
|
free_list = curr->next;
|
|
}
|
|
|
|
curr->is_free = 0;
|
|
curr->next = NULL;
|
|
|
|
/* Return pointer past the header */
|
|
return (void *)((uint8_t *)curr + sizeof(block_header_t));
|
|
}
|
|
|
|
prev = curr;
|
|
curr = curr->next;
|
|
}
|
|
|
|
/* No suitable block found; request a new page */
|
|
block_header_t *new_block = request_page();
|
|
if (!new_block) {
|
|
offset_print(" KMALLOC: out of memory\n");
|
|
return NULL;
|
|
}
|
|
|
|
/* If the new page is large enough, use it directly */
|
|
if (new_block->size >= size) {
|
|
split_block(new_block, size);
|
|
|
|
/* If there's a remainder, add it to the free list */
|
|
if (new_block->next && new_block->next->is_free) {
|
|
insert_free_block(new_block->next);
|
|
}
|
|
|
|
new_block->is_free = 0;
|
|
new_block->next = NULL;
|
|
return (void *)((uint8_t *)new_block + sizeof(block_header_t));
|
|
}
|
|
|
|
/* Page too small (shouldn't happen for reasonable sizes) */
|
|
offset_print(" KMALLOC: requested size too large for single page\n");
|
|
insert_free_block(new_block);
|
|
return NULL;
|
|
}
|
|
|
|
void kfree(void *ptr) {
|
|
if (!ptr) {
|
|
return;
|
|
}
|
|
|
|
/* Get the block header */
|
|
block_header_t *block = (block_header_t *)((uint8_t *)ptr - sizeof(block_header_t));
|
|
|
|
if (block->magic != BLOCK_MAGIC) {
|
|
offset_print(" KMALLOC: kfree() invalid pointer or corruption!\n");
|
|
return;
|
|
}
|
|
|
|
if (block->is_free) {
|
|
offset_print(" KMALLOC: kfree() double free detected!\n");
|
|
return;
|
|
}
|
|
|
|
insert_free_block(block);
|
|
}
|
|
|
|
/**
 * Allocate a zero-initialized array of @p count elements of @p size bytes.
 *
 * Rejects count*size multiplications that overflow size_t, then defers
 * to kmalloc() and clears the memory.
 *
 * @param count Number of elements.
 * @param size  Size of each element in bytes.
 * @return Zeroed allocation, or NULL on overflow or allocation failure.
 */
void *kcalloc(size_t count, size_t size) {
    size_t bytes = count * size;

    /* Detect multiplication overflow (division round-trip check). */
    if (count != 0 && bytes / count != size) {
        return NULL;
    }

    void *mem = kmalloc(bytes);
    if (mem != NULL) {
        memset(mem, 0, bytes);
    }
    return mem;
}
|