The "Holy Bible" for embedded engineers
Memory pool allocation is a critical technique for embedded systems where predictable memory usage and fast allocation/deallocation are essential. Unlike traditional heap allocation, memory pools provide deterministic performance and eliminate fragmentation issues that can be problematic in resource-constrained environments.
Memory pool allocation is a memory management technique where a large block of memory is pre-allocated and divided into smaller, fixed-size chunks called “blocks” or “slots.” Instead of using the system’s heap allocator (like `malloc` and `free`), the application manages these pre-allocated blocks directly.
Memory Pool Structure:
┌─────────────────────────────────────────────────────────────┐
│ Memory Pool │
├─────────────────┬─────────────────┬─────────────────┬───────┤
│ Block 0 │ Block 1 │ Block 2 │ ... │
│ [Free List] │ [Data Area] │ [Data Area] │ │
├─────────────────┼─────────────────┼─────────────────┼───────┤
│ Block N-1 │ Block N │ │ │
│ [Data Area] │ [Data Area] │ │ │
└─────────────────┴─────────────────┴─────────────────┴───────┘
Free List: Block 0 → Block 2 → Block 5 → NULL
Allocated: Block 1, Block 3, Block 4
Use Memory Pools When:
Avoid Memory Pools When:
Pool Memory Layout:
┌─────────────────────────────────────────────────────────────┐
│ Pool Header │
│ ┌─────────────────┬─────────────────┬─────────────────┐ │
│ │ Pool Start │ Pool Size │ Block Size │ │
│ │ Block Count │ Free Count │ Free List │ │
│ └─────────────────┴─────────────────┴─────────────────┘ │
├─────────────────────────────────────────────────────────────┤
│ Block 0 │
│ ┌─────────────────┬─────────────────────────────────────┐ │
│ │ Next Pointer │ Data Area │ │
│ └─────────────────┴─────────────────────────────────────┘ │
├─────────────────────────────────────────────────────────────┤
│ Block 1 │
│ ┌─────────────────┬─────────────────────────────────────┐ │
│ │ Next Pointer │ Data Area │ │
│ └─────────────────┴─────────────────────────────────────┘ │
├─────────────────────────────────────────────────────────────┤
│ ... │
├─────────────────────────────────────────────────────────────┤
│ Block N-1 │
│ ┌─────────────────┬─────────────────────────────────────┐ │
│ │ Next Pointer │ Data Area │ │
│ └─────────────────┴─────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────┘
The free list is a linked list of available blocks. When a block is allocated, it’s removed from the free list. When a block is freed, it’s added back to the free list.
Free List Operations:
Initial State:
Free List: Block 0 → Block 1 → Block 2 → Block 3 → NULL
After Allocating Block 1:
Free List: Block 0 → Block 2 → Block 3 → NULL
After Freeing Block 1:
Free List: Block 1 → Block 0 → Block 2 → Block 3 → NULL
Fixed-size pools allocate blocks of exactly the same size. This is the most common and efficient type of memory pool.
Characteristics:
Use Cases:
Variable-size pools can handle different block sizes, but they’re more complex and can suffer from fragmentation.
Characteristics:
Use Cases:
Multi-pool systems use multiple fixed-size pools for different block sizes.
Characteristics:
Use Cases:
Factors to Consider:
Formula:
Total Pool Size = (Block Size + Overhead) × Number of Blocks
Overhead includes:
Proper alignment is crucial for performance and hardware compatibility:
// Example alignment considerations.
// ALIGN_UP rounds 'size' up to the next multiple of 'align'.
// NOTE(review): the mask trick is valid only when 'align' is a power
// of two — confirm ALIGNMENT is never set to a non-power-of-two value.
#define ALIGNMENT 8 // 8-byte alignment
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
// Block size should be aligned before the pool is carved into blocks
size_t aligned_block_size = ALIGN_UP(block_size, ALIGNMENT);
Common Error Scenarios:
Error Handling Strategies:
// Memory pool block header.
// While a block is FREE, 'next' links it into the pool's free list.
// Once allocated, the caller's payload starts at 'data'; the link
// pointer is overwritten, so the usable payload is
// block_size - offsetof(pool_block_t, data) bytes.
typedef struct pool_block {
struct pool_block* next; // Next free block
uint8_t data[]; // Flexible array member
} pool_block_t;
// Memory pool control structure.
// Filled in by pool_init(), consumed by pool_alloc()/pool_free(),
// and torn down by pool_destroy().
typedef struct {
uint8_t* pool_start; // Start of pool memory
size_t pool_size; // Total pool size
size_t block_size; // Size of each block
size_t block_count; // Number of blocks
pool_block_t* free_list; // List of free blocks
size_t free_count; // Number of free blocks
uint8_t initialized; // Pool initialization flag
} memory_pool_t;
// Pool statistics, returned by value from pool_get_stats().
// NOTE(review): peak_usage, allocation_count, and deallocation_count
// are declared but never updated by the pool code shown here — they
// always read as zero unless tracked elsewhere. Confirm intent.
typedef struct {
size_t total_blocks;
size_t used_blocks;
size_t free_blocks;
size_t peak_usage;
size_t allocation_count;
size_t deallocation_count;
} pool_stats_t;
// Initialize a fixed-size memory pool.
//
// pool:        pool control structure to initialize (must be non-NULL)
// block_size:  size of each block in bytes; must be at least
//              sizeof(pool_block_t*) because the free-list link is
//              stored inside the block while it is free
// block_count: number of blocks in the pool
//
// Returns 0 on success, -1 on invalid parameters, -2 if the backing
// memory allocation fails.
int pool_init(memory_pool_t* pool, size_t block_size, size_t block_count) {
    if (pool == NULL || block_size == 0 || block_count == 0) {
        return -1; // Invalid parameters
    }
    // The 'next' pointer lives inside the block while it is on the free
    // list; a smaller block would make the linking loop below write past
    // the end of the block and corrupt its neighbor.
    if (block_size < sizeof(pool_block_t*)) {
        return -1;
    }
    // Guard against size_t overflow in block_count * block_size, which
    // would silently allocate a pool smaller than the blocks it claims.
    if (block_count > SIZE_MAX / block_size) {
        return -1;
    }
    size_t total_size = block_count * block_size;
    // Allocate the backing memory for the whole pool
    pool->pool_start = malloc(total_size);
    if (pool->pool_start == NULL) {
        return -2; // Memory allocation failed
    }
    // Initialize pool bookkeeping
    pool->pool_size = total_size;
    pool->block_size = block_size;
    pool->block_count = block_count;
    pool->free_count = block_count;
    pool->initialized = 1;
    // Thread every block onto the free list in address order
    pool->free_list = (pool_block_t*)pool->pool_start;
    pool_block_t* current = pool->free_list;
    for (size_t i = 0; i < block_count - 1; i++) {
        current->next = (pool_block_t*)((uint8_t*)current + block_size);
        current = current->next;
    }
    current->next = NULL; // Last block terminates the list
    return 0; // Success
}
// Release a pool's backing memory and mark it uninitialized.
// Safe to call with NULL or with a pool that was never initialized.
void pool_destroy(memory_pool_t* pool) {
    if (pool == NULL || !pool->initialized) {
        return; // Nothing to release
    }
    free(pool->pool_start); // free(NULL) is a no-op, so no guard needed
    pool->pool_start = NULL;
    pool->initialized = 0;
}
// Pop one block off the free list and hand its data area to the
// caller. O(1). Returns NULL if the pool is invalid or exhausted.
void* pool_alloc(memory_pool_t* pool) {
    if (pool == NULL || !pool->initialized || pool->free_count == 0) {
        return NULL; // Invalid pool or no blocks left
    }
    pool_block_t* head = pool->free_list;
    pool->free_list = head->next;
    pool->free_count -= 1;
    return head->data; // Payload begins just past the link pointer
}
// Return a block to the pool's free list. O(1).
//
// ptr must be a pointer previously returned by pool_alloc() on this
// pool. NULL, out-of-pool, and misaligned pointers are ignored.
void pool_free(memory_pool_t* pool, void* ptr) {
    if (pool == NULL || !pool->initialized || ptr == NULL) {
        return;
    }
    // Recover the block header address from the data pointer
    pool_block_t* block = (pool_block_t*)((uint8_t*)ptr - offsetof(pool_block_t, data));
    uint8_t* addr = (uint8_t*)block;
    // Reject pointers outside the pool's memory range
    if (addr < pool->pool_start ||
        addr >= pool->pool_start + pool->pool_size) {
        return; // Invalid pointer
    }
    // Reject pointers that are inside the pool but not on a block
    // boundary; pushing such an address would corrupt the free list.
    if ((size_t)(addr - pool->pool_start) % pool->block_size != 0) {
        return; // Misaligned pointer
    }
    // Sanity check: the free list can never hold more blocks than the
    // pool contains; exceeding it indicates a double free somewhere.
    if (pool->free_count >= pool->block_count) {
        return;
    }
    // Push onto the head of the free list (LIFO)
    block->next = pool->free_list;
    pool->free_list = block;
    pool->free_count++;
}
// Snapshot the pool's current usage counters, returned by value.
// An invalid or uninitialized pool yields an all-zero snapshot.
// peak_usage and the allocation/deallocation counters are not tracked
// by this implementation and are always reported as zero.
pool_stats_t pool_get_stats(memory_pool_t* pool) {
    pool_stats_t stats = {0};
    if (pool == NULL || !pool->initialized) {
        return stats;
    }
    stats.total_blocks = pool->block_count;
    stats.free_blocks = pool->free_count;
    stats.used_blocks = stats.total_blocks - stats.free_blocks;
    return stats;
}
For single-threaded applications, no synchronization is needed:
// Single-threaded pool operations are inherently thread-safe
// No locks or atomic operations required
For multi-threaded applications, synchronization is required:
// Thread-safe pool: pairs a memory_pool_t with the mutex that must be
// held for every operation on it.
// NOTE(review): mutex_t / mutex_lock / mutex_unlock are assumed to be
// supplied by the target RTOS or threading layer — confirm.
typedef struct {
memory_pool_t pool;
mutex_t mutex;
} thread_safe_pool_t;
// Allocate one block while holding the pool's mutex.
// Returns NULL if the underlying pool is invalid or exhausted.
void* thread_safe_pool_alloc(thread_safe_pool_t* tsp) {
    void* block;
    mutex_lock(&tsp->mutex);
    block = pool_alloc(&tsp->pool);
    mutex_unlock(&tsp->mutex);
    return block;
}
// Return a block to the pool while holding the pool's mutex.
// Invalid pointers are silently ignored by the underlying pool_free().
void thread_safe_pool_free(thread_safe_pool_t* tsp, void* ptr) {
    mutex_lock(&tsp->mutex);
    pool_free(&tsp->pool, ptr);
    mutex_unlock(&tsp->mutex);
}
For high-performance applications, lock-free implementations are possible:
// Lock-free pool head, manipulated concurrently with C11 atomics.
// Both members MUST be _Atomic: the code below applies atomic_load(),
// atomic_compare_exchange_weak(), and atomic_fetch_sub() to them, and
// C11 requires those operations to target _Atomic objects (using them
// on plain objects is a constraint violation).
typedef struct {
    _Atomic(pool_block_t*) free_list; // Top of the free-block stack
    atomic_size_t free_count;         // Blocks currently on the stack
} lock_free_pool_t;
// Pop one block from the lock-free free list (Treiber-stack pop):
// retry the CAS until the head has not changed between load and swap.
// Returns the block's data area, or NULL if the pool is exhausted.
// NOTE(review): this classic CAS pop is exposed to the ABA problem —
// if another thread pops old_head, recycles it, and pushes it back
// between our load and the CAS, new_head may be stale. Tagged pointers
// or hazard pointers are the usual mitigations; confirm the usage
// pattern here rules ABA out.
void* lock_free_pool_alloc(lock_free_pool_t* lfp) {
pool_block_t* old_head;
pool_block_t* new_head;
do {
old_head = atomic_load(&lfp->free_list);
if (old_head == NULL) {
return NULL; // Pool exhausted
}
new_head = old_head->next;
} while (!atomic_compare_exchange_weak(&lfp->free_list, &old_head, new_head));
atomic_fetch_sub(&lfp->free_count, 1);
return &old_head->data;
}
// Optimize for cache locality: size the block so it occupies exactly
// one cache line, and align it to a cache-line boundary to avoid
// false sharing between adjacent blocks.
// NOTE(review): CACHE_LINE_SIZE must be #defined elsewhere (commonly
// 64); the sizeof(void*) reservation presumably leaves room for an
// in-band free-list link — confirm. __attribute__((aligned)) is
// GCC/Clang-specific; C11 _Alignas is the portable equivalent.
typedef struct {
uint8_t data[CACHE_LINE_SIZE - sizeof(void*)];
} __attribute__((aligned(CACHE_LINE_SIZE))) cache_aligned_block_t;
Problem: The pool runs out of free blocks. Solution: Monitor pool usage and implement recovery strategies.
// Check pool status before allocation.
// NOTE(review): this check-then-allocate pattern is racy under
// concurrency — another thread may drain the pool between the check
// and the allocation. Prefer calling pool_alloc() directly and
// handling a NULL return.
if (pool_get_stats(&pool).free_blocks == 0) {
// Handle pool exhaustion
handle_pool_exhaustion();
}
Problem: Attempting to free a pointer that did not come from the pool. Solution: Validate pointers before freeing.
// Validate pointer is within pool bounds before freeing.
// NOTE(review): a range check alone still accepts misaligned addresses
// inside the pool; also verify the pointer falls on a block boundary
// ((ptr - pool_start) % block_size == 0) before trusting it.
if ((uint8_t*)ptr < pool->pool_start ||
(uint8_t*)ptr >= pool->pool_start + pool->pool_size) {
// Invalid pointer
return;
}
Problem: Freeing the same block twice. Solution: Implement double-free detection.
// Add a magic number to each block header to detect double free:
// stamp a "live" value on allocation and a "dead" value on free, then
// reject any free whose block is already marked dead.
typedef struct pool_block {
struct pool_block* next;
uint32_t magic; // Magic number for validation
uint8_t data[];
} pool_block_t;
Problem: A buffer overflow or underflow corrupts adjacent blocks. Solution: Implement bounds checking and guard bytes.
// Add guard words around the data area to detect overflow/underflow.
// A flexible array member must be the LAST member of a struct
// (C11 6.7.2.1), so a trailing guard field cannot be declared after
// 'data'. Declare only the leading guard here; the allocator writes a
// trailing guard word into the bytes just past the payload
// (at data + payload_size) and checks both guards on free.
typedef struct pool_block {
    struct pool_block* next;   // Free-list link
    uint32_t guard_before;     // Checked on free: catches underflow
    uint8_t data[];            // Payload; trailing guard is written at
                               // data + payload_size by the allocator
} pool_block_t;
Next Steps: Explore Memory Fragmentation to understand how memory pools prevent fragmentation issues, or dive into Aligned Memory Allocation for hardware-specific memory considerations.