In 1997, the Mars Pathfinder mission nearly failed when the lander suffered repeated system resets during surface operations. The cause was traced to a priority inversion in the VxWorks RTOS, and the symptom manifested through memory management: a high-priority task blocked on a mutex guarding a shared memory region held by a lower-priority task.
This incident illustrates a fundamental truth: memory management is on the critical path of every real-time operation. Every allocation, every access, every deallocation must complete within bounded, predictable time. General-purpose memory managers, optimized for throughput and space efficiency, are unsuitable for real-time systems.
By the end of this page, you will understand why general-purpose memory management fails real-time requirements, the specialized allocation strategies used in RTOS, memory protection and isolation techniques, page locking and fault prevention, and how to design memory systems that guarantee bounded allocation time.
Memory management in real-time systems faces unique challenges that either don't exist in general-purpose systems or can simply be tolerated there.
| Scenario | Typical Time | Worst-Case Time | Variability Ratio |
|---|---|---|---|
| Small allocation, heap warm | 50 ns | 500 ns | 10x |
| Medium allocation, fragmented heap | 200 ns | 50 μs | 250x |
| Large allocation (mmap) | 1 μs | 10 ms | 10,000x |
| Allocation requiring brk() | 500 ns | 5 ms | 10,000x |
| With page fault on access | 100 ns | 10 ms+ | 100,000x+ |
A 10,000x variability between best and worst case means your 'fast' 100ns allocation might suddenly take 1ms. For a system with a 1ms deadline, that single allocation alone can cause deadline failure. Real-time systems require allocation strategies whose worst case equals, or stays close to, the average case.
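You can observe this variability directly on a development host. Below is a minimal measurement sketch (not part of any RTOS; the iteration count and allocation size are arbitrary choices) that records average and worst-case malloc() latency:

```c
/* Measure malloc() latency jitter - a host-side experiment sketch.
 * Numbers vary by allocator, load, and platform; the point is the
 * gap between average and worst case. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ITERATIONS 100000
#define ALLOC_SIZE 256

int main(void) {
    long worst_ns = 0, total_ns = 0;

    for (int i = 0; i < ITERATIONS; i++) {
        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        void *p = malloc(ALLOC_SIZE);
        clock_gettime(CLOCK_MONOTONIC, &t1);

        long ns = (t1.tv_sec - t0.tv_sec) * 1000000000L
                + (t1.tv_nsec - t0.tv_nsec);
        total_ns += ns;
        if (ns > worst_ns) worst_ns = ns;
        free(p);
    }

    long avg_ns = total_ns / ITERATIONS;
    printf("avg: %ld ns, worst: %ld ns (ratio %ldx)\n",
           avg_ns, worst_ns, avg_ns ? worst_ns / avg_ns : 0);
    return 0;
}
```

Even this simple loop typically shows a worst case one to three orders of magnitude above the average, and it only exercises the warm-heap path from the first table row.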
The most deterministic approach to memory management is static allocation—allocating all memory at compile time or system initialization, with no dynamic allocation during runtime operation.
```c
/* Static Allocation Patterns for Real-Time Systems
 * Application-defined types (sensor_reading_t, network_packet_t,
 * log_entry_t, task_control_block_t) and the spinlock API are
 * assumed to be declared elsewhere. */

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* ============================================
 * Pattern 1: Compile-Time Static Arrays
 * ============================================ */

/* All buffers allocated at compile time */
#define MAX_SENSOR_READINGS 1000
#define MAX_NETWORK_PACKETS 64
#define MAX_LOG_ENTRIES     256

/* Static arrays - no allocation ever */
static sensor_reading_t sensor_buffer[MAX_SENSOR_READINGS];
static network_packet_t packet_buffer[MAX_NETWORK_PACKETS];
static log_entry_t      log_buffer[MAX_LOG_ENTRIES];

/* Circular buffer indices */
static volatile uint32_t sensor_head = 0, sensor_tail = 0;

/* Add reading - O(1), no allocation */
bool add_sensor_reading(const sensor_reading_t *reading) {
    uint32_t next_head = (sensor_head + 1) % MAX_SENSOR_READINGS;
    if (next_head == sensor_tail) {
        return false;  /* Buffer full - deterministic failure */
    }
    sensor_buffer[sensor_head] = *reading;
    sensor_head = next_head;
    return true;
}

/* ============================================
 * Pattern 2: Pre-allocated Task Stacks
 * ============================================ */

#define NUM_TASKS       8
#define TASK_STACK_SIZE 4096

/* Static stack space for all tasks */
static uint8_t task_stacks[NUM_TASKS][TASK_STACK_SIZE] __attribute__((aligned(8)));

/* Static TCB structures */
static task_control_block_t task_tcbs[NUM_TASKS];

/* Initialize task with static resources */
void init_task(uint32_t task_id, void (*entry)(void*), void *arg) {
    if (task_id >= NUM_TASKS) {
        return;  /* Invalid task ID */
    }

    task_control_block_t *tcb = &task_tcbs[task_id];

    /* Use pre-allocated stack */
    tcb->stack_base = task_stacks[task_id];
    tcb->stack_size = TASK_STACK_SIZE;
    tcb->stack_ptr  = &task_stacks[task_id][TASK_STACK_SIZE - 64];

    /* Set up initial context */
    setup_initial_context(tcb, entry, arg);
}

/* ============================================
 * Pattern 3: Message Pools with Static Storage
 * ============================================ */

#define MAX_MESSAGES 32
#define MESSAGE_SIZE 128

typedef struct {
    uint8_t data[MESSAGE_SIZE];
    bool in_use;
} static_message_t;

static static_message_t message_pool[MAX_MESSAGES];
static spinlock_t pool_lock;

/* Allocate message - O(n) worst case, but n is fixed and small */
uint8_t *alloc_message(void) {
    spin_lock(&pool_lock);
    for (int i = 0; i < MAX_MESSAGES; i++) {
        if (!message_pool[i].in_use) {
            message_pool[i].in_use = true;
            spin_unlock(&pool_lock);
            return message_pool[i].data;
        }
    }
    spin_unlock(&pool_lock);
    return NULL;  /* Pool exhausted - deterministic */
}

/* Free message - O(1) */
void free_message(uint8_t *msg) {
    /* Calculate pool index from pointer */
    ptrdiff_t offset = msg - message_pool[0].data;
    uint32_t index = offset / sizeof(static_message_t);

    if (index < MAX_MESSAGES) {
        spin_lock(&pool_lock);
        message_pool[index].in_use = false;
        spin_unlock(&pool_lock);
    }
}
```

Memory pools (also called slab allocators, object pools, or fixed-block allocators) provide a middle ground between fully static allocation and general-purpose dynamic allocation. They offer O(1) deterministic allocation for fixed-size objects.
```c
/* Deterministic Fixed-Size Memory Pool Implementation */

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Pool block header - embedded in each free block */
typedef struct pool_block {
    struct pool_block *next;
} pool_block_t;

/* Memory pool structure */
typedef struct {
    pool_block_t *free_list;      /* Head of free block list */
    uint8_t *pool_start;          /* Start of pool memory */
    uint8_t *pool_end;            /* End of pool memory */
    size_t block_size;            /* Size of each block */
    size_t total_blocks;          /* Total blocks in pool */
    volatile size_t free_blocks;  /* Currently free blocks */
    spinlock_t lock;              /* Thread safety */
#ifdef POOL_STATS
    size_t alloc_count;           /* Total allocations */
    size_t free_count;            /* Total frees */
    size_t high_water_mark;       /* Maximum blocks in use */
#endif
} memory_pool_t;

/* Initialize pool from static memory region */
void pool_init(memory_pool_t *pool, void *memory,
               size_t memory_size, size_t block_size) {
    /* Ensure block size can hold the free list pointer */
    if (block_size < sizeof(pool_block_t)) {
        block_size = sizeof(pool_block_t);
    }

    /* Align block size */
    block_size = (block_size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);

    pool->block_size = block_size;
    pool->pool_start = memory;
    pool->pool_end = (uint8_t*)memory + memory_size;
    pool->total_blocks = memory_size / block_size;
    pool->free_blocks = pool->total_blocks;
    pool->free_list = NULL;
    spinlock_init(&pool->lock);

    /* Link all blocks into free list */
    uint8_t *block = pool->pool_start;
    for (size_t i = 0; i < pool->total_blocks; i++) {
        pool_block_t *node = (pool_block_t *)block;
        node->next = pool->free_list;
        pool->free_list = node;
        block += block_size;
    }

#ifdef POOL_STATS
    pool->alloc_count = 0;
    pool->free_count = 0;
    pool->high_water_mark = 0;
#endif
}

/* Allocate block - O(1) GUARANTEED */
void *pool_alloc(memory_pool_t *pool) {
    spin_lock(&pool->lock);

    pool_block_t *block = pool->free_list;
    if (block != NULL) {
        /* Remove from free list - single pointer update */
        pool->free_list = block->next;
        pool->free_blocks--;

#ifdef POOL_STATS
        pool->alloc_count++;
        size_t in_use = pool->total_blocks - pool->free_blocks;
        if (in_use > pool->high_water_mark) {
            pool->high_water_mark = in_use;
        }
#endif
    }

    spin_unlock(&pool->lock);
    return block;  /* NULL if pool exhausted */
}

/* Free block - O(1) GUARANTEED */
void pool_free(memory_pool_t *pool, void *ptr) {
    if (ptr == NULL) return;

    /* Validate pointer is within pool */
    uint8_t *block = (uint8_t *)ptr;
    if (block < pool->pool_start || block >= pool->pool_end) {
        /* Invalid pointer - panic or log error */
        return;
    }

    /* Validate alignment */
    ptrdiff_t offset = block - pool->pool_start;
    if (offset % pool->block_size != 0) {
        /* Misaligned pointer */
        return;
    }

    spin_lock(&pool->lock);

    /* Add to front of free list - single pointer update */
    pool_block_t *node = (pool_block_t *)ptr;
    node->next = pool->free_list;
    pool->free_list = node;
    pool->free_blocks++;

#ifdef POOL_STATS
    pool->free_count++;
#endif

    spin_unlock(&pool->lock);
}

/* Check if pool can satisfy allocation - O(1) */
bool pool_can_alloc(memory_pool_t *pool) {
    return pool->free_blocks > 0;
}

/* ============================================
 * Lock-Free Pool for ISR-Safe Allocation
 * ============================================ */

typedef struct {
    pool_block_t * volatile free_list;
    uint8_t *pool_start;
    size_t block_size;
    size_t total_blocks;
} lockfree_pool_t;

/* ISR-safe allocation using compare-and-swap.
 * Note: a bare CAS pop is subject to the ABA problem if blocks can be
 * freed and reallocated concurrently; production designs pair the head
 * pointer with a generation counter. */
void *lockfree_pool_alloc(lockfree_pool_t *pool) {
    pool_block_t *head;
    pool_block_t *next;

    do {
        head = pool->free_list;
        if (head == NULL) {
            return NULL;  /* Pool exhausted */
        }
        next = head->next;
    } while (!__sync_bool_compare_and_swap(&pool->free_list, head, next));

    return head;
}

void lockfree_pool_free(lockfree_pool_t *pool, void *ptr) {
    pool_block_t *block = (pool_block_t *)ptr;
    pool_block_t *head;

    do {
        head = pool->free_list;
        block->next = head;
    } while (!__sync_bool_compare_and_swap(&pool->free_list, head, block));
}
```

For variable-size allocations, use multiple pools with different block sizes (e.g., 32, 64, 128, 256, 512 bytes). Round up each allocation to the next pool size. This provides O(1) allocation while handling various sizes. The trade-off is some internal fragmentation, as the sketch below illustrates.
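A minimal sketch of that tiering strategy, reusing the pool_alloc()/pool_free() API above (the class sizes and the assumption that each memory_pool_t was initialized elsewhere with pool_init() are illustrative):

```c
/* Tiered fixed-size pools: round each request up to the smallest
 * class that fits. Allocation cost is a bounded 5-iteration scan
 * plus one O(1) pool_alloc - still deterministic. */
#define NUM_SIZE_CLASSES 5

static const size_t class_sizes[NUM_SIZE_CLASSES] = {32, 64, 128, 256, 512};
static memory_pool_t size_class_pools[NUM_SIZE_CLASSES];  /* init'd elsewhere */

void *rt_malloc(size_t size) {
    for (int i = 0; i < NUM_SIZE_CLASSES; i++) {
        if (size <= class_sizes[i]) {
            return pool_alloc(&size_class_pools[i]);
        }
    }
    return NULL;  /* Larger than the biggest class - deterministic failure */
}

void rt_free(void *ptr) {
    if (ptr == NULL) return;
    /* Identify the owning pool by address range - bounded scan */
    for (int i = 0; i < NUM_SIZE_CLASSES; i++) {
        memory_pool_t *p = &size_class_pools[i];
        if ((uint8_t *)ptr >= p->pool_start && (uint8_t *)ptr < p->pool_end) {
            pool_free(p, ptr);
            return;
        }
    }
}
```

A request for 100 bytes lands in the 128-byte class and wastes 28 bytes; that bounded internal fragmentation is the price of O(1) behavior.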
On systems with virtual memory, page faults introduce catastrophic non-determinism. A single page fault can trigger disk I/O with millisecond latencies—completely incompatible with microsecond real-time requirements.
| Access Type | Latency | Impact on 1ms Deadline |
|---|---|---|
| L1 Cache Hit | ~1 ns | Negligible |
| L2 Cache Hit | ~10 ns | Negligible |
| L3 Cache Hit | ~40 ns | Negligible |
| Main Memory (DRAM) | ~100 ns | 0.01% of deadline |
| Minor Page Fault (copy-on-write) | ~1-10 μs | 0.1-1% of deadline |
| Major Page Fault (disk read) | 5-15 ms | 500-1500% of deadline - FATAL |
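Before applying mitigations, it helps to confirm whether a given code path faults at all. The following minimal sketch uses POSIX getrusage() to snapshot the kernel's per-process fault counters around a critical section (the function-pointer framing is just for illustration):

```c
/* Verify that a critical section causes no page faults.
 * ru_majflt counts faults that required I/O; ru_minflt counts
 * faults satisfied without I/O. Both should stay flat. */
#include <stdio.h>
#include <sys/resource.h>

void check_fault_free(void (*critical_section)(void)) {
    struct rusage before, after;

    getrusage(RUSAGE_SELF, &before);
    critical_section();
    getrusage(RUSAGE_SELF, &after);

    long major = after.ru_majflt - before.ru_majflt;
    long minor = after.ru_minflt - before.ru_minflt;

    if (major != 0 || minor != 0) {
        fprintf(stderr, "WARNING: %ld major, %ld minor page faults "
                        "during critical section\n", major, minor);
    }
}
```

If the counters move during a time-critical region, the locking and pre-faulting techniques below are the remedy.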
```c
/* Memory Locking for Real-Time Applications */

#include <sys/mman.h>
#include <sys/resource.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

/* ============================================
 * System-Wide Memory Locking
 * ============================================ */

void lock_all_memory(void) {
    /* MCL_CURRENT: Lock all currently mapped pages */
    /* MCL_FUTURE:  Lock all pages mapped in the future */
    if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
        perror("mlockall failed");
        fprintf(stderr, "Try running as root or increase RLIMIT_MEMLOCK\n");
        exit(1);
    }
    printf("All memory locked successfully\n");
}

/* ============================================
 * Pre-faulting Stack Memory
 * ============================================ */

#define STACK_SIZE (1024 * 1024)  /* 1MB stack */

void prefault_stack(void) {
    /* Allocate array on stack and touch all pages */
    volatile char stack_buffer[STACK_SIZE];

    /* Touch each page to ensure it's faulted in */
    long page_size = sysconf(_SC_PAGESIZE);
    for (size_t i = 0; i < STACK_SIZE; i += (size_t)page_size) {
        stack_buffer[i] = 0;
    }
    /* After this function returns, stack pages are resident */
}

/* ============================================
 * Pre-faulting Heap Memory
 * ============================================ */

void prefault_heap(void *buffer, size_t size) {
    long page_size = sysconf(_SC_PAGESIZE);
    volatile char *ptr = (volatile char *)buffer;

    /* Touch each page */
    for (size_t i = 0; i < size; i += (size_t)page_size) {
        ptr[i] = 0;  /* Write triggers copy-on-write if needed */
    }
}

/* ============================================
 * Real-Time Memory Preparation
 * ============================================ */

typedef struct {
    void *buffer;
    size_t size;
} rt_buffer_t;

#define MAX_RT_BUFFERS 16
static rt_buffer_t rt_buffers[MAX_RT_BUFFERS];
static int rt_buffer_count = 0;

void prepare_realtime_buffer(size_t size) {
    if (rt_buffer_count >= MAX_RT_BUFFERS) {
        fprintf(stderr, "Too many RT buffers\n");
        exit(1);
    }

    /* Allocate aligned memory */
    void *buffer;
    if (posix_memalign(&buffer, sysconf(_SC_PAGESIZE), size) != 0) {
        perror("posix_memalign failed");
        exit(1);
    }

    /* Lock the memory */
    if (mlock(buffer, size) != 0) {
        perror("mlock failed");
        free(buffer);
        exit(1);
    }

    /* Pre-fault all pages */
    prefault_heap(buffer, size);

    /* Register for cleanup */
    rt_buffers[rt_buffer_count].buffer = buffer;
    rt_buffers[rt_buffer_count].size = size;
    rt_buffer_count++;

    printf("Allocated and locked %zu bytes at %p\n", size, buffer);
}

/* ============================================
 * Complete Real-Time Memory Setup
 * ============================================ */

void setup_realtime_memory(void) {
    /* 1. Check and set resource limits */
    struct rlimit rlim;
    getrlimit(RLIMIT_MEMLOCK, &rlim);
    printf("Current MEMLOCK limit: %lu (soft), %lu (hard)\n",
           (unsigned long)rlim.rlim_cur, (unsigned long)rlim.rlim_max);

    if (rlim.rlim_cur < 1024*1024*100) {  /* 100MB */
        rlim.rlim_cur = rlim.rlim_max = RLIM_INFINITY;
        if (setrlimit(RLIMIT_MEMLOCK, &rlim) != 0) {
            fprintf(stderr, "Warning: Cannot increase MEMLOCK limit\n");
        }
    }

    /* 2. Lock all current and future memory */
    lock_all_memory();

    /* 3. Pre-fault stack */
    prefault_stack();

    /* 4. Pre-allocate and lock application buffers */
    prepare_realtime_buffer(1024 * 1024);  /* 1MB working buffer */
    prepare_realtime_buffer(64 * 1024);    /* 64KB message buffer */

    /* 5. Disable core dumps (they cause page-out) */
    struct rlimit core_rlim = {0, 0};
    setrlimit(RLIMIT_CORE, &core_rlim);

    printf("Real-time memory setup complete\n");
}
```

Locking memory has system-wide implications: locked pages cannot be swapped, potentially starving other processes of memory. On embedded systems without swap, this may be less critical. On shared systems, coordinate the memory-locking budget across all RT processes and ensure sufficient physical RAM; the sketch below helps make that budget visible.
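To audit how much of the locking budget a process actually consumes, one Linux-specific approach (a minimal sketch; VmLck is a documented field of /proc/self/status) is to read the locked-page count directly:

```c
/* Report this process's locked memory (Linux-specific sketch).
 * VmLck in /proc/self/status shows locked pages in kilobytes. */
#include <stdio.h>
#include <string.h>

long locked_memory_kb(void) {
    FILE *f = fopen("/proc/self/status", "r");
    if (f == NULL) return -1;

    char line[256];
    long kb = -1;
    while (fgets(line, sizeof(line), f) != NULL) {
        if (strncmp(line, "VmLck:", 6) == 0) {
            sscanf(line + 6, "%ld", &kb);  /* value is in kB */
            break;
        }
    }
    fclose(f);
    return kb;
}
```

Comparing this value against RLIMIT_MEMLOCK and against physical RAM across all RT processes makes the locking budget explicit rather than implicit.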
Real-time systems often run multiple tasks with different criticality levels. Memory protection ensures that faults in low-criticality tasks don't corrupt high-criticality task memory. Memory partitioning provides spatial isolation for predictable memory access.
Memory Protection Unit (MPU)
MPUs are simpler than MMUs—they provide protection without virtual-to-physical translation:
```c
/* ARM Cortex-M MPU Configuration for Task Isolation */

#include "core_cm4.h"

/* MPU region numbers */
#define MPU_REGION_FLASH       0
#define MPU_REGION_RAM         1
#define MPU_REGION_PERIPH      2
#define MPU_REGION_TASK_STACK  3
#define MPU_REGION_TASK_DATA   4

/* Note: CMSIS provides MPU->RNR/RBAR/RASR and the ENABLE/PRIVDEFENA
 * masks; the SIZE/AP/XN attribute macros used below are illustrative
 * shorthand for the corresponding RASR bit-field encodings. */

/* Configure MPU for real-time system */
void mpu_init(void) {
    /* Disable MPU during configuration */
    MPU->CTRL = 0;

    /* Region 0: Flash (code) - Execute, Read-Only */
    MPU->RNR  = MPU_REGION_FLASH;
    MPU->RBAR = FLASH_BASE;
    MPU->RASR = MPU_RASR_ENABLE_Msk |
                MPU_RASR_SIZE_256KB |
                MPU_RASR_AP_RO_RO |   /* Privileged RO, User RO */
                MPU_RASR_XN_NO;       /* Execute allowed */

    /* Region 1: RAM (shared) - No Execute */
    MPU->RNR  = MPU_REGION_RAM;
    MPU->RBAR = SRAM_BASE;
    MPU->RASR = MPU_RASR_ENABLE_Msk |
                MPU_RASR_SIZE_128KB |
                MPU_RASR_AP_RW_RO |   /* Privileged RW, User RO */
                MPU_RASR_XN_YES;      /* No execute */

    /* Region 2: Peripherals - Privileged Only */
    MPU->RNR  = MPU_REGION_PERIPH;
    MPU->RBAR = PERIPH_BASE;
    MPU->RASR = MPU_RASR_ENABLE_Msk |
                MPU_RASR_SIZE_512MB |
                MPU_RASR_AP_RW_NA |   /* Privileged RW, User None */
                MPU_RASR_XN_YES |
                MPU_RASR_DEVICE;      /* Device memory type */

    /* Enable MPU with default map for privileged code */
    MPU->CTRL = MPU_CTRL_ENABLE_Msk | MPU_CTRL_PRIVDEFENA_Msk;
    __DSB();
    __ISB();
}

/* Reconfigure MPU for specific task */
void mpu_configure_for_task(task_t *task) {
    /* Region 3: Task-specific stack */
    MPU->RNR  = MPU_REGION_TASK_STACK;
    MPU->RBAR = (uint32_t)task->stack_base;
    MPU->RASR = MPU_RASR_ENABLE_Msk |
                calc_size_bits(task->stack_size) |
                MPU_RASR_AP_RW_RW |   /* User RW */
                MPU_RASR_XN_YES;

    /* Region 4: Task-specific data */
    MPU->RNR  = MPU_REGION_TASK_DATA;
    MPU->RBAR = (uint32_t)task->data_base;
    MPU->RASR = MPU_RASR_ENABLE_Msk |
                calc_size_bits(task->data_size) |
                MPU_RASR_AP_RW_RW |
                MPU_RASR_XN_YES;

    __DSB();
    __ISB();
}
```

When dynamic allocation is unavoidable, the allocator must be designed with real-time constraints in mind.
```c
/* TLSF (Two-Level Segregated Fit) Allocator Overview
 *
 * TLSF is a real-time allocator providing O(1) malloc and free
 * with bounded fragmentation. Used in many RTOSes.
 * Helper routines (adjust_size, split_block, coalescing, free-list
 * maintenance) and block_header_t are elided for brevity.
 */

#include <stdint.h>
#include <stddef.h>

/* First level: powers of 2 (16, 32, 64, 128, ...) */
#define FL_INDEX_COUNT 16
/* Second level: subdivisions within each power of 2 */
#define SL_INDEX_COUNT 16

typedef struct {
    /* Bitmaps for quick free block lookup */
    uint32_t fl_bitmap;                  /* First-level */
    uint32_t sl_bitmap[FL_INDEX_COUNT];  /* Second-level */

    /* Segregated free lists */
    block_header_t *blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];

    /* Memory pool */
    void *pool_start;
    size_t pool_size;
} tlsf_t;

/* Calculate TLSF indices from size - O(1) using CLZ */
static inline void mapping_insert(size_t size, int *fl, int *sl) {
    if (size < 256) {
        *fl = 0;
        *sl = (size >> 4) & 0xF;  /* Direct mapping for small sizes */
    } else {
        /* Use count-leading-zeros for O(1) calculation */
        int t = 31 - __builtin_clz(size);
        *fl = t - 4;
        *sl = (size >> (t - 4)) & 0xF;
    }
}

/* Find suitable free block - O(1) using bitmaps.
 * (Simplified: full TLSF also masks the second-level bitmap against
 * the requested sl when searching within the same first-level class.) */
static block_header_t *find_free_block(tlsf_t *tlsf, int *fl, int *sl) {
    /* Find first first-level list with blocks >= our size class */
    uint32_t fl_map = tlsf->fl_bitmap & (~0U << *fl);
    if (fl_map == 0) {
        return NULL;  /* No suitable block - O(1) failure detection */
    }
    *fl = __builtin_ctz(fl_map);  /* First set bit */

    /* Find first second-level list */
    uint32_t sl_map = tlsf->sl_bitmap[*fl];
    *sl = __builtin_ctz(sl_map);

    /* Return head of free list */
    return tlsf->blocks[*fl][*sl];  /* O(1) */
}

/* Allocate memory - O(1) GUARANTEED */
void *tlsf_malloc(tlsf_t *tlsf, size_t size) {
    /* 1. Calculate size class - O(1) */
    size_t adjusted_size = adjust_size(size);
    int fl, sl;
    mapping_insert(adjusted_size, &fl, &sl);

    /* 2. Find free block - O(1) using bitmaps */
    block_header_t *block = find_free_block(tlsf, &fl, &sl);
    if (block == NULL) {
        return NULL;  /* No memory - deterministic failure */
    }

    /* 3. Remove from free list - O(1) */
    remove_from_free_list(tlsf, block, fl, sl);

    /* 4. Split if too large - O(1) */
    if (should_split(block, adjusted_size)) {
        block_header_t *remaining = split_block(block, adjusted_size);
        insert_free_block(tlsf, remaining);
    }

    /* 5. Mark as used, return - O(1) */
    block->header |= BLOCK_USED;
    return get_payload_ptr(block);
}

/* Free memory - O(1) GUARANTEED */
void tlsf_free(tlsf_t *tlsf, void *ptr) {
    block_header_t *block = get_block_from_payload(ptr);

    /* Mark as free */
    block->header &= ~BLOCK_USED;

    /* Coalesce with neighbors - O(1) due to boundary tags */
    block = coalesce_prev(tlsf, block);
    block = coalesce_next(tlsf, block);

    /* Insert into appropriate free list - O(1) */
    insert_free_block(tlsf, block);
}
```

Safety-critical systems (DO-178C, ISO 26262, IEC 62304) have additional memory management requirements beyond determinism.
| Requirement | DO-178C (Avionics) | ISO 26262 (Automotive) | IEC 62304 (Medical) |
|---|---|---|---|
| Dynamic allocation | Level A: Prohibited or severely restricted | ASIL D: Restricted, analyze WCET | Class C: Document and justify |
| Stack analysis | Static analysis required | Worst-case stack size (WCSS) analysis required | Document max usage |
| Pointer validation | All pointers checked | Safety mechanism required | Defensive coding |
| Memory corruption detection | Runtime checks for Level A/B | HW protection recommended | Error detection required |
| Heap fragmentation | Must not occur or be bounded | Analyze and document | Analyze and mitigate |
```c
/* Safety-Critical Memory Patterns
 * Application-defined types and error handlers (flight_control_data_t,
 * report_memory_corruption, report_pointer_error, etc.) are assumed
 * to be declared elsewhere. */

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* ============================================
 * Stack Usage Analysis Support
 * ============================================ */

/* Stack filling pattern for high-water-mark detection */
#define STACK_FILL_PATTERN 0xDEADBEEF

void fill_stack_with_pattern(uint32_t *stack_base, size_t size_words) {
    for (size_t i = 0; i < size_words; i++) {
        stack_base[i] = STACK_FILL_PATTERN;
    }
}

size_t measure_stack_usage(uint32_t *stack_base, size_t size_words) {
    size_t unused = 0;
    /* Count words still containing pattern (unused stack).
     * Assumes a descending stack: unused space sits at the base. */
    while (unused < size_words &&
           stack_base[unused] == STACK_FILL_PATTERN) {
        unused++;
    }
    return (size_words - unused) * sizeof(uint32_t);
}

/* ============================================
 * Memory Corruption Detection
 * ============================================ */

/* Guard words around critical data structures */
#define GUARD_WORD_HEAD 0xFEEDFACE
#define GUARD_WORD_TAIL 0xCAFEBABE

typedef struct {
    uint32_t guard_head;
    flight_control_data_t data;
    uint32_t guard_tail;
} guarded_flight_data_t;

bool validate_guards(guarded_flight_data_t *gd) {
    if (gd->guard_head != GUARD_WORD_HEAD) {
        report_memory_corruption(gd, CORRUPTION_HEAD);
        return false;
    }
    if (gd->guard_tail != GUARD_WORD_TAIL) {
        report_memory_corruption(gd, CORRUPTION_TAIL);
        return false;
    }
    return true;
}

/* ============================================
 * Defensive Pointer Handling
 * ============================================ */

/* Safe pointer dereference with range check */
#define SAFE_PTR_READ(ptr, region_start, region_size, result)       \
    do {                                                            \
        if ((uintptr_t)(ptr) >= (uintptr_t)(region_start) &&        \
            (uintptr_t)(ptr) + sizeof(*(ptr)) <=                    \
                (uintptr_t)(region_start) + (region_size)) {        \
            (result) = *(ptr);                                      \
        } else {                                                    \
            report_pointer_error(ptr, __FILE__, __LINE__);          \
            (result) = get_safe_default_value();                    \
        }                                                           \
    } while (0)

/* Null-check macro with error reporting */
#define CHECK_NULL(ptr, action_on_null)                             \
    do {                                                            \
        if ((ptr) == NULL) {                                        \
            log_null_pointer_access(__FILE__, __LINE__, #ptr);      \
            action_on_null;                                         \
        }                                                           \
    } while (0)
```

Congratulations! You have completed Module 4: Real-Time OS Features. You now possess a comprehensive understanding of the architectural features that enable real-time operating systems to meet strict timing constraints, from minimal latency and deterministic behavior through interrupt handling, timer resolution, and specialized memory management. These concepts form the foundation for building reliable, predictable real-time systems.