
DMA Buffer Management

🎯 Overview

DMA (Direct Memory Access) moves data between peripherals and memory without CPU intervention. Correct buffer management (alignment, cache coherency, and physical addressing) is what makes those transfers fast and reliable in embedded systems.
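As a preview, a typical transmit-side flow looks like the sketch below. The helpers it uses are developed through the rest of this page, and start_dma_transfer() stands in for whatever start routine your platform provides.

// Minimal TX flow sketch (helper names are defined later on this page;
// start_dma_transfer() is an assumed platform start routine)
void dma_send_example(const uint8_t* data, size_t size) {
    // Round up so the allocation size is a multiple of the alignment
    size_t alloc_size = (size + DMA_BUFFER_ALIGNMENT - 1) &
                        ~(size_t)(DMA_BUFFER_ALIGNMENT - 1);
    void* buf = aligned_alloc(DMA_BUFFER_ALIGNMENT, alloc_size);
    if (!buf) return;
    
    memcpy(buf, data, size);          // CPU fills the buffer
    flush_cache_for_dma(buf, size);   // make the data visible to DMA
    start_dma_transfer(buf, size);    // hand the buffer to the controller
}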

🔧 DMA Buffer Requirements

Memory Alignment Requirements

// DMA buffer alignment requirements
#define DMA_BUFFER_ALIGNMENT 32      // typical cache-line/burst alignment; check your controller
#define DMA_BUFFER_SIZE_ALIGNMENT 4  // transfer sizes in multiples of the bus width

typedef struct {
    void* buffer;
    size_t size;
    uint32_t physical_address;
    bool is_cacheable;
} dma_buffer_t;

// Check DMA buffer alignment
bool is_dma_buffer_aligned(const void* buffer, size_t size) {
    uintptr_t addr = (uintptr_t)buffer;
    
    // Check address alignment
    if (addr % DMA_BUFFER_ALIGNMENT != 0) {
        return false;
    }
    
    // Check size alignment
    if (size % DMA_BUFFER_SIZE_ALIGNMENT != 0) {
        return false;
    }
    
    return true;
}

// Get DMA buffer requirements
typedef struct {
    uint32_t min_alignment;
    uint32_t max_transfer_size;
    bool requires_cache_flush;
    bool requires_cache_invalidate;
} dma_requirements_t;

dma_requirements_t get_dma_requirements(void) {
    dma_requirements_t req = {
        .min_alignment = 32,
        .max_transfer_size = 65536,
        .requires_cache_flush = true,
        .requires_cache_invalidate = true
    };
    return req;
}
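For statically allocated buffers, the alignment can be requested at compile time instead of going through aligned_alloc(). Two equivalent forms, using the alignment constant defined above:

#include <stdalign.h>

// C11: compile-time alignment for a static DMA buffer
alignas(DMA_BUFFER_ALIGNMENT) static uint8_t spi_tx_buf[256];

// GCC/Clang attribute form, common in embedded code bases
static uint8_t spi_rx_buf[256] __attribute__((aligned(DMA_BUFFER_ALIGNMENT)));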

Physical Memory Mapping

// Map virtual to physical address for DMA
typedef struct {
    void* virtual_address;
    uint32_t physical_address;
    size_t size;
    bool is_mapped;
} dma_memory_mapping_t;

dma_memory_mapping_t* create_dma_memory_mapping(size_t size) {
    dma_memory_mapping_t* mapping = malloc(sizeof(dma_memory_mapping_t));
    if (!mapping) return NULL;
    
    // Allocate aligned memory
    mapping->virtual_address = aligned_alloc(DMA_BUFFER_ALIGNMENT, size);
    if (!mapping->virtual_address) {
        free(mapping);
        return NULL;
    }
    
    // Get physical address (platform specific)
    mapping->physical_address = get_physical_address(mapping->virtual_address);
    mapping->size = size;
    mapping->is_mapped = true;
    
    return mapping;
}

uint32_t get_physical_address(void* virtual_addr) {
    // Platform-specific: the identity mapping below only holds on
    // MMU-less parts; with an MMU, translate through the page tables
    return (uint32_t)(uintptr_t)virtual_addr;  // Simplified for demonstration
}

void destroy_dma_memory_mapping(dma_memory_mapping_t* mapping) {
    if (mapping) {
        free(mapping->virtual_address);  // free(NULL) is a no-op
        free(mapping);
    }
}
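For illustration, on embedded Linux one way to resolve a virtual address to a physical one from user space is the /proc/self/pagemap interface. A sketch is below; note that reading PFNs requires privileges on recent kernels, and the page should be resident and pinned (e.g., with mlock()) before handing its physical address to DMA.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

// Linux-only illustration: translate a virtual address via
// /proc/self/pagemap. Returns 0 on failure or if the page is not present.
uint64_t linux_virt_to_phys(const void* virtual_addr) {
    uint64_t vaddr = (uint64_t)(uintptr_t)virtual_addr;
    long page_size = sysconf(_SC_PAGESIZE);
    uint64_t entry = 0;
    
    int fd = open("/proc/self/pagemap", O_RDONLY);
    if (fd < 0) return 0;
    
    off_t offset = (off_t)(vaddr / page_size) * sizeof(entry);
    ssize_t n = pread(fd, &entry, sizeof(entry), offset);
    close(fd);
    
    if (n != (ssize_t)sizeof(entry) || !(entry & (1ULL << 63))) {
        return 0;                                  // bit 63: page present
    }
    
    uint64_t pfn = entry & ((1ULL << 55) - 1);     // bits 0-54: page frame number
    return pfn * (uint64_t)page_size + (vaddr % page_size);
}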

🔄 Cache-Coherent DMA

Cache Flush and Invalidate

// Cache maintenance for DMA (platform specific). __builtin___clear_cache
// only synchronizes the instruction cache for self-modifying code, so it
// is the wrong tool here; the CMSIS calls below apply to Arm Cortex-M7
// class parts -- substitute your platform's API. Note that maintenance
// works on whole cache lines, so buffers should be aligned to, and sized
// in multiples of, the cache line size.
void flush_cache_for_dma(void* buffer, size_t size) {
    // Clean (write back) the D-cache so DMA sees the CPU's latest data
    SCB_CleanDCache_by_Addr((uint32_t*)buffer, (int32_t)size);
}

void invalidate_cache_after_dma(void* buffer, size_t size) {
    // Invalidate the D-cache so the CPU sees the DMA data
    SCB_InvalidateDCache_by_Addr((uint32_t*)buffer, (int32_t)size);
}

// DMA-safe buffer operations
typedef struct {
    void* buffer;
    size_t size;
    bool needs_flush;
    bool needs_invalidate;
} dma_safe_buffer_t;

dma_safe_buffer_t* create_dma_safe_buffer(size_t size) {
    dma_safe_buffer_t* safe_buffer = malloc(sizeof(dma_safe_buffer_t));
    if (!safe_buffer) return NULL;
    
    safe_buffer->buffer = aligned_alloc(DMA_BUFFER_ALIGNMENT, size);
    if (!safe_buffer->buffer) {
        free(safe_buffer);
        return NULL;
    }
    safe_buffer->size = size;
    safe_buffer->needs_flush = true;
    safe_buffer->needs_invalidate = true;
    
    return safe_buffer;
}

// Prepare a buffer the CPU has just written and DMA will read (TX):
// flush so the DMA engine sees the latest data
void prepare_dma_buffer_for_write(dma_safe_buffer_t* safe_buffer) {
    if (safe_buffer->needs_flush) {
        flush_cache_for_dma(safe_buffer->buffer, safe_buffer->size);
    }
}

// Prepare a buffer DMA will write and the CPU will then read (RX):
// invalidate so the CPU does not read stale cache lines
void prepare_dma_buffer_for_read(dma_safe_buffer_t* safe_buffer) {
    if (safe_buffer->needs_invalidate) {
        invalidate_cache_after_dma(safe_buffer->buffer, safe_buffer->size);
    }
}

Non-Cacheable DMA Buffers

// Allocate non-cacheable memory for DMA
void* allocate_non_cacheable_dma_buffer(size_t size) {
    // Use an uncached memory region so no flush/invalidate is needed
    void* buffer = allocate_uncached_memory(size);
    if (buffer) {
        // The region must still satisfy the DMA alignment requirement;
        // silently falling back to normal cached memory would defeat
        // the purpose, so fail instead
        if ((uintptr_t)buffer % DMA_BUFFER_ALIGNMENT != 0) {
            free(buffer);
            return NULL;
        }
    }
    return buffer;
}

// Platform-specific uncached memory allocation
void* allocate_uncached_memory(size_t size) {
    // A real implementation maps the region as uncached or device memory
    // (e.g., via the MMU/MPU, or a dedicated non-cacheable SRAM bank)
    return malloc(size);  // Simplified for demonstration
}
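On MCUs a common alternative is to let the linker place DMA buffers in a region that startup code or the MPU configures as non-cacheable. A minimal sketch; the section name ".dma_buffers" and its MPU/startup configuration are project-specific assumptions:

// Placed by the linker in a region configured as non-cacheable
__attribute__((section(".dma_buffers"), aligned(DMA_BUFFER_ALIGNMENT)))
static uint8_t uart_rx_dma_buffer[1024];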

🗄️ DMA Buffer Allocation

DMA Buffer Pool

// DMA buffer pool for efficient allocation
typedef struct {
    void* pool_start;
    size_t pool_size;
    size_t buffer_size;
    size_t num_buffers;
    bool* buffer_used;
    void** buffer_addresses;
} dma_buffer_pool_t;

dma_buffer_pool_t* create_dma_buffer_pool(size_t buffer_size, size_t num_buffers) {
    dma_buffer_pool_t* pool = malloc(sizeof(dma_buffer_pool_t));
    if (!pool) return NULL;
    
    // Round the buffer size up to the DMA alignment so every buffer in
    // the pool starts on an aligned boundary
    pool->buffer_size = (buffer_size + DMA_BUFFER_ALIGNMENT - 1) &
                        ~(size_t)(DMA_BUFFER_ALIGNMENT - 1);
    pool->num_buffers = num_buffers;
    pool->pool_size = pool->buffer_size * num_buffers;
    
    // Allocate aligned pool memory
    pool->pool_start = aligned_alloc(DMA_BUFFER_ALIGNMENT, pool->pool_size);
    if (!pool->pool_start) {
        free(pool);
        return NULL;
    }
    
    // Initialize buffer tracking
    pool->buffer_used = calloc(num_buffers, sizeof(bool));
    pool->buffer_addresses = malloc(num_buffers * sizeof(void*));
    
    if (!pool->buffer_used || !pool->buffer_addresses) {
        free(pool->pool_start);
        free(pool->buffer_used);
        free(pool->buffer_addresses);
        free(pool);
        return NULL;
    }
    
    // Initialize buffer addresses
    for (size_t i = 0; i < num_buffers; i++) {
        pool->buffer_addresses[i] = (char*)pool->pool_start + (i * pool->buffer_size);
    }
    
    return pool;
}

void* allocate_dma_buffer_from_pool(dma_buffer_pool_t* pool) {
    for (size_t i = 0; i < pool->num_buffers; i++) {
        if (!pool->buffer_used[i]) {
            pool->buffer_used[i] = true;
            return pool->buffer_addresses[i];
        }
    }
    return NULL;  // No free buffers
}

void free_dma_buffer_to_pool(dma_buffer_pool_t* pool, void* buffer) {
    for (size_t i = 0; i < pool->num_buffers; i++) {
        if (pool->buffer_addresses[i] == buffer) {
            pool->buffer_used[i] = false;
            break;
        }
    }
}
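A short usage sketch of the pool API above; the transfer step in the middle is whatever your platform's channel setup looks like:

void pool_example(void) {
    dma_buffer_pool_t* pool = create_dma_buffer_pool(512, 8);
    if (!pool) return;
    
    void* buf = allocate_dma_buffer_from_pool(pool);
    if (buf) {
        // ... fill buf and hand it to a DMA channel ...
        free_dma_buffer_to_pool(pool, buf);
    }
}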

Scatter-Gather DMA

// Scatter-gather DMA descriptor
typedef struct {
    uint32_t source_address;
    uint32_t destination_address;
    uint32_t transfer_size;
    uint32_t next_descriptor;
} dma_sg_descriptor_t;

typedef struct {
    dma_sg_descriptor_t* descriptors;
    size_t num_descriptors;
    size_t max_descriptors;
} dma_scatter_gather_t;

dma_scatter_gather_t* create_scatter_gather_dma(size_t max_descriptors) {
    dma_scatter_gather_t* sg = malloc(sizeof(dma_scatter_gather_t));
    if (!sg) return NULL;
    
    sg->descriptors = aligned_alloc(DMA_BUFFER_ALIGNMENT, 
                                   max_descriptors * sizeof(dma_sg_descriptor_t));
    if (!sg->descriptors) {
        free(sg);
        return NULL;
    }
    sg->num_descriptors = 0;
    sg->max_descriptors = max_descriptors;
    
    return sg;
}

void add_sg_descriptor(dma_scatter_gather_t* sg, 
                      uint32_t src_addr, uint32_t dst_addr, uint32_t size) {
    if (sg->num_descriptors < sg->max_descriptors) {
        dma_sg_descriptor_t* desc = &sg->descriptors[sg->num_descriptors];
        desc->source_address = src_addr;
        desc->destination_address = dst_addr;
        desc->transfer_size = size;
        desc->next_descriptor = 0;  // the newest descriptor terminates the chain
        
        // Link the previous descriptor to this one (assumes descriptors
        // live in identity-mapped 32-bit memory)
        if (sg->num_descriptors > 0) {
            sg->descriptors[sg->num_descriptors - 1].next_descriptor =
                (uint32_t)(uintptr_t)desc;
        }
        sg->num_descriptors++;
    }
}
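A usage sketch: gathering three packet fragments into one contiguous destination. Addresses are assumed to be valid 32-bit DMA addresses, and the first descriptor is what you hand to the engine:

void gather_packet_fragments(const uint32_t frag_addr[3],
                             const uint32_t frag_len[3], uint32_t dst_addr) {
    dma_scatter_gather_t* sg = create_scatter_gather_dma(3);
    if (!sg) return;
    
    uint32_t offset = 0;
    for (int i = 0; i < 3; i++) {
        add_sg_descriptor(sg, frag_addr[i], dst_addr + offset, frag_len[i]);
        offset += frag_len[i];
    }
    
    // Hand &sg->descriptors[0] to the DMA engine to start the chain
}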

📋 DMA Descriptor Management

DMA Descriptor Structure

// DMA descriptor for different transfer types
typedef struct {
    uint32_t source_address;
    uint32_t destination_address;
    uint32_t transfer_size;
    uint32_t control;
    uint32_t next_descriptor;
} dma_descriptor_t;

#define DMA_CONTROL_ENABLE         (1 << 0)
#define DMA_CONTROL_INC_SRC        (1 << 1)
#define DMA_CONTROL_INC_DST        (1 << 2)
#define DMA_CONTROL_MEM_TO_MEM     (1 << 3)
#define DMA_CONTROL_MEM_TO_PERIPH  (1 << 4)
#define DMA_CONTROL_PERIPH_TO_MEM  (1 << 5)
#define DMA_CONTROL_INTERRUPT      (1 << 6)

// Create DMA descriptor
dma_descriptor_t* create_dma_descriptor(uint32_t src, uint32_t dst, 
                                       uint32_t size, uint32_t control) {
    dma_descriptor_t* desc = aligned_alloc(DMA_BUFFER_ALIGNMENT, 
                                          sizeof(dma_descriptor_t));
    if (!desc) return NULL;
    
    desc->source_address = src;
    desc->destination_address = dst;
    desc->transfer_size = size;
    desc->control = control;
    desc->next_descriptor = 0;
    
    return desc;
}

// Link DMA descriptors (assumes descriptors live in identity-mapped
// 32-bit memory; otherwise store the physical address)
void link_dma_descriptors(dma_descriptor_t* first, dma_descriptor_t* second) {
    if (first && second) {
        first->next_descriptor = (uint32_t)(uintptr_t)second;
    }
}

DMA Channel Management

// DMA channel configuration
typedef struct {
    uint8_t channel_id;
    dma_descriptor_t* descriptor_chain;
    bool is_active;
    uint32_t transfer_count;
    void (*completion_callback)(void*);
    void* callback_data;
} dma_channel_t;

#define MAX_DMA_CHANNELS 8  // assumed controller width; check your part

typedef struct {
    dma_channel_t channels[MAX_DMA_CHANNELS];
    uint8_t active_channels;
} dma_controller_t;

dma_controller_t* initialize_dma_controller(void) {
    dma_controller_t* controller = malloc(sizeof(dma_controller_t));
    if (!controller) return NULL;
    
    // Initialize all channels
    for (int i = 0; i < MAX_DMA_CHANNELS; i++) {
        controller->channels[i].channel_id = i;
        controller->channels[i].descriptor_chain = NULL;
        controller->channels[i].is_active = false;
        controller->channels[i].transfer_count = 0;
        controller->channels[i].completion_callback = NULL;
        controller->channels[i].callback_data = NULL;
    }
    
    controller->active_channels = 0;
    return controller;
}

bool start_dma_transfer(dma_controller_t* controller, uint8_t channel_id) {
    if (channel_id >= MAX_DMA_CHANNELS) return false;
    
    dma_channel_t* channel = &controller->channels[channel_id];
    if (!channel->descriptor_chain) return false;
    
    // Configure DMA hardware
    configure_dma_channel(channel_id, channel->descriptor_chain);
    
    // Start transfer
    start_dma_channel(channel_id);
    channel->is_active = true;
    controller->active_channels++;
    
    return true;
}
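The completion callback registered in dma_channel_t is typically invoked from the DMA interrupt. A minimal sketch, assuming read_dma_irq_status() and clear_dma_irq() are supplied by the platform HAL:

// Call from the DMA interrupt vector
void dma_irq_handler(dma_controller_t* controller) {
    for (uint8_t ch = 0; ch < MAX_DMA_CHANNELS; ch++) {
        dma_channel_t* channel = &controller->channels[ch];
        if (channel->is_active && read_dma_irq_status(ch)) {
            clear_dma_irq(ch);
            channel->is_active = false;
            channel->transfer_count++;
            controller->active_channels--;
            if (channel->completion_callback) {
                channel->completion_callback(channel->callback_data);
            }
        }
    }
}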

🔄 DMA Transfer Types

Memory-to-Memory Transfer

// Memory-to-memory DMA transfer
typedef struct {
    void* source_buffer;
    void* destination_buffer;
    size_t transfer_size;
} dma_mem_to_mem_transfer_t;

dma_mem_to_mem_transfer_t* create_mem_to_mem_transfer(void* src, void* dst, 
                                                       size_t size) {
    dma_mem_to_mem_transfer_t* transfer = malloc(sizeof(dma_mem_to_mem_transfer_t));
    if (!transfer) return NULL;
    
    transfer->source_buffer = src;
    transfer->destination_buffer = dst;
    transfer->transfer_size = size;
    
    return transfer;
}

bool execute_mem_to_mem_transfer(dma_mem_to_mem_transfer_t* transfer) {
    // DMA reads the source: flush so it sees the CPU's latest writes
    flush_cache_for_dma(transfer->source_buffer, transfer->transfer_size);
    
    // DMA writes the destination: invalidate so the CPU later reads
    // the DMA data rather than stale cache lines
    invalidate_cache_after_dma(transfer->destination_buffer, transfer->transfer_size);
    
    // Create DMA descriptor (assumes identity-mapped 32-bit addresses;
    // otherwise translate with get_physical_address() first)
    dma_descriptor_t* desc = create_dma_descriptor(
        (uint32_t)(uintptr_t)transfer->source_buffer,
        (uint32_t)(uintptr_t)transfer->destination_buffer,
        transfer->transfer_size,
        DMA_CONTROL_ENABLE | DMA_CONTROL_INC_SRC | DMA_CONTROL_INC_DST | 
        DMA_CONTROL_MEM_TO_MEM | DMA_CONTROL_INTERRUPT
    );
    
    // Start DMA transfer
    return start_dma_transfer_with_descriptor(desc);
}

Peripheral-to-Memory Transfer

// Peripheral-to-memory DMA transfer (e.g., ADC sampling)
typedef struct {
    uint32_t peripheral_address;
    void* memory_buffer;
    size_t buffer_size;
    uint32_t transfer_count;
} dma_periph_to_mem_transfer_t;

dma_periph_to_mem_transfer_t* create_adc_dma_transfer(uint32_t adc_data_reg, 
                                                      void* buffer, size_t size) {
    dma_periph_to_mem_transfer_t* transfer = malloc(sizeof(dma_periph_to_mem_transfer_t));
    if (!transfer) return NULL;
    
    transfer->peripheral_address = adc_data_reg;
    transfer->memory_buffer = buffer;
    transfer->buffer_size = size;
    transfer->transfer_count = 0;
    
    return transfer;
}

bool start_adc_dma_transfer(dma_periph_to_mem_transfer_t* transfer) {
    // DMA will write samples into memory: invalidate the buffer so the
    // CPU later reads the DMA data, not stale cache lines
    invalidate_cache_after_dma(transfer->memory_buffer, transfer->buffer_size);
    
    // Create DMA descriptor
    dma_descriptor_t* desc = create_dma_descriptor(
        transfer->peripheral_address,
        (uint32_t)(uintptr_t)transfer->memory_buffer,
        transfer->buffer_size,
        DMA_CONTROL_ENABLE | DMA_CONTROL_INC_DST | 
        DMA_CONTROL_PERIPH_TO_MEM | DMA_CONTROL_INTERRUPT
    );
    
    // Configure ADC for DMA
    configure_adc_for_dma();
    
    // Start DMA transfer
    return start_dma_transfer_with_descriptor(desc);
}
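For continuous sampling, a ping-pong (double-buffer) scheme lets the CPU process one half while DMA fills the other. A sketch, assuming the platform delivers a per-buffer completion callback and process_adc_samples() is application code:

#define ADC_HALF_SAMPLES 256

static uint16_t adc_ping[ADC_HALF_SAMPLES];
static uint16_t adc_pong[ADC_HALF_SAMPLES];
static volatile bool dma_filling_ping = true;

// Called from the DMA completion interrupt for each finished buffer
void on_adc_buffer_complete(void) {
    uint16_t* ready = dma_filling_ping ? adc_ping : adc_pong;
    dma_filling_ping = !dma_filling_ping;  // DMA continues in the other half
    
    invalidate_cache_after_dma(ready, sizeof(adc_ping));
    process_adc_samples(ready, ADC_HALF_SAMPLES);  // application code
}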

🗂️ DMA Buffer Pooling

Multi-Size Buffer Pool

// Buffer pool with multiple sizes
typedef struct {
    size_t buffer_size;
    size_t num_buffers;
    void** buffers;
    bool* buffer_used;
} dma_buffer_pool_size_t;

typedef struct {
    dma_buffer_pool_size_t* pools;
    size_t num_pool_sizes;
} dma_multi_size_pool_t;

dma_multi_size_pool_t* create_multi_size_dma_pool(size_t* sizes, 
                                                  size_t* counts, 
                                                  size_t num_sizes) {
    dma_multi_size_pool_t* multi_pool = malloc(sizeof(dma_multi_size_pool_t));
    if (!multi_pool) return NULL;
    
    multi_pool->pools = malloc(num_sizes * sizeof(dma_buffer_pool_size_t));
    if (!multi_pool->pools) {
        free(multi_pool);
        return NULL;
    }
    multi_pool->num_pool_sizes = num_sizes;
    
    for (size_t i = 0; i < num_sizes; i++) {
        dma_buffer_pool_size_t* pool = &multi_pool->pools[i];
        pool->buffer_size = sizes[i];
        pool->num_buffers = counts[i];
        
        // Allocate buffers (per-allocation error checks omitted for brevity)
        pool->buffers = malloc(counts[i] * sizeof(void*));
        pool->buffer_used = calloc(counts[i], sizeof(bool));
        
        for (size_t j = 0; j < counts[i]; j++) {
            pool->buffers[j] = aligned_alloc(DMA_BUFFER_ALIGNMENT, sizes[i]);
        }
    }
    
    return multi_pool;
}

void* allocate_dma_buffer_from_multi_pool(dma_multi_size_pool_t* multi_pool, 
                                         size_t required_size) {
    // Find appropriate pool size
    for (size_t i = 0; i < multi_pool->num_pool_sizes; i++) {
        dma_buffer_pool_size_t* pool = &multi_pool->pools[i];
        if (pool->buffer_size >= required_size) {
            // Find free buffer in this pool
            for (size_t j = 0; j < pool->num_buffers; j++) {
                if (!pool->buffer_used[j]) {
                    pool->buffer_used[j] = true;
                    return pool->buffers[j];
                }
            }
        }
    }
    return NULL;  // No suitable buffer found
}
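A usage sketch: since the allocator searches the pools in the order given, listing sizes in ascending order yields the tightest fit.

void multi_pool_example(void) {
    size_t sizes[]  = { 64, 256, 1024 };
    size_t counts[] = { 16,   8,    4 };
    dma_multi_size_pool_t* pool = create_multi_size_dma_pool(sizes, counts, 3);
    if (!pool) return;
    
    void* buf = allocate_dma_buffer_from_multi_pool(pool, 200);  // 256-byte class
    // ... use buf for a transfer, then return it to its pool ...
}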

⏱️ Real-time DMA Considerations

DMA Priority Management

// DMA priority levels
typedef enum {
    DMA_PRIORITY_LOW = 0,
    DMA_PRIORITY_MEDIUM = 1,
    DMA_PRIORITY_HIGH = 2,
    DMA_PRIORITY_CRITICAL = 3
} dma_priority_t;

typedef struct {
    uint8_t channel_id;
    dma_priority_t priority;
    uint32_t deadline_us;
    bool is_real_time;
} dma_real_time_config_t;

dma_real_time_config_t* create_real_time_dma_config(uint8_t channel, 
                                                   dma_priority_t priority,
                                                   uint32_t deadline_us) {
    dma_real_time_config_t* config = malloc(sizeof(dma_real_time_config_t));
    if (!config) return NULL;
    
    config->channel_id = channel;
    config->priority = priority;
    config->deadline_us = deadline_us;
    config->is_real_time = true;
    
    return config;
}

bool configure_real_time_dma(dma_real_time_config_t* config) {
    // Set DMA channel priority
    set_dma_channel_priority(config->channel_id, config->priority);
    
    // Configure for real-time operation
    if (config->is_real_time) {
        enable_dma_interrupts(config->channel_id);
        set_dma_deadline(config->channel_id, config->deadline_us);
    }
    
    return true;
}

DMA Deadline Monitoring

// Monitor DMA transfer deadlines
typedef struct {
    uint32_t start_time;
    uint32_t deadline;
    bool deadline_missed;
    void (*deadline_callback)(void*);
    void* callback_data;
} dma_deadline_monitor_t;

dma_deadline_monitor_t* create_dma_deadline_monitor(uint32_t deadline_us,
                                                   void (*callback)(void*),
                                                   void* data) {
    dma_deadline_monitor_t* monitor = malloc(sizeof(dma_deadline_monitor_t));
    if (!monitor) return NULL;
    
    monitor->start_time = get_current_time_us();
    monitor->deadline = deadline_us;
    monitor->deadline_missed = false;
    monitor->deadline_callback = callback;
    monitor->callback_data = data;
    
    return monitor;
}

void check_dma_deadline(dma_deadline_monitor_t* monitor) {
    uint32_t current_time = get_current_time_us();
    uint32_t elapsed = current_time - monitor->start_time;
    
    if (elapsed > monitor->deadline && !monitor->deadline_missed) {
        monitor->deadline_missed = true;
        if (monitor->deadline_callback) {
            monitor->deadline_callback(monitor->callback_data);
        }
    }
}
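A usage sketch tying the monitor to a transfer: poll it while the transfer is in flight and recover the channel on a miss. dma_transfer_done() is an assumed platform query, and on_deadline_miss() is a hypothetical callback:

static void on_deadline_miss(void* data) {
    reset_dma_channel(*(uint8_t*)data);  // abort and recover the channel
}

void run_monitored_transfer(uint8_t channel_id) {
    static uint8_t ch;
    ch = channel_id;
    dma_deadline_monitor_t* monitor =
        create_dma_deadline_monitor(1000 /* us */, on_deadline_miss, &ch);
    if (!monitor) return;
    
    while (!dma_transfer_done(channel_id) && !monitor->deadline_missed) {
        check_dma_deadline(monitor);
    }
    free(monitor);
}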

🚨 DMA Error Handling

DMA Error Detection

// DMA error types
typedef enum {
    DMA_ERROR_NONE = 0,
    DMA_ERROR_TIMEOUT,
    DMA_ERROR_BUS_ERROR,
    DMA_ERROR_ALIGNMENT,
    DMA_ERROR_MEMORY_ERROR
} dma_error_t;

typedef struct {
    dma_error_t error_type;
    uint32_t error_address;
    uint32_t transfer_count;
    uint8_t channel_id;
} dma_error_info_t;

dma_error_info_t* create_dma_error_info(void) {
    dma_error_info_t* error_info = malloc(sizeof(dma_error_info_t));
    if (error_info) {
        error_info->error_type = DMA_ERROR_NONE;
        error_info->error_address = 0;
        error_info->transfer_count = 0;
        error_info->channel_id = 0;
    }
    return error_info;
}

// Illustrative status bits; real bit positions come from the DMA
// controller's reference manual
#define DMA_ERROR_TIMEOUT_MASK    (1u << 0)
#define DMA_ERROR_BUS_ERROR_MASK  (1u << 1)
#define DMA_ERROR_ALIGNMENT_MASK  (1u << 2)

dma_error_t check_dma_errors(uint8_t channel_id) {
    // Check the DMA error status register
    uint32_t error_status = read_dma_error_status(channel_id);
    
    if (error_status & DMA_ERROR_TIMEOUT_MASK) {
        return DMA_ERROR_TIMEOUT;
    } else if (error_status & DMA_ERROR_BUS_ERROR_MASK) {
        return DMA_ERROR_BUS_ERROR;
    } else if (error_status & DMA_ERROR_ALIGNMENT_MASK) {
        return DMA_ERROR_ALIGNMENT;
    }
    
    return DMA_ERROR_NONE;
}

void handle_dma_error(dma_error_info_t* error_info) {
    printf("DMA Error on channel %d: ", error_info->channel_id);
    
    switch (error_info->error_type) {
        case DMA_ERROR_TIMEOUT:
            printf("Timeout error\n");
            break;
        case DMA_ERROR_BUS_ERROR:
            printf("Bus error at address 0x%08X\n", error_info->error_address);
            break;
        case DMA_ERROR_ALIGNMENT:
            printf("Alignment error\n");
            break;
        case DMA_ERROR_MEMORY_ERROR:
            printf("Memory error\n");
            break;
        default:
            printf("Unknown error\n");
            break;
    }
    
    // Implement error recovery
    reset_dma_channel(error_info->channel_id);
}
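Putting the two together, a small sketch that polls for errors after a transfer and dispatches the handler (the fault-address field would normally be read from the controller's fault register):

void poll_and_handle_dma_errors(uint8_t channel_id) {
    dma_error_t err = check_dma_errors(channel_id);
    if (err != DMA_ERROR_NONE) {
        dma_error_info_t info = {
            .error_type = err,
            .error_address = 0,   // fill from the controller's fault register
            .transfer_count = 0,
            .channel_id = channel_id,
        };
        handle_dma_error(&info);
    }
}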

⚠️ Common Pitfalls

1. Cache Coherency Issues

// WRONG: Not handling cache coherency
void incorrect_dma_transfer(void* buffer, size_t size) {
    // Write to buffer
    memset(buffer, 0xAA, size);
    
    // Start DMA without cache flush
    start_dma_transfer(buffer, size);  // DMA may not see latest data
}

// CORRECT: Proper cache handling
void correct_dma_transfer(void* buffer, size_t size) {
    // Write to buffer
    memset(buffer, 0xAA, size);
    
    // Flush cache before DMA
    flush_cache_for_dma(buffer, size);
    
    // Start DMA transfer
    start_dma_transfer(buffer, size);
}

2. Buffer Alignment Issues

// WRONG: Unaligned DMA buffer
void* incorrect_dma_allocation(size_t size) {
    return malloc(size);  // May not be aligned
}

// CORRECT: Aligned DMA buffer
void* correct_dma_allocation(size_t size) {
    return aligned_alloc(DMA_BUFFER_ALIGNMENT, size);
}

3. Missing Error Handling

// WRONG: No error handling
void unsafe_dma_transfer(void* buffer, size_t size) {
    start_dma_transfer(buffer, size);
    // No error checking
}

// CORRECT: Proper error handling
bool safe_dma_transfer(void* buffer, size_t size) {
    if (!is_dma_buffer_aligned(buffer, size)) {
        return false;
    }
    
    if (!start_dma_transfer(buffer, size)) {
        return false;
    }
    
    // Wait for completion with timeout
    return wait_for_dma_completion(DMA_TIMEOUT_MS);
}

✅ Best Practices

1. DMA Buffer Lifecycle Management

// Complete DMA buffer lifecycle
typedef struct {
    void* buffer;
    size_t size;
    bool is_allocated;
    bool is_in_use;
    uint32_t allocation_time;
} dma_buffer_lifecycle_t;

dma_buffer_lifecycle_t* create_dma_buffer_lifecycle(size_t size) {
    dma_buffer_lifecycle_t* lifecycle = malloc(sizeof(dma_buffer_lifecycle_t));
    if (!lifecycle) return NULL;
    
    lifecycle->buffer = aligned_alloc(DMA_BUFFER_ALIGNMENT, size);
    lifecycle->size = size;
    lifecycle->is_allocated = (lifecycle->buffer != NULL);
    lifecycle->is_in_use = false;
    lifecycle->allocation_time = get_current_time_ms();
    
    return lifecycle;
}

void destroy_dma_buffer_lifecycle(dma_buffer_lifecycle_t* lifecycle) {
    if (lifecycle) {
        if (lifecycle->buffer) {
            free(lifecycle->buffer);
        }
        free(lifecycle);
    }
}

2. DMA Transfer Validation
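The limits referenced below are platform properties; the values here are illustrative, assuming a typical Cortex-M style memory map.

// Illustrative platform limits; take real values from the datasheet
#define MAX_DMA_TRANSFER_SIZE  65536u
#define PERIPHERAL_BASE        0x40000000u
#define PERIPHERAL_END         0x60000000u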

// Validate DMA transfer parameters
bool validate_dma_transfer(void* buffer, size_t size, uint32_t peripheral_addr) {
    // Check buffer alignment
    if (!is_dma_buffer_aligned(buffer, size)) {
        return false;
    }
    
    // Check buffer size
    if (size == 0 || size > MAX_DMA_TRANSFER_SIZE) {
        return false;
    }
    
    // Check peripheral address
    if (!is_valid_peripheral_address(peripheral_addr)) {
        return false;
    }
    
    return true;
}

bool is_valid_peripheral_address(uint32_t addr) {
    // Check if address is in peripheral memory range
    return (addr >= PERIPHERAL_BASE && addr < PERIPHERAL_END);
}

3. DMA Performance Monitoring

// Monitor DMA performance
typedef struct {
    uint32_t total_transfers;
    uint32_t successful_transfers;
    uint32_t failed_transfers;
    uint32_t total_bytes_transferred;
    uint32_t average_transfer_time_us;
} dma_performance_stats_t;

dma_performance_stats_t* create_dma_performance_stats(void) {
    dma_performance_stats_t* stats = malloc(sizeof(dma_performance_stats_t));
    if (stats) {
        memset(stats, 0, sizeof(dma_performance_stats_t));
    }
    return stats;
}

void update_dma_performance_stats(dma_performance_stats_t* stats, 
                                 bool success, uint32_t bytes, 
                                 uint32_t transfer_time_us) {
    stats->total_transfers++;
    stats->total_bytes_transferred += bytes;
    
    if (success) {
        stats->successful_transfers++;
    } else {
        stats->failed_transfers++;
    }
    
    // Update average transfer time
    stats->average_transfer_time_us = 
        (stats->average_transfer_time_us * (stats->total_transfers - 1) + 
         transfer_time_us) / stats->total_transfers;
}
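A short sketch of recording a transfer, reusing safe_dma_transfer() from the pitfalls section and the microsecond clock from earlier:

void timed_transfer(dma_performance_stats_t* stats, void* buffer, size_t size) {
    uint32_t start = get_current_time_us();
    bool ok = safe_dma_transfer(buffer, size);
    update_dma_performance_stats(stats, ok, (uint32_t)size,
                                 get_current_time_us() - start);
}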

🎯 Interview Questions

Basic Questions

  1. What is DMA and why is it important in embedded systems?
    • DMA: Direct Memory Access for efficient data transfer
    • Important: Reduces CPU load, improves performance
  2. What are the key requirements for DMA buffers?
    • Proper alignment
    • Cache coherency
    • Physical memory mapping
    • Size alignment
  3. How do you handle cache coherency with DMA?
    • Flush cache before DMA write
    • Invalidate cache after DMA read
    • Use non-cacheable memory

Advanced Questions

  1. How would you implement a DMA buffer pool for real-time systems?
    • Pre-allocate aligned buffers
    • Implement priority-based allocation
    • Add deadline monitoring
  2. What are the challenges of scatter-gather DMA?
    • Descriptor chain management
    • Memory alignment requirements
    • Error handling across multiple transfers
  3. How would you optimize DMA performance?
    • Use appropriate buffer sizes
    • Minimize cache operations
    • Implement efficient buffer pooling


Next Topic: Memory-Mapped I/O · Shared Memory Programming