#include #include "mm.h" #include "boot/k_early_panic.h" #define MM_MAX_MMAP_ENTRIES 64 extern "C" { pagedirectory_t page_directory __attribute__ ((aligned (4096))); static mm_mem_range_t mm_mmap_entries[MM_MAX_MMAP_ENTRIES]; static int mm_mmap_num_entries = 0; static int mm_num_free_pages = 0; u32_t * mm_free_page_ptr = NULL; /************************************************************************** * This function is run in segmented memory before paging is in effect. * *************************************************************************/ void mm_record_mmap_entry(mb_mmap_t * mmap) { if (mm_mmap_num_entries < MM_MAX_MMAP_ENTRIES) { if (mmap->type == MB_MMAP_TYPE_RAM) { mm_mmap_entries[mm_mmap_num_entries].base = mmap->base; mm_mmap_entries[mm_mmap_num_entries].length = mmap->length; mm_mmap_num_entries++; } } else { k_early_panic("Too many mmap_entries!"); } } /************************************************************************** * This function is run in segmented memory before paging is in effect. * * It is run after the bootloader information has been read, so we can * * overwrite that memory now. * *************************************************************************/ void mm_bootstrap() { u32_t max_ram_address = KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE - 1; if (mm_mmap_num_entries < 1) { k_early_panic("No mmap entries read from bootloader!"); } for (int mmap_idx = 0; mmap_idx < mm_mmap_num_entries; mmap_idx++) { u32_t base_address = mm_mmap_entries[mmap_idx].base; if (base_address & PAGE_LOW_MASK) { /* start of this mmap range is not page-aligned */ base_address = (base_address & PAGE_HIGH_MASK) + PAGE_SIZE; } u32_t address_limit = mm_mmap_entries[mmap_idx].base + mm_mmap_entries[mmap_idx].length; if (address_limit & PAGE_LOW_MASK) { /* end of this mmap range is not page-aligned */ address_limit &= PAGE_HIGH_MASK; } /* record the highest RAM address found */ if ((address_limit - 1) > max_ram_address) { max_ram_address = (address_limit - 1); } /* * loop through every page in the mmap range and add * pages into the free page linked list */ u32_t * last_page = NULL; while (base_address < address_limit) { /* check to make sure the RAM page isn't overlapping the kernel */ if ( base_address + PAGE_SIZE <= KERNEL_PHYSICAL_ADDRESS && base_address >= KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE ) { /* we found a page to add to the free list */ u32_t * page_virtual_address = (u32_t *)(base_address + KERNEL_OFFSET); *page_virtual_address = 0; if (last_page == NULL) { mm_free_page_ptr = (u32_t *) base_address; } else { *last_page = base_address; } last_page = page_virtual_address; mm_num_free_pages++; } base_address += PAGE_SIZE; } } /* Clear the page directory */ for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++) { page_directory[i] = 0; } } } /* extern "C" */
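
/**************************************************************************
 * Illustration only -- not part of the original file. A minimal sketch   *
 * of how a physical page allocator could consume the free-page list      *
 * built by mm_bootstrap(), assuming the layout above: mm_free_page_ptr   *
 * holds the physical address of the first free page, each free page      *
 * stores the physical address of the next one in its first word, and a   *
 * stored value of 0 terminates the list. The function name               *
 * mm_alloc_page() is hypothetical, and reading the link through          *
 * base + KERNEL_OFFSET only works while the segmented mapping used in    *
 * mm_bootstrap() (or an equivalent higher-half mapping) is in place.     *
 *************************************************************************/
u32_t mm_alloc_page()
{
    if (mm_free_page_ptr == NULL) {
        k_early_panic("Out of free physical pages!");
    }

    /* the physical address we hand out is the list head itself */
    u32_t page_physical_address = (u32_t) mm_free_page_ptr;

    /* follow the link in the page's first word to find the next free page */
    u32_t * page_virtual_address = (u32_t *)(page_physical_address + KERNEL_OFFSET);
    u32_t next_page = *page_virtual_address;

    mm_free_page_ptr = (next_page == 0) ? NULL : (u32_t *) next_page;
    mm_num_free_pages--;

    return page_physical_address;
}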