#include <stddef.h>

#include "mm.h"
#include "stack.h"
#include "boot/k_early_panic.h"
#include "lang/string.h"
#include "lang/kio.h"

#define MM_MAX_MMAP_ENTRIES 64

static pagedirectory_entry_t * page_directory;
static mm_mem_range_t mm_mmap_entries[MM_MAX_MMAP_ENTRIES];
static int mm_mmap_num_entries = 0;
static int mm_num_free_pages = 0;
static int mm_num_used_pages = 0;
static u32_t * mm_free_page_ptr = NULL;
gdtr_t mm_gdtr;
static u64_t * mm_gdt;

/**************************************************************************
 * This function is run in segmented memory before paging is in effect.  *
 *************************************************************************/
void mm_record_mmap_entry(mb_mmap_t * mmap)
{
    if (mm_mmap_num_entries < MM_MAX_MMAP_ENTRIES)
    {
        if (mmap->type == MB_MMAP_TYPE_RAM)
        {
            mm_mmap_entries[mm_mmap_num_entries].base = mmap->base;
            mm_mmap_entries[mm_mmap_num_entries].length = mmap->length;
            mm_mmap_num_entries++;
        }
    }
    else
    {
        k_early_panic("Too many mmap_entries!");
    }
}

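/*
 * Illustrative sketch (not compiled): how early boot code might feed
 * this function while walking the bootloader's memory map.  The caller
 * name and the entry array below are hypothetical; only mb_mmap_t and
 * mm_record_mmap_entry() come from this codebase.
 */
#if 0
void boot_example_record_ram(mb_mmap_t * entries, int num_entries)
{
    for (int i = 0; i < num_entries; i++)
    {
        /* non-RAM entries are filtered out inside mm_record_mmap_entry */
        mm_record_mmap_entry(&entries[i]);
    }
}
#endif
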
/**************************************************************************
 * This function is run in segmented memory before paging is in effect.  *
 * It is run after the bootloader information has been read, so we can   *
 * overwrite that memory now.                                             *
 *************************************************************************/
void mm_bootstrap()
{
    u32_t max_ram_address = KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE - 1;

    if (mm_mmap_num_entries < 1)
    {
        k_early_panic("No mmap entries read from bootloader!");
    }

    /*
     * last_page persists across mmap ranges so that the pages from every
     * range end up on one linked list; declaring it inside the loop would
     * overwrite mm_free_page_ptr for each new range and leak the pages
     * collected from earlier ranges
     */
    u32_t * last_page = NULL;

    for (int mmap_idx = 0; mmap_idx < mm_mmap_num_entries; mmap_idx++)
    {
        u32_t base_address = mm_mmap_entries[mmap_idx].base;
        if (base_address & PAGE_LOW_MASK)
        {
            /* start of this mmap range is not page-aligned */
            base_address = (base_address & PAGE_HIGH_MASK) + PAGE_SIZE;
        }
        u32_t address_limit = mm_mmap_entries[mmap_idx].base +
                              mm_mmap_entries[mmap_idx].length;
        if (address_limit & PAGE_LOW_MASK)
        {
            /* end of this mmap range is not page-aligned */
            address_limit &= PAGE_HIGH_MASK;
        }

        /* record the highest RAM address found */
        if ((address_limit - 1) > max_ram_address)
        {
            max_ram_address = (address_limit - 1);
        }

        /*
         * loop through every page in the mmap range and add
         * pages into the free page linked list
         */
        while (base_address < address_limit)
        {
            /* check to make sure the RAM page is ok */
            if ( base_address > 0 /* don't map address 0 */
                 && ( base_address + PAGE_SIZE <= KERNEL_PHYSICAL_ADDRESS
                      || base_address >= KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE ) )
            {
                /* we found a page to add to the free list */
                u32_t * page_virtual_address =
                    (u32_t *)(base_address + KERNEL_OFFSET);
                *page_virtual_address = 0;
                if (last_page == NULL)
                {
                    mm_free_page_ptr = (u32_t *) base_address;
                }
                else
                {
                    *last_page = base_address;
                }
                last_page = page_virtual_address;
                mm_num_free_pages++;
            }
            base_address += PAGE_SIZE;
        }
    }

    if (mm_num_free_pages < 10)
    {
        k_early_panic("Not enough free pages of RAM!");
    }

    /* ok, now mm_early_page_alloc() should be functional */

    /* allocate the page directory */
    u32_t page_directory_phys = mm_early_page_alloc();
    page_directory = (pagedirectory_entry_t *) page_directory_phys;
    pagedirectory_entry_t * page_dir_virt =
        (pagedirectory_entry_t *) (page_directory_phys + KERNEL_OFFSET);

    /* clear the page directory */
    for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
    {
        page_dir_virt[i] = 0;
    }

    /*
     * map all of RAM into the virtual address space
     * starting at address 0x0 (except the null page)
     */
    for (u32_t page_base = PAGE_SIZE;
         page_base < max_ram_address;
         page_base += PAGE_SIZE)
    {
        mm_early_map(page_base, page_base, 0, 1);
    }

    /* now map the kernel's virtual address space into RAM */
    for (u32_t page_base = KERNEL_VIRTUAL_ADDRESS;
         page_base < KERNEL_VIRTUAL_ADDRESS + KERNEL_SIZE;
         page_base += PAGE_SIZE)
    {
        /* map page_base to page_base - KERNEL_OFFSET */
        mm_early_map(page_base, page_base - KERNEL_OFFSET, 0, 1);
    }

    /* set up the global descriptor table */
    u32_t gdt_base = mm_early_page_alloc();
    mm_gdt = (u64_t *) ((u32_t) gdt_base + (u32_t) KERNEL_OFFSET);
    mm_gdt[0] = 0x0ull; /* mandatory null descriptor */
    mm_gdt[1] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 1);
    mm_gdt[2] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 0);
    mm_gdtr.length = 3 * sizeof(mm_gdt[0]) - 1; /* GDTR holds size - 1 */
    mm_gdtr.phys_addr = gdt_base;

    /* set the page directory base register */
    set_cr3(page_directory);
}

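/*
 * The free-page list built above is intrusive: the first u32_t of each
 * free page holds the physical address of the next free page, and 0
 * terminates the list.  A minimal sketch of the pop operation (this is
 * what mm_page_alloc() below does once paging identity-maps all RAM;
 * the function name here is illustrative only):
 */
#if 0
u32_t example_pop_free_page(void)
{
    u32_t head = (u32_t) mm_free_page_ptr; /* physical address of head */
    if (head != 0)
    {
        /* the head page's first word links to the next free page */
        mm_free_page_ptr = (u32_t *) *(u32_t *) head;
    }
    return head;
}
#endif
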
/**************************************************************************
 * Map virtual_address to physical_address.                               *
 * Both addresses should be page-aligned.                                 *
 * This 'early' version can be used during segmented bootstrapping        *
 *************************************************************************/
int mm_early_map(u32_t virtual_address, u32_t physical_address,
                 u32_t user_mode, u32_t writable)
{
    u32_t directory_index = (virtual_address >> 22) & 0x3FF;
    u32_t table_index = (virtual_address >> 12) & 0x3FF;
    pagedirectory_entry_t * page_dir = page_directory;
    page_dir = (pagedirectory_entry_t *)((u32_t)page_dir + KERNEL_OFFSET);

    if (page_dir[directory_index] == 0)
    {
        /* allocate a new page table */
        u32_t new_table_address = mm_early_page_alloc();
        if (new_table_address == 0)
        {
            return 0;
        }
        /*
         * clear the new page table: a freshly allocated page contains
         * garbage, and a stale 'present' bit would silently map random
         * memory
         */
        u32_t * new_table = (u32_t *)(new_table_address + KERNEL_OFFSET);
        for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
        {
            new_table[i] = 0;
        }
        page_dir[directory_index] = new_table_address
                                    | 0x1 << 2 /* PTs can be user mode */
                                    | 0x1 << 1 /* writable */
                                    | 0x1;     /* present */
    }
    u32_t page_table_address = page_dir[directory_index] & PAGE_HIGH_MASK;

    u32_t * page_table = (u32_t *) page_table_address;
    page_table = (u32_t *)((u32_t)page_table + (u32_t)KERNEL_OFFSET);

    page_table[table_index] = (physical_address & PAGE_HIGH_MASK)
                              | (user_mode & 0x1) << 2
                              | (writable & 0x1) << 1
                              | 0x1; /* present */

    return 1;
}

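/*
 * Worked example of the two-level split used above, for a sample
 * virtual address 0xC0100000 (the address itself is arbitrary):
 *
 *   directory_index = (0xC0100000 >> 22) & 0x3FF = 0x300
 *   table_index     = (0xC0100000 >> 12) & 0x3FF = 0x100
 *
 * so the mapping lives in entry 0x100 of the page table that page
 * directory slot 0x300 points at.
 */
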
/**************************************************************************
 * Map virtual_address to physical_address.                               *
 * Both addresses should be page-aligned.                                 *
 *************************************************************************/
int mm_map(u32_t virtual_address, u32_t physical_address,
           u32_t user_mode, u32_t writable)
{
    u32_t directory_index = (virtual_address >> 22) & 0x3FF;
    u32_t table_index = (virtual_address >> 12) & 0x3FF;
    pagedirectory_entry_t * page_dir = page_directory;

    if (page_dir[directory_index] == 0)
    {
        /* allocate a new page table */
        u32_t new_table_address = mm_page_alloc();
        if (new_table_address == 0)
        {
            return 0;
        }
        /*
         * clear the new page table (all of RAM is identity-mapped, so
         * the physical address is directly usable); see the note in
         * mm_early_map()
         */
        u32_t * new_table = (u32_t *) new_table_address;
        for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
        {
            new_table[i] = 0;
        }
        page_dir[directory_index] = new_table_address
                                    | 0x1 << 2 /* PTs can be user mode */
                                    | 0x1 << 1 /* writable */
                                    | 0x1;     /* present */
    }
    u32_t page_table_address = page_dir[directory_index] & PAGE_HIGH_MASK;

    u32_t * page_table = (u32_t *) page_table_address;

    page_table[table_index] = (physical_address & PAGE_HIGH_MASK)
                              | (user_mode & 0x1) << 2
                              | (writable & 0x1) << 1
                              | 0x1; /* present */

    return 1;
}

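/*
 * Illustrative sketch (not compiled): how a caller might combine
 * mm_page_alloc() and mm_map() to back a virtual page with fresh RAM.
 * The function name and error convention are hypothetical, not part of
 * this file's API.
 */
#if 0
int example_alloc_and_map(u32_t virtual_address)
{
    u32_t phys = mm_page_alloc();  /* physical base of a free page */
    if (phys == 0)
    {
        return 0;                  /* out of physical memory */
    }
    /* kernel-only, writable mapping */
    if (!mm_map(virtual_address, phys, 0, 1))
    {
        return 0;                  /* could not allocate a page table */
    }
    return 1;
}
#endif
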
/**************************************************************************
 * Returns the physical base address of a page in RAM                     *
 * or 0 if no pages were available                                        *
 * This 'early' version is to be called during segmented bootstrapping    *
 *************************************************************************/
u32_t mm_early_page_alloc()
{
    u32_t page_address = 0;
    if (mm_free_page_ptr != NULL)
    {
        /*
         * in segmented mode a physical address must be offset by
         * KERNEL_OFFSET before it can be dereferenced
         */
        u32_t * page_ptr = mm_free_page_ptr;
        page_ptr = (u32_t *) ((u32_t)page_ptr + (u32_t)KERNEL_OFFSET);
        page_address = (u32_t) mm_free_page_ptr;
        /* pop the head of the free list; its first word is the next link */
        mm_free_page_ptr = (u32_t *) *page_ptr;
        mm_num_free_pages--;
        mm_num_used_pages++;
    }
    return page_address;
}

/**************************************************************************
 * Returns the physical base address of a page in RAM                     *
 * or 0 if no pages were available                                        *
 *************************************************************************/
u32_t mm_page_alloc()
{
    u32_t page_address = 0;
    if (mm_free_page_ptr != NULL)
    {
        /* all RAM is identity-mapped, so the physical address is usable */
        u32_t * page_ptr = mm_free_page_ptr;
        page_address = (u32_t) mm_free_page_ptr;
        /* pop the head of the free list; its first word is the next link */
        mm_free_page_ptr = (u32_t *) *page_ptr;
        mm_num_free_pages--;
        mm_num_used_pages++;
    }
    return page_address;
}

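/*
 * There is no page-free routine in this file; the inverse of the pop in
 * mm_page_alloc() would push a page back onto the intrusive list.  A
 * minimal sketch under that assumption (hypothetical name; assumes
 * paging is active and the page is no longer mapped anywhere):
 */
#if 0
void example_page_free(u32_t page_address)
{
    /* link the returned page to the current head of the free list */
    *(u32_t *) page_address = (u32_t) mm_free_page_ptr;
    mm_free_page_ptr = (u32_t *) page_address;
    mm_num_free_pages++;
    mm_num_used_pages--;
}
#endif
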
void mm_print_memory_map()
{
    kprintf("Bootloader provided memory map:\n");
    kprintf("  Base Address       Length\n");
    for (int i = 0; i < mm_mmap_num_entries; i++)
    {
        kprintf("  0x%016X 0x%016X (%l bytes / %l KB / %l MB)\n",
                mm_mmap_entries[i].base,
                mm_mmap_entries[i].length,
                mm_mmap_entries[i].length,
                mm_mmap_entries[i].length >> 10,
                mm_mmap_entries[i].length >> 20);
    }
    kprintf("Used pages: %d\n", mm_num_used_pages);
    kprintf("Free pages: %d\n", mm_num_free_pages);
}