hos/kernel/mm/mm.cc
josh 5ac343a2e8 fixed mm bug, now crashing because IDT isn't mapped via paging
git-svn-id: svn://anubis/hos/trunk@97 5b3e749e-e535-0410-8002-a9bb6afbdfca
2010-01-27 05:25:05 +00:00

#include <stddef.h>
#include "mm.h"
#include "stack.h"
#include "boot/k_early_panic.h"
#include "lang/string.h"
#include "lang/kio.h"
#include "sys/cpu.h"
#define MM_MAX_MMAP_ENTRIES 64
/* mmap management */
static mm_mem_range_t mm_mmap_entries[MM_MAX_MMAP_ENTRIES];
static int mm_mmap_num_entries = 0;
/* free/total page statistics */
static int mm_num_free_pages = 0;
static int mm_num_total_pages = 0;
gdtr_t mm_gdtr;
static u64_t * mm_gdt; /* NOT mapped into virtual address space */
static u32_t mm_heap_base; /* virtual address of start of heap */
static pagedirectory_t * early_page_directory_ptr;
/* physical addresses of page allocation pages to map in before the heap */
static u32_t page_alloc_page_numbers[1025]; /* enough to track 4GB of RAM */
static int last_page_alloc_page = -1; /* index of last valid page alloc page */
static int page_alloc_page_index = -1; /* highest valid entry in the current
                                          page alloc page, or -1 if none */
static mm_page_alloc_page_t * page_alloc_pages; /* virtual mapping of the
                                                   page alloc pages */
static int page_alloc_pages_index = -1; /* index of the current (topmost)
                                           page alloc page */
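/*
 * Free-page bookkeeping scheme: free physical pages are recorded into
 * "page alloc pages", which are themselves free pages borrowed to hold up
 * to NUM_PAGETABLE_ENTRIES physical page addresses each. Pages are handed
 * out LIFO: first the entries of the current page alloc page, then the
 * page alloc page itself, and then the next one down.
 */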
/**************************************************************************
* Internal Functions *
*************************************************************************/
static void record_phys_page(u32_t base_address);
/**************************************************************************
* Compile-time tests *
*************************************************************************/
static void build_tests()
{
    /* check that a mm_page_alloc_page_t fits exactly in a page */
    BUILD_BUG_ON(sizeof(mm_page_alloc_page_t) != PAGE_SIZE);
}
/**************************************************************************
* This function is run in segmented memory before paging is in effect. *
* Record an mmap entry from the bootloader into our kernel space. *
*************************************************************************/
void mm_record_mmap_entry(mb_mmap_t * mmap)
{
    if (mmap->type != MB_MMAP_TYPE_RAM)
    {
        /* only usable RAM ranges are recorded */
        return;
    }
    if (mm_mmap_num_entries >= MM_MAX_MMAP_ENTRIES)
    {
        k_early_panic("Too many mmap entries!");
    }
    mm_mmap_entries[mm_mmap_num_entries].base = mmap->base;
    mm_mmap_entries[mm_mmap_num_entries].length = mmap->length;
    mm_mmap_num_entries++;
}
/**************************************************************************
* This function is run in segmented memory before paging is in effect. *
* It is run after the bootloader information has been read, so we can *
* overwrite that memory now. *
*************************************************************************/
void mm_bootstrap()
{
    u32_t max_ram_address = KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE - 1;
    if (mm_mmap_num_entries < 1)
    {
        k_early_panic("No mmap entries read from bootloader!");
    }
    mm_heap_base = (u32_t) KERNEL_END;
    page_alloc_pages = (mm_page_alloc_page_t *) mm_heap_base;
    for (int mmap_idx = 0; mmap_idx < mm_mmap_num_entries; mmap_idx++)
    {
        u32_t base_address = mm_mmap_entries[mmap_idx].base;
        u32_t address_limit = base_address + mm_mmap_entries[mmap_idx].length;
        if (base_address & PAGE_LOW_MASK)
        {
            /* start of this mmap range is not page-aligned: round up */
            base_address = (base_address & PAGE_HIGH_MASK) + PAGE_SIZE;
        }
        if (address_limit & PAGE_LOW_MASK)
        {
            /* end of this mmap range is not page-aligned: round down */
            address_limit &= PAGE_HIGH_MASK;
        }
        /* record the highest RAM address found */
        if ((address_limit - 1) > max_ram_address)
        {
            max_ram_address = (address_limit - 1);
        }
        /*
         * loop through every page in the mmap range and add
         * pages into the free page list
         */
        for (; base_address < address_limit; base_address += PAGE_SIZE)
        {
            /* make sure the page doesn't overlap the kernel image */
            if ( base_address + PAGE_SIZE <= KERNEL_PHYSICAL_ADDRESS
              || base_address >= KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE )
            {
                record_phys_page(base_address);
                mm_num_free_pages++;
            }
            mm_num_total_pages++;
        }
    }
    if (mm_num_free_pages < 10)
    {
        k_early_panic("Not enough free pages of RAM!");
    }
    /* ok, now mm_early_page_alloc() should be functional */
    /* move the heap base up past the page allocation pages */
    mm_heap_base = (u32_t) &page_alloc_pages[last_page_alloc_page + 1];
    /* allocate the page directory */
    u32_t page_directory_phys = mm_early_page_alloc();
    early_page_directory_ptr = (pagedirectory_t *)
        (page_directory_phys + KERNEL_OFFSET);
    /* clear the page directory */
    for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
    {
        (*early_page_directory_ptr)[i] = 0;
    }
    /* ok, now mm_early_map() should be functional */
    /*
     * Install the page directory as its own page table, so the directory
     * and every page table can be modified through the self-reference
     * window once paging is on. mm_map() depends on this entry.
     */
    (*early_page_directory_ptr)[PAGE_DIR_SELF_REF_INDEX] = page_directory_phys
        | 0x1 << 1 /* writable */
        | 0x1; /* present */
    /* map in the physical page allocator pages */
    for (int i = 0; i <= last_page_alloc_page; i++)
    {
        mm_early_map((u32_t) &page_alloc_pages[i],
                     page_alloc_page_numbers[i], 0, 1);
    }
    /* map the kernel's virtual address space onto its physical location */
    for (u32_t page_base = KERNEL_VIRTUAL_ADDRESS;
         page_base < KERNEL_VIRTUAL_ADDRESS + KERNEL_SIZE;
         page_base += PAGE_SIZE)
    {
        /* map page_base to page_base - KERNEL_OFFSET */
        mm_early_map(page_base, page_base - KERNEL_OFFSET, 0, 1);
    }
    /* identity-map console memory */
    mm_early_map(CONSOLE_MEMORY, CONSOLE_MEMORY, 0, 1);
    /* set up the global descriptor table */
    u32_t gdt_base = mm_early_page_alloc();
    mm_gdt = (u64_t *) (gdt_base + KERNEL_OFFSET);
    mm_gdt[0] = 0x0ull; /* mandatory null descriptor */
    mm_gdt[1] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 1); /* kernel code */
    mm_gdt[2] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 0); /* kernel data */
    mm_gdtr.limit = 3 * sizeof(mm_gdt[0]) - 1;
    mm_gdtr.base = gdt_base;
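    /*
     * Load the new GDT and reload every segment register: the far jump
     * reloads CS with the kernel code selector (0x08), and the moves load
     * the kernel data selector (0x10) into the data and stack segments.
     * mm_gdtr.base holds the GDT's physical address, which is only a valid
     * linear address while paging is still off; the GDT page lies outside
     * the kernel image, so it is not yet mapped once paging is enabled.
     */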
    __asm__ __volatile__ (
        "lgdt (mm_gdtr);\n"     /* load the new GDTR */
        "jmp $0x08, $42f;\n"    /* far jump reloads CS with the code selector */
        "42:\n"
        "mov $0x10, %%cx\n"     /* kernel data selector */
        "mov %%cx, %%ss\n"
        "mov %%cx, %%ds\n"
        "mov %%cx, %%es\n"
        "mov %%cx, %%fs\n"
        "mov %%cx, %%gs\n"
        : /* no outputs */
        : /* no inputs */
        : "ecx");
    /* set the page directory base register */
    write_cr3(page_directory_phys);
    /* set up permanent stack before enabling paging */
    stack_bootstrap();
    /* turn on paging (CR0.PG); 1u avoids shifting into the sign bit */
    write_cr0(read_cr0() | (1u << 31));
}
/**************************************************************************
* This function is run in segmented memory before paging is in effect. *
* Record a physical page in the page allocation pages. *
*************************************************************************/
static void record_phys_page(u32_t base_address)
{
    if (page_alloc_pages_index < 0
        || page_alloc_page_index == NUM_PAGETABLE_ENTRIES - 1)
    {
        /* no page alloc page yet, or the current one is full:
         * this page becomes a new page alloc page */
        last_page_alloc_page++;
        page_alloc_pages_index++;
        page_alloc_page_numbers[last_page_alloc_page] = base_address;
        page_alloc_page_index = -1; /* the new page holds no entries yet */
    }
    else
    {
        /* store this page's address in the current page alloc page */
        mm_page_alloc_page_t * current_page_alloc_page =
            (mm_page_alloc_page_t *)
            (page_alloc_page_numbers[page_alloc_pages_index] + KERNEL_OFFSET);
        page_alloc_page_index++;
        (*current_page_alloc_page)[page_alloc_page_index] = base_address;
    }
}
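/*
 * Example: recording free pages A, B, C (in that order) makes A a page
 * alloc page with A[0] = B and A[1] = C; mm_early_page_alloc() then hands
 * back C, then B, then finally A itself once its entries are exhausted.
 */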
/**************************************************************************
* Map virtual_address to physical_address. *
* Both addresses should be page-aligned. *
* This 'early' version can be used during segmented bootstrapping *
*************************************************************************/
int mm_early_map(u32_t virtual_address, u32_t physical_address,
                 u32_t user_mode, u32_t writable)
{
    u32_t directory_index = (virtual_address >> 22) & 0x3FF;
    u32_t table_index = (virtual_address >> 12) & 0x3FF;
    if ((*early_page_directory_ptr)[directory_index] == 0)
    {
        /* allocate a new page table */
        u32_t page_table_phys = mm_early_page_alloc();
        if (page_table_phys == 0)
        {
            return 0;
        }
        /* clear the new page table so no stale entries appear present */
        u32_t * new_page_table = (u32_t *) (page_table_phys + KERNEL_OFFSET);
        for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
        {
            new_page_table[i] = 0;
        }
        (*early_page_directory_ptr)[directory_index] = page_table_phys
            | 0x1 << 2 /* PTs can be user mode */
            | 0x1 << 1 /* writable */
            | 0x1;     /* present */
    }
    u32_t page_table_phys =
        (*early_page_directory_ptr)[directory_index] & PAGE_HIGH_MASK;
    u32_t * page_table = (u32_t *) (page_table_phys + KERNEL_OFFSET);
    page_table[table_index] = (physical_address & PAGE_HIGH_MASK)
        | (user_mode & 0x1) << 2
        | (writable & 0x1) << 1
        | 0x1; /* present */
    return 1;
}
/**************************************************************************
* Map virtual_address to physical_address. *
* Both addresses should be page-aligned. *
*************************************************************************/
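/*
 * This relies on the page directory having been installed as its own page
 * table at PAGE_DIR_SELF_REF_INDEX in mm_bootstrap(), and assumes that
 * PAGE_DIR_SELF_REF == PAGE_DIR_SELF_REF_INDEX << 22. Every page table is
 * then visible in the 4MB window at PAGE_DIR_SELF_REF (the table for
 * directory entry n at PAGE_DIR_SELF_REF + (n << 12)), and the directory
 * itself is visible at PAGE_DIR_SELF_REF | (PAGE_DIR_SELF_REF_INDEX << 12).
 */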
int mm_map(u32_t virtual_address, u32_t physical_address,
           u32_t user_mode, u32_t writable)
{
    u32_t directory_index = (virtual_address >> 22) & 0x3FF;
    u32_t table_index = (virtual_address >> 12) & 0x3FF;
    pagedirectory_entry_t * page_dir_entry = (pagedirectory_entry_t *)
        (PAGE_DIR_SELF_REF | (PAGE_DIR_SELF_REF_INDEX << 12)
         | (directory_index << 2));
    if (*page_dir_entry == 0)
    {
        /* allocate a new page table */
        u32_t page_table_phys = mm_page_alloc();
        if (page_table_phys == 0)
        {
            return 0;
        }
        *page_dir_entry = page_table_phys
            | 0x1 << 2 /* PTs can be user mode */
            | 0x1 << 1 /* writable */
            | 0x1;     /* present */
        /* the new table is now visible through the self-reference window;
         * clear it so no stale entries appear present */
        u32_t * new_page_table = (u32_t *)
            (PAGE_DIR_SELF_REF | (directory_index << 12));
        for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
        {
            new_page_table[i] = 0;
        }
    }
    pagedirectory_entry_t * page_table_entry = (pagedirectory_entry_t *)
        (PAGE_DIR_SELF_REF | (directory_index << 12) | (table_index << 2));
    *page_table_entry = (physical_address & PAGE_HIGH_MASK)
        | (user_mode & 0x1) << 2
        | (writable & 0x1) << 1
        | 0x1; /* present */
    /* flush any stale TLB entry in case virtual_address was already mapped */
    __asm__ __volatile__ ("invlpg (%0)" : : "r" (virtual_address) : "memory");
    return 1;
}
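/*
 * Typical use (hypothetical virtual address): back an unused virtual page
 * with a freshly allocated physical page, kernel-only and writable:
 *
 *     u32_t phys = mm_page_alloc();
 *     if (phys != 0 && mm_map(0xD0000000, phys, 0, 1))
 *     {
 *         // the page is now usable at 0xD0000000
 *     }
 */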
/**************************************************************************
* Returns the physical base address of a page in RAM *
* or 0 if no pages were available *
* This 'early' version can be used during segmented bootstrapping *
*************************************************************************/
u32_t mm_early_page_alloc()
{
    u32_t page_address = 0;
    if (page_alloc_pages_index >= 0)
    {
        if (page_alloc_page_index >= 0)
        {
            /* hand out the most recently recorded entry */
            mm_page_alloc_page_t * current_pap = (mm_page_alloc_page_t *)
                (page_alloc_page_numbers[page_alloc_pages_index]
                 + KERNEL_OFFSET);
            page_address = (*current_pap)[page_alloc_page_index];
            page_alloc_page_index--;
        }
        else
        {
            /* all entries consumed: return the page alloc page itself
             * and fall back to the previous (full) one */
            page_address = page_alloc_page_numbers[page_alloc_pages_index];
            page_alloc_pages_index--;
            page_alloc_page_index = NUM_PAGETABLE_ENTRIES - 1;
        }
        mm_num_free_pages--;
    }
    return page_address;
}
/**************************************************************************
* Returns the physical base address of a page in RAM *
* or 0 if no pages were available *
*************************************************************************/
u32_t mm_page_alloc()
{
    u32_t page_address = 0;
    if (page_alloc_pages_index >= 0)
    {
        if (page_alloc_page_index >= 0)
        {
            /* same as mm_early_page_alloc(), but through the virtual
             * mapping of the page alloc pages */
            page_address =
                page_alloc_pages[page_alloc_pages_index][page_alloc_page_index];
            page_alloc_page_index--;
        }
        else
        {
            /* all entries consumed: return the page alloc page itself */
            page_address = page_alloc_page_numbers[page_alloc_pages_index];
            page_alloc_pages_index--;
            page_alloc_page_index = NUM_PAGETABLE_ENTRIES - 1;
        }
        mm_num_free_pages--;
    }
    return page_address;
}
void mm_print_memory_map()
{
    kprintf("Bootloader provided memory map:\n");
    kprintf("  Base Address        Length\n");
    for (int i = 0; i < mm_mmap_num_entries; i++)
    {
        kprintf("  0x%016X  0x%016X (%l bytes / %l KB / %l MB)\n",
                mm_mmap_entries[i].base,
                mm_mmap_entries[i].length,
                mm_mmap_entries[i].length,
                mm_mmap_entries[i].length >> 10,
                mm_mmap_entries[i].length >> 20);
    }
    kprintf("Used pages: %d\n", mm_num_total_pages - mm_num_free_pages);
    kprintf("Free pages: %d\n", mm_num_free_pages);
}