Compiling again after updating mm_early_map() and mm_map() for the new page allocation scheme

git-svn-id: svn://anubis/hos/trunk@96 5b3e749e-e535-0410-8002-a9bb6afbdfca
This commit is contained in:
josh 2010-01-27 05:06:12 +00:00
parent 08491bace9
commit 1027b8c346
6 changed files with 59 additions and 43 deletions

View File

@ -7,6 +7,7 @@
#include "sys/cpu.h"
#include "k_early_panic.h"
#include "mm/mm.h"
#include "mm/stack.h"
#include "lang/kio.h"
#include "isr/interrupts.h"

View File

@ -22,8 +22,6 @@
#define KERNEL_VIRTUAL_ADDRESS ((u32_t)KERNEL_CODE)
#define KERNEL_SIZE ((u32_t)(KERNEL_END - KERNEL_CODE))
#define KERNEL_STACK_TOP 0x20000000
#define KERNEL_TIMER_FREQ 1000
#define BUILD_BUG_ON(condition) \

View File

@ -21,6 +21,7 @@ static int mm_num_total_pages = 0;
gdtr_t mm_gdtr;
static u64_t * mm_gdt; /* NOT mapped into virtual address space */
static u32_t mm_heap_base; /* virtual address of start of heap */
static pagedirectory_t * early_page_directory_ptr;
/* physical addresses of page allocation pages to map in before the heap */
static u32_t page_alloc_page_numbers[1025]; /* supports 4GB physical memory */
@ -120,30 +121,37 @@ void mm_bootstrap()
}
}
for (int i = 0; i <= last_page_alloc_page; i++)
{
mm_early_map(&page_alloc_pages[i], page_alloc_page_numbers[i], 0, 1);
}
/* move the heap back to after the page allocation pages */
mm_heap_base = &page_alloc_pages[last_page_alloc_page + 1];
if (mm_num_free_pages < 10)
{
k_early_panic("Not enough free pages of RAM!");
}
/* ok, now mm_page_alloc() should be functional */
/* ok, now mm_early_page_alloc() should be functional */
/* move the heap back to after the page allocation pages */
mm_heap_base = (u32_t) &page_alloc_pages[last_page_alloc_page + 1];
/* allocate the page directory */
u32_t page_directory_phys = mm_page_alloc();
pagedirectory_entry_t * page_dir_virt =
(pagedirectory_entry_t *) (page_directory_phys + KERNEL_OFFSET);
u32_t page_directory_phys = mm_early_page_alloc();
early_page_directory_ptr = (pagedirectory_t *)
(page_directory_phys + KERNEL_OFFSET);
/* Clear the page directory */
for (unsigned int i = 0; i < NUM_PAGETABLE_ENTRIES; i++)
{
page_dir_virt[i] = 0;
(*early_page_directory_ptr)[i] = 0;
}
/* ok, now mm_early_map() should be functional */
/* map in the page directory itself so we can modify it once paging is on */
mm_early_map(PAGE_DIR_SELF_REF, page_directory_phys, 0, 1);
/* map in the physical page allocator pages */
for (int i = 0; i <= last_page_alloc_page; i++)
{
mm_early_map((u32_t) &page_alloc_pages[i],
page_alloc_page_numbers[i], 0, 1);
}
/* map the kernel's virtual address space into RAM */
@ -159,7 +167,7 @@ void mm_bootstrap()
mm_early_map(CONSOLE_MEMORY, CONSOLE_MEMORY, 0, 1);
/* set up the global descriptor table */
u32_t gdt_base = mm_page_alloc();
u32_t gdt_base = mm_early_page_alloc();
mm_gdt = (u64_t *) ((u32_t) gdt_base + (u32_t) KERNEL_OFFSET);
mm_gdt[0] = 0x0ull;
mm_gdt[1] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 1); /* kernel code */
@ -205,7 +213,8 @@ static void record_phys_page(u32_t base_address)
}
else
{
page_alloc_page_t * current_page_alloc_page = (page_alloc_page_t *)
mm_page_alloc_page_t * current_page_alloc_page =
(mm_page_alloc_page_t *)
(page_alloc_page_numbers[page_alloc_pages_index] + KERNEL_OFFSET);
page_alloc_page_index++;
(*current_page_alloc_page)[page_alloc_page_index] = base_address;
@ -226,26 +235,24 @@ int mm_early_map(u32_t virtual_address, u32_t physical_address,
{
u32_t directory_index = (virtual_address >> 22) & 0x3FF;
u32_t table_index = (virtual_address >> 12) & 0x3FF;
pagedirectory_entry_t * page_dir = page_directory;
page_dir = (pagedirectory_entry_t *)((u32_t)page_dir + KERNEL_OFFSET);
if (page_dir[directory_index] == 0)
if ((*early_page_directory_ptr)[directory_index] == 0)
{
/* allocate a new page table */
u32_t page_table_address = mm_page_alloc();
if (page_table_address == 0)
u32_t page_table_phys = mm_early_page_alloc();
if (page_table_phys == 0)
{
return 0;
}
page_dir[directory_index] = page_table_address
(*early_page_directory_ptr)[directory_index] = page_table_phys
| 0x1 << 2 /* PTs can be user mode */
| 0x1 << 1 /* writable */
| 0x1; /* present */
}
u32_t page_table_address = page_dir[directory_index] & PAGE_HIGH_MASK;
u32_t page_table_phys =
(*early_page_directory_ptr)[directory_index] & PAGE_HIGH_MASK;
u32_t * page_table = (u32_t *) page_table_address;
page_table = (u32_t *)((u32_t)page_table + (u32_t)KERNEL_OFFSET);
u32_t * page_table = (u32_t *) (page_table_phys + KERNEL_OFFSET);
page_table[table_index] = (physical_address & PAGE_HIGH_MASK)
| (user_mode & 0x1) << 2
@ -264,26 +271,28 @@ int mm_map(u32_t virtual_address, u32_t physical_address,
{
u32_t directory_index = (virtual_address >> 22) & 0x3FF;
u32_t table_index = (virtual_address >> 12) & 0x3FF;
pagedirectory_entry_t * page_dir = page_directory;
pagedirectory_entry_t * page_dir_entry = (pagedirectory_entry_t *)
(PAGE_DIR_SELF_REF | (PAGE_DIR_SELF_REF_INDEX << 12)
| (directory_index << 2));
if (page_dir[directory_index] == 0)
if (*page_dir_entry == 0)
{
/* allocate a new page table */
u32_t page_table_address = mm_page_alloc();
if (page_table_address == 0)
u32_t page_table_phys = mm_page_alloc();
if (page_table_phys == 0)
{
return 0;
}
page_dir[directory_index] = page_table_address
*page_dir_entry = page_table_phys
| 0x1 << 2 /* PTs can be user mode */
| 0x1 << 1 /* writable */
| 0x1; /* present */
}
u32_t page_table_address = page_dir[directory_index] & PAGE_HIGH_MASK;
u32_t * page_table = (u32_t *) page_table_address;
pagedirectory_entry_t * page_table_entry = (pagedirectory_entry_t *)
(PAGE_DIR_SELF_REF | (directory_index << 12) | (table_index << 2));
page_table[table_index] = (physical_address & PAGE_HIGH_MASK)
*page_table_entry = (physical_address & PAGE_HIGH_MASK)
| (user_mode & 0x1) << 2
| (writable & 0x1) << 1
| 0x1; /* present */
@ -294,6 +303,7 @@ int mm_map(u32_t virtual_address, u32_t physical_address,
/**************************************************************************
* Returns the physical base address of a page in RAM *
* or 0 if no pages were available *
* This 'early' version can be used during segmented bootstrapping *
*************************************************************************/
u32_t mm_early_page_alloc()
{
@ -302,7 +312,7 @@ u32_t mm_early_page_alloc()
{
if (page_alloc_page_index >= 0)
{
page_alloc_page_t * current_pap = (page_alloc_page_t *)
mm_page_alloc_page_t * current_pap = (mm_page_alloc_page_t *)
(page_alloc_page_numbers[page_alloc_pages_index]
+ KERNEL_OFFSET);
page_address = (*current_pap)[page_alloc_page_index];

View File

@ -13,6 +13,9 @@ typedef u32_t pagedirectory_entry_t;
#define NUM_PAGETABLE_ENTRIES (PAGE_SIZE / sizeof(pagedirectory_entry_t))
#define PAGE_DIR_SELF_REF (0x20000000 - (PAGE_SIZE * NUM_PAGETABLE_ENTRIES))
#define PAGE_DIR_SELF_REF_INDEX ((PAGE_DIR_SELF_REF >> 22) & 0x3FF)
typedef pagedirectory_entry_t pagedirectory_t[NUM_PAGETABLE_ENTRIES];
typedef struct

View File

@ -12,7 +12,7 @@ void stack_bootstrap()
u32_t stack_page_virt = KERNEL_STACK_TOP - PAGE_SIZE;
for (int i = 0; i < STACK_INITIAL_SIZE; i++)
{
u32_t stack_page_phys = mm_page_alloc();
u32_t stack_page_phys = mm_early_page_alloc();
mm_early_map(stack_page_virt, stack_page_phys, 0, 1);
stack_page_virt -= PAGE_SIZE;
}

View File

@ -2,6 +2,10 @@
#ifndef STACK_H
#define STACK_H
#include "mm/mm.h"
#define KERNEL_STACK_TOP PAGE_DIR_SELF_REF
#define STACK_INITIAL_SIZE 2 /* number of initial stack pages */
void stack_bootstrap();