reworking memory management... again

git-svn-id: svn://anubis/hos/trunk@94 5b3e749e-e535-0410-8002-a9bb6afbdfca
This commit is contained in:
josh 2010-01-25 23:03:23 +00:00
parent 24485b8143
commit 96ba9fabb1
2 changed files with 34 additions and 23 deletions

View File

@ -10,29 +10,42 @@
#define MM_MAX_MMAP_ENTRIES 64
static pagedirectory_entry_t * page_directory;
/* mmap management */
static mm_mem_range_t mm_mmap_entries[MM_MAX_MMAP_ENTRIES];
static int mm_mmap_num_entries = 0;
/* free/total page statistics */
static int mm_num_free_pages = 0;
static int mm_num_total_pages = 0;
gdtr_t mm_gdtr;
static u64_t * mm_gdt;
static u32_t mm_heap_base;
static u64_t * mm_gdt; /* NOT mapped into virtual address space */
static u32_t mm_heap_base; /* virtual address of start of heap */
/* physical addresses of page allocation pages to map in before the heap */
static u32_t page_alloc_page_numbers[1025]; /* supports 4GB physical memory */
static u32_t num_page_alloc_pages = 0;
static mm_page_alloc_page_t * current_page_alloc_page = NULL;
static u32_t page_alloc_page_index = 0;
static int last_page_alloc_page = -1; /* index of last valid page alloc page */
static int page_alloc_page_index = -1;
static mm_page_alloc_page_t * page_alloc_pages;
static int page_alloc_pages_index = -1;
/**************************************************************************
* Internal Functions *
*************************************************************************/
static void record_phys_page(u32_t base_address);
/**************************************************************************
* Compile-time tests *
*************************************************************************/
/* Compile-time sanity checks; never called at runtime — the BUILD_BUG_ON
 * macro turns a false condition into a compile error, so merely compiling
 * this function proves the assertions hold. */
static void build_tests(void)   /* (void): empty () is an unprototyped declaration pre-C23 */
{
	/* check that a mm_page_alloc_page_t fits exactly in a page */
	BUILD_BUG_ON(sizeof(mm_page_alloc_page_t) != PAGE_SIZE);
}
/**************************************************************************
* This function is run in segmented memory before paging is in effect. *
* Record an mmap entry from the bootloader into our kernel space. *
*************************************************************************/
void mm_record_mmap_entry(mb_mmap_t * mmap)
{
@ -59,11 +72,6 @@ void mm_record_mmap_entry(mb_mmap_t * mmap)
void mm_bootstrap()
{
u32_t max_ram_address = KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE - 1;
/* this function requires a big enough initial stack to run */
BUILD_BUG_ON((STACK_INITIAL_SIZE * PAGE_SIZE) < 8000);
/* check that a mm_page_alloc_page_t fits exactly in a page */
BUILD_BUG_ON(sizeof(mm_page_alloc_page_t) != PAGE_SIZE);
if (mm_mmap_num_entries < 1)
{
@ -112,10 +120,9 @@ void mm_bootstrap()
}
}
for (unsigned int i = 0; i < num_page_alloc_pages; i++)
for (int i = 0; i <= last_page_alloc_page; i++)
{
mm_early_map(mm_heap_base, page_alloc_page_numbers[i], 0, 1);
current_page_alloc_page = (mm_page_alloc_page_t *) mm_heap_base;
/* move the heap back to after the page allocation pages */
mm_heap_base += PAGE_SIZE;
}
@ -129,7 +136,6 @@ void mm_bootstrap()
/* allocate the page directory */
u32_t page_directory_phys = mm_page_alloc();
page_directory = (pagedirectory_entry_t *) page_directory_phys;
pagedirectory_entry_t * page_dir_virt =
(pagedirectory_entry_t *) (page_directory_phys + KERNEL_OFFSET);
@ -139,7 +145,7 @@ void mm_bootstrap()
page_dir_virt[i] = 0;
}
/* now map the kernel's virtual address space into RAM */
/* map the kernel's virtual address space into RAM */
for (u32_t page_base = KERNEL_VIRTUAL_ADDRESS;
page_base < KERNEL_VIRTUAL_ADDRESS + KERNEL_SIZE;
page_base += PAGE_SIZE)
@ -148,6 +154,9 @@ void mm_bootstrap()
mm_early_map(page_base, page_base - KERNEL_OFFSET, 0, 1);
}
/* map console memory */
mm_early_map(CONSOLE_MEMORY, CONSOLE_MEMORY, 0, 1);
/* set up the global descriptor table */
u32_t gdt_base = mm_page_alloc();
mm_gdt = (u64_t *) ((u32_t) gdt_base + (u32_t) KERNEL_OFFSET);
@ -172,7 +181,7 @@ void mm_bootstrap()
: "ecx");
/* set the page directory base register */
write_cr3(page_directory);
write_cr3(page_directory_phys);
/* turn on paging */
write_cr0(read_cr0() | (1 << 31));
@ -180,9 +189,15 @@ void mm_bootstrap()
stack_bootstrap();
}
/**************************************************************************
* This function is run in segmented memory before paging is in effect. *
* Record a physical page in the page allocation pages. *
*************************************************************************/
static void record_phys_page(u32_t base_address)
{
if (page_alloc_page_index == 0)
static mm_page_alloc_page_t * current_page_alloc_page = NULL;
if (page_alloc_page_index < 0
|| page_alloc_page_index >= NUM_PAGETABLE_ENTRIES)
{
/* allocate a new page alloc page */
mm_page_alloc_page_t * old_page_alloc_page = current_page_alloc_page;
@ -192,7 +207,7 @@ static void record_phys_page(u32_t base_address)
page_alloc_page_index =
sizeof(current_page_alloc_page->pages)
/ sizeof(current_page_alloc_page->pages[0]) - 1;
page_alloc_page_numbers[num_page_alloc_pages++] = base_address;
page_alloc_page_numbers[last_page_alloc_page++] = base_address;
}
else
{

View File

@ -21,11 +21,7 @@ typedef struct
u64_t length;
} mm_mem_range_t;
typedef struct mm_page_alloc_page_s
{
u32_t pages[PAGE_SIZE / sizeof(u32_t) - 1];
mm_page_alloc_page_s * next;
} mm_page_alloc_page_t;
typedef u32_t mm_page_alloc_page_t[NUM_PAGETABLE_ENTRIES];
/* http://courses.ece.illinois.edu/ece391/references/descriptors.pdf */
/* granularity: 0: limit in bytes; 1: limit in pages */