kernel compiling with new page allocation mechanism but crashing
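The new mechanism replaces the old free list, which was threaded through the free pages themselves, with a chain of page-sized index nodes ("page alloc pages"): each node is exactly one page and holds an array of free physical page addresses plus a pointer to the next node. mm_bootstrap() records every usable physical page into the chain via record_phys_page(), maps the nodes in just below the heap, and mm_page_alloc() then pops addresses back out, handing out each node's own page once its entries are exhausted. Below is a minimal user-space sketch of that data structure and its pop discipline; u32_t, PAGE_SIZE, the function names and the fake page addresses here are stand-ins that mirror the diff, not the kernel's actual build environment.

    /* sketch.cc -- user-space model of the page-alloc-page chain */
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t u32_t;
    #define PAGE_SIZE 4096u

    /* one page-sized node: 1023 address slots plus a next pointer
       (exactly PAGE_SIZE on the kernel's 32-bit target) */
    typedef struct page_node_s
    {
        u32_t pages[PAGE_SIZE / sizeof(u32_t) - 1];
        struct page_node_s * next;
    } page_node_t;

    static page_node_t * head = NULL;
    static u32_t slot = 0;  /* slots [slot+1 .. 1022] of head are filled */

    /* record one free page (mirrors record_phys_page in the diff);
       node_storage simulates reusing the recorded page as the new node */
    static void record_page(u32_t addr, page_node_t * node_storage)
    {
        if (slot == 0)
        {
            node_storage->next = head;      /* push a fresh index node */
            head = node_storage;
            slot = sizeof(head->pages) / sizeof(head->pages[0]) - 1;
            (void) addr;  /* in the kernel, addr is the node's own page */
        }
        else
        {
            head->pages[slot--] = addr;     /* fill downward */
        }
    }

    /* pop one page (mirrors the happy path of mm_page_alloc) */
    static u32_t alloc_page(void)
    {
        if (head == NULL)
            return 0;                       /* out of pages */
        slot++;                             /* consume upward */
        return head->pages[slot];
    }

    int main(void)
    {
        /* the BUILD_BUG_ON in the diff; holds when pointers are 4 bytes */
        if (sizeof(void *) == 4)
            assert(sizeof(page_node_t) == PAGE_SIZE);

        static page_node_t node;
        record_page(0x100000, &node);   /* first page becomes the node */
        record_page(0x101000, &node);   /* stored at slot 1022 */
        record_page(0x102000, &node);   /* stored at slot 1021 */
        printf("%#x\n", alloc_page());  /* 0x102000 -- LIFO within a node */
        printf("%#x\n", alloc_page());  /* 0x101000 */
        return 0;
    }

Since each node accounts for roughly 1023 pages of RAM (itself plus the ~1022 addresses it stores), the page_alloc_pages[1025] bookkeeping array in the diff covers about 1025 x 1023 x 4 KB, i.e. just under 4 GB of physical memory, which is where the "supports 4GB physical memory" comment comes from.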
git-svn-id: svn://anubis/hos/trunk@91 5b3e749e-e535-0410-8002-a9bb6afbdfca
commit 4816102139
parent 0bc304328d
@@ -49,7 +49,7 @@ void isr(u8_t int_num, int_stack_t * istack)
 
 void interrupts_bootstrap()
 {
-    u32_t idt_base = mm_early_page_alloc();
+    u32_t idt_base = mm_page_alloc();
     u64_t * idt = (u64_t *) (idt_base + KERNEL_OFFSET);
     idt[0] = MAKE_IDT_DESCRIPTOR(KERNEL_CODE_SEGMENT, isr_0, 0);
     idt[1] = MAKE_IDT_DESCRIPTOR(KERNEL_CODE_SEGMENT, isr_1, 0);
kernel/mm/mm.cc (127 changed lines)
@@ -15,9 +15,20 @@ static mm_mem_range_t mm_mmap_entries[MM_MAX_MMAP_ENTRIES];
 static int mm_mmap_num_entries = 0;
 static int mm_num_free_pages = 0;
 static int mm_num_total_pages = 0;
 static u32_t * mm_free_page_ptr = NULL;
 gdtr_t mm_gdtr;
 static u64_t * mm_gdt;
 static u32_t mm_heap_base;
 
+/* physical addresses of page allocation pages to map in before the heap */
+static u32_t page_alloc_pages[1025]; /* supports 4GB physical memory */
+static u32_t num_page_alloc_pages = 0;
+static mm_page_alloc_page_t * current_page_alloc_page = NULL;
+static u32_t page_alloc_page_index = 0;
+
+/**************************************************************************
+ *                          Internal Functions                            *
+ *************************************************************************/
+static void record_phys_page(u32_t base_address);
+
 /**************************************************************************
  * This function is run in segmented memory before paging is in effect.  *
@@ -49,9 +60,9 @@ void mm_bootstrap()
     u32_t max_ram_address = KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE - 1;
     /* this function requires a big enough initial stack to run */
     BUILD_BUG_ON((STACK_INITIAL_SIZE * PAGE_SIZE) < 8000);
-    u32_t page_alloc_pages[1025];
-    u32_t num_page_alloc_pages = 0;
     mm_free_page_ptr = NULL;
+    /* check that a mm_page_alloc_page_t fits exactly in a page */
+    BUILD_BUG_ON(sizeof(mm_page_alloc_page_t) != PAGE_SIZE);
 
     if (mm_mmap_num_entries < 1)
     {
@@ -80,38 +91,32 @@ void mm_bootstrap()
             max_ram_address = (address_limit - 1);
         }
 
+        mm_heap_base = (u32_t) KERNEL_END;
+
         /*
         * loop through every page in the mmap range and add
         * pages into the free page linked list
         */
-        for (u32_t * last_page = NULL;
-             base_address < address_limit;
-             base_address += PAGE_SIZE)
+        for (; base_address < address_limit; base_address += PAGE_SIZE)
        {
-            /* check to make sure the RAM page is ok */
-            if ( base_address > 0 /* don't map address 0 */
-                 && ( base_address + PAGE_SIZE <= KERNEL_PHYSICAL_ADDRESS
-                      || base_address >= KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE ) )
+            /* check to make sure the page doesn't overlap the kernel */
+            if ( base_address + PAGE_SIZE <= KERNEL_PHYSICAL_ADDRESS
+                 || base_address >= KERNEL_PHYSICAL_ADDRESS + KERNEL_SIZE )
            {
                /* we found a page to add to the free list */
-                u32_t * page_virtual_address =
-                    (u32_t *)(base_address + KERNEL_OFFSET);
-                *page_virtual_address = 0;
-                if (last_page == NULL)
-                {
-                    mm_free_page_ptr = (u32_t *) base_address;
-                }
-                else
-                {
-                    *last_page = base_address;
-                }
-                last_page = page_virtual_address;
+                record_phys_page(base_address);
                mm_num_free_pages++;
            }
            mm_num_total_pages++;
        }
    }
 
+    for (int i = 0; i < num_page_alloc_pages; i++)
+    {
+        mm_early_map(mm_heap_base, page_alloc_pages[i], 0, 1);
+        current_page_alloc_page = (mm_page_alloc_page_t *) mm_heap_base;
+        mm_heap_base += PAGE_SIZE;
+    }
    if (mm_num_free_pages < 10)
    {
        k_early_panic("Not enough free pages of RAM!");
@@ -120,7 +125,7 @@ void mm_bootstrap()
     /* ok, now mm_page_alloc() should be functional */
 
     /* allocate the page directory */
-    u32_t page_directory_phys = mm_early_page_alloc();
+    u32_t page_directory_phys = mm_page_alloc();
     page_directory = (pagedirectory_entry_t *) page_directory_phys;
     pagedirectory_entry_t * page_dir_virt =
         (pagedirectory_entry_t *) (page_directory_phys + KERNEL_OFFSET);
@@ -131,17 +136,6 @@ void mm_bootstrap()
         page_dir_virt[i] = 0;
     }
 
-    /*
-     * map all of RAM into the virtual address space
-     * starting at address 0x0 (except the null page)
-     */
-    for (u32_t page_base = PAGE_SIZE;
-         page_base < max_ram_address;
-         page_base += PAGE_SIZE)
-    {
-        mm_early_map(page_base, page_base, 0, 1);
-    }
-
     /* now map the kernel's virtual address space into RAM */
     for (u32_t page_base = KERNEL_VIRTUAL_ADDRESS;
          page_base < KERNEL_VIRTUAL_ADDRESS + KERNEL_SIZE;
@@ -152,7 +146,7 @@ void mm_bootstrap()
     }
 
     /* set up the global descriptor table */
-    u32_t gdt_base = mm_early_page_alloc();
+    u32_t gdt_base = mm_page_alloc();
     mm_gdt = (u64_t *) ((u32_t) gdt_base + (u32_t) KERNEL_OFFSET);
     mm_gdt[0] = 0x0ull;
     mm_gdt[1] = MAKE_DESCRIPTOR(0, 0xFFFFF, 1, 0, 1, 1); /* kernel code */
@@ -183,6 +177,27 @@ void mm_bootstrap()
     stack_bootstrap();
 }
 
+static void record_phys_page(u32_t base_address)
+{
+    if (page_alloc_page_index == 0)
+    {
+        /* allocate a new page alloc page */
+        mm_page_alloc_page_t * old_page_alloc_page = current_page_alloc_page;
+        current_page_alloc_page = (mm_page_alloc_page_t *)
+            (base_address + KERNEL_OFFSET);
+        current_page_alloc_page->next = old_page_alloc_page;
+        page_alloc_page_index =
+            sizeof(current_page_alloc_page->pages)
+            / sizeof(current_page_alloc_page->pages[0]) - 1;
+        page_alloc_pages[num_page_alloc_pages++] = base_address;
+    }
+    else
+    {
+        current_page_alloc_page->pages[page_alloc_page_index] = base_address;
+        page_alloc_page_index--;
+    }
+}
+
 /**************************************************************************
  * Map virtual_address to physical_address.                              *
  * Both addresses should be page-aligned.                                *
@@ -199,7 +214,7 @@ int mm_early_map(u32_t virtual_address, u32_t physical_address,
     if (page_dir[directory_index] == 0)
     {
         /* allocate a new page table */
-        u32_t page_table_address = mm_early_page_alloc();
+        u32_t page_table_address = mm_page_alloc();
         if (page_table_address == 0)
         {
             return 0;
@@ -258,25 +273,6 @@ int mm_map(u32_t virtual_address, u32_t physical_address,
     return 1;
 }
 
-/**************************************************************************
- * Returns the physical base address of a page in RAM                    *
- * or 0 if no pages were available                                       *
- * This 'early' version is to be called during segmented bootstrapping   *
- *************************************************************************/
-u32_t mm_early_page_alloc()
-{
-    u32_t page_address = 0;
-    if (mm_free_page_ptr != NULL)
-    {
-        u32_t * page_ptr = mm_free_page_ptr;
-        page_ptr = (u32_t *) ((u32_t)page_ptr + (u32_t)KERNEL_OFFSET);
-        page_address = (u32_t) mm_free_page_ptr;
-        mm_free_page_ptr = (u32_t *) *page_ptr;
-        mm_num_free_pages--;
-    }
-    return page_address;
-}
-
 /**************************************************************************
  * Returns the physical base address of a page in RAM                    *
  * or 0 if no pages were available                                       *
@@ -284,11 +280,20 @@ u32_t mm_early_page_alloc()
 u32_t mm_page_alloc()
 {
     u32_t page_address = 0;
-    if (mm_free_page_ptr != NULL)
+    if (current_page_alloc_page != NULL)
     {
-        u32_t * page_ptr = mm_free_page_ptr;
-        page_address = (u32_t) mm_free_page_ptr;
-        mm_free_page_ptr = (u32_t *) *page_ptr;
+        if (page_alloc_page_index == PAGE_SIZE / sizeof(u32_t) - 1)
+        {
+            page_address = ((u32_t) current_page_alloc_page) - KERNEL_OFFSET;
+            current_page_alloc_page = current_page_alloc_page->next;
+            page_alloc_page_index = 0;
+        }
+        else
+        {
+            page_alloc_page_index++;
+            page_address =
+                current_page_alloc_page->pages[page_alloc_page_index];
+        }
         mm_num_free_pages--;
     }
     return page_address;
@@ -21,6 +21,12 @@ typedef struct
     u64_t length;
 } mm_mem_range_t;
 
+typedef struct mm_page_alloc_page_s
+{
+    u32_t pages[PAGE_SIZE / sizeof(u32_t) - 1];
+    mm_page_alloc_page_s * next;
+} mm_page_alloc_page_t;
+
 /* http://courses.ece.illinois.edu/ece391/references/descriptors.pdf */
 /* granularity: 0: limit in bytes; 1: limit in pages */
 /* dpl: 0: system mode; 3: user mode */
@@ -12,7 +12,7 @@ void stack_bootstrap()
     u32_t stack_page_virt = KERNEL_STACK_TOP - PAGE_SIZE;
     for (int i = 0; i < STACK_INITIAL_SIZE; i++)
     {
-        u32_t stack_page_phys = mm_early_page_alloc();
+        u32_t stack_page_phys = mm_page_alloc();
         mm_early_map(stack_page_virt, stack_page_phys, 0, 1);
         stack_page_virt -= PAGE_SIZE;
     }