Reclaim bootloader page table pages

parent b3fb599b25
commit 71c30fa932

src/hulk/hippo.d (102 changed lines)

@@ -1,20 +1,18 @@
/**
 * HIPPO, the HOS In-place Physical Page Organizer.
 *
 * HIPPO maintains a list of free physical pages "in place", meaning that the
 * available page itself is used as the linked list entry, so separate memory
 * is not needed to keep track of the available pages.
 */
module hulk.hippo;

import hulk.header;
import hulk.bootinfo;
import hulk.klog;
import hulk.linker_addresses;
import hulk.pagetable;

struct hippo
{
    /**
     * Linked list node entry for a physical page.
     */
    private struct PhysicalPage
    private static struct PhysicalPage
    {
        PhysicalPage * next;
    }
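
(A minimal standalone sketch of the "in place" idea above; the names are made
up for illustration and are not from the HULK sources. The free page itself
stores the link, so the free list costs no memory beyond the pages it tracks.)

    module freelist_sketch;

    private struct Node { Node * next; }

    private __gshared Node * head;

    /* Push a free page: the page's first bytes become the list node. */
    void push(void * page)
    {
        Node * n = cast(Node *)page;
        n.next = head;
        head = n;
    }

    /* Pop a free page, or null if the list is empty. */
    void * pop()
    {
        Node * n = head;
        if (n !is null)
        {
            head = n.next;
        }
        return cast(void *)n;
    }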

@@ -27,96 +25,25 @@ struct hippo
    /**
     * Number of free physical pages.
     */
    private static __gshared size_t n_free_pages;

    /**
     * Physical address limit.
     */
    public static __gshared size_t physical_address_limit;

    /**
     * Initialize HIPPO.
     *
     * Ok, what we do here is iterate through all of the memory map regions in
     * the bootinfo memory_map array, and create a linked list of all free
     * physical pages that are available. Within the available memory regions,
     * we have to watch out for the following items:
     * 1) HULK binary (text + data)
     *    - This includes the HULK header and bootinfo structures
     * 2) HULK bss
     * 3) HULK stack
     * 4) Framebuffer
     * 5) Page tables
     *
     * @param bootinfo HULK boot information structure.
     */
    public static void initialize(HulkHeader * header)
    {
        size_t usable_memory;
        ulong[2][4] reserved = [
            [header.bootinfo.hulk_phys, LinkerAddresses.hulk_binary_size],
            [header.bootinfo.bss_phys, LinkerAddresses.hulk_bss_size],
            [header.bootinfo.stack_phys, header.stack_size],
            [cast(ulong)header.bootinfo.fb.buffer, header.bootinfo.fb.height * header.bootinfo.fb.stride * 4u],
        ];
        for (size_t ri = 0u; ri < reserved.length; ri++)
        {
            reserved[ri][1] += reserved[ri][0];
        }
        for (size_t bii = 0u; bii < header.bootinfo.memory_map_count; bii++)
        {
            if ((header.bootinfo.memory_map[bii].type == BootInfo.MemoryRegion.Type.Bootloader) ||
                (header.bootinfo.memory_map[bii].type == BootInfo.MemoryRegion.Type.Conventional))
            {
                ulong phys = header.bootinfo.memory_map[bii].base;
                ulong phys_end = phys + header.bootinfo.memory_map[bii].size;
                if (phys_end > physical_address_limit)
                {
                    physical_address_limit = phys_end;
                }
                usable_memory += header.bootinfo.memory_map[bii].size;
                while (phys < phys_end)
                {
                    bool is_reserved = false;
                    for (size_t ri = 0u; ri < reserved.length; ri++)
                    {
                        if ((reserved[ri][0] <= phys) && (phys < reserved[ri][1]))
                        {
                            is_reserved = true;
                            break;
                        }
                    }
                    if (!is_reserved)
                    {
                        free_page(phys);
                    }
                    phys += PAGE_SIZE;
                }
            }
        }
        size_t usable_kb = usable_memory >> 10u;
        size_t usable_mb = usable_kb >> 10u;
        size_t frac_mb = ((1000u * (usable_kb & 0x3FFu)) + 512u) >> 10u;
        klog.writefln("Found %u.%03uMB of usable RAM", usable_mb, frac_mb);
    }
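
(Worked example of the fixed-point math in the code above, using the same
expressions with a concrete value plugged in: 0x3FF masks off the sub-MB
remainder in KB, which is then rounded to thousandths of a MB.)

    size_t usable_kb = (130u << 10u) + 512u;  // 130.5 MB expressed in KB
    size_t usable_mb = usable_kb >> 10u;      // 130
    size_t frac_mb = ((1000u * (usable_kb & 0x3FFu)) + 512u) >> 10u;
    // (1000 * 512 + 512) >> 10 == 500, so this prints "Found 130.500MB ..."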
    private static __gshared size_t m_n_free_pages;

    /**
     * Free a physical page.
     *
     * @param phys Physical page address.
     */
    private static void free_page(ulong phys)
    public static void free_page(T)(T phys)
    {
        PhysicalPage * pp = cast(PhysicalPage *)phys;
        pp.next = free_pages;
        free_pages = pp;
        n_free_pages++;
        m_n_free_pages++;
    }

    /**
     * Allocate a physical page.
     *
     * @return Page address, or null if none available.
     * @return Page address, or null if no pages are available.
     */
    public static void * allocate_page()
    {

@@ -125,7 +52,18 @@ struct hippo
        {
            pp = free_pages;
            free_pages = free_pages.next;
            m_n_free_pages--;
        }
        return cast(void *)pp;
    }

    /**
     * Get the number of free pages.
     *
     * @return The number of free pages.
     */
    public @property size_t n_free_pages() const
    {
        return m_n_free_pages;
    }
}
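
(A quick usage sketch of the reworked hippo interface; the call site is
hypothetical, not from this commit. Note that free_page is now a template, so
it accepts a void * or a typed pointer without an explicit cast at the caller.)

    void * page = hippo.allocate_page();
    if (page !is null)
    {
        // ... use the 4 KiB page ...
        hippo.free_page(page);
    }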

@@ -64,7 +64,6 @@ void hulk_start()

    klog.writefln("Welcome to HULK, the HOS UltraLight Kernel!");

    hippo.initialize(&hulk_header);
    hurl.initialize(&hulk_header);
    pci.initialize();
    pic.initialize();

src/hulk/hurl.d (135 changed lines)

@@ -10,6 +10,7 @@ import hulk.cpu;
import hulk.hippo;
import hulk.memory;
import hulk.klog;
import hulk.bootinfo;
import hulk.header;
import hulk.linker_addresses;

@@ -24,20 +25,24 @@ enum ulong HULK_VIRTUAL_FRAMEBUFFER_ADDRESS = 0xFFFF_A000_0000_0000u;

struct hurl
{
    /**
     * Pointer to the base page table.
     */
    private static __gshared PageTable * m_pt_base;

    /**
     * Initialize HURL.
     * Build HULK page tables.
     *
     * @param bootinfo HULK boot information structure.
     * @param physical_address_limit
     *        Limit of physical memory.
     */
    public static void initialize(HulkHeader * header)
    private static void build_page_tables(HulkHeader * header, size_t physical_address_limit)
    {
        m_pt_base = allocate_pt();
        /* Identity map all physical RAM. */
        map_range(0u,
                  0u,
                  hippo.physical_address_limit,
                  physical_address_limit,
                  PT_WRITABLE | PT_NO_EXECUTE);
        ulong phys_address = header.bootinfo.hulk_phys;
        ulong virt_address = HULK_VIRTUAL_BASE_ADDRESS;
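
(A sketch of the two address spaces being set up here, under the assumption
that phys_address and virt_address advance in lockstep while the kernel is
mapped; kernel_virt_for is a hypothetical helper, not from the sources.)

    // identity map: virtual p == physical p, for all RAM
    // kernel map:   hulk_phys maps to HULK_VIRTUAL_BASE_ADDRESS
    ulong kernel_virt_for(ulong phys, ulong hulk_phys)
    {
        return HULK_VIRTUAL_BASE_ADDRESS + (phys - hulk_phys);
    }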

@@ -86,7 +91,125 @@ struct hurl
        write_cr3(cast(ulong)m_pt_base);
    }

    public static void map(ulong virtual, ulong physical, ulong flags)
    /**
     * Reclaim the pages that were used for the bootloader page tables.
     *
     * @param pt
     *        The page table to reclaim pages from.
     * @param level
     *        Page table level (used internally while recursing through the page tables).
     *
     * @return Number of pages reclaimed.
     */
    private static size_t reclaim_bootloader_page_table_pages(PageTable * pt, size_t level = 0u)
    {
        map(pt, pt, PT_WRITABLE | PT_NO_EXECUTE);
        size_t reclaimed_pages;
        for (size_t i = 0u; i < PageTable.N_ENTRIES; i++)
        {
            PageTableEntry pte = (*pt)[i];
            if (pte.present && (level < 2u) && !pte.huge)
            {
                /* For the first two levels of page tables, first recurse and
                 * free pages from the lower level page tables before
                 * reclaiming this entry. */
                reclaimed_pages += reclaim_bootloader_page_table_pages(pte.follow(), level + 1u);
            }
        }
        hippo.free_page(pt);
        reclaimed_pages++;
        return reclaimed_pages;
    }
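
(For a feel of how many pages this reclaims: the level < 2u and pte.huge
checks suggest the bootloader mapped RAM with 2 MiB huge pages, so only three
table levels exist. A back-of-the-envelope sketch under that assumption;
bootloader_pt_pages is hypothetical, not from the sources.)

    size_t bootloader_pt_pages(ulong bytes)
    {
        enum ulong HUGE_PAGE = 2UL << 20u;  // 2 MiB leaf mappings
        enum ulong FANOUT = 512u;           // entries per table page
        ulong pdes  = (bytes + HUGE_PAGE - 1u) / HUGE_PAGE;  // leaf entries
        ulong pds   = (pdes + FANOUT - 1u) / FANOUT;         // level 2 tables
        ulong pdpts = (pds + FANOUT - 1u) / FANOUT;          // level 1 tables
        return cast(size_t)(pdpts + pds + 1u);               // plus the root
    }
    // bootloader_pt_pages(4UL << 30u) == 6, i.e. 24 KiB back for 4 GiB RAM.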

    /**
     * Initialize HURL.
     *
     * @param header HULK boot information structure.
     */
    public static void initialize(HulkHeader * header)
    {
        PageTable * bootloader_pt_base = cast(PageTable *)read_cr3();

        /*
         * Ok, what we do here is iterate through all of the memory map
         * regions in the bootinfo memory_map array and mark each page that
         * is not inside a reserved range as available. Within the available
         * memory regions, we have to watch out for the following items:
         * 1) HULK binary (header + text + rodata + data)
         * 2) Framebuffer
         * In addition to these ranges, there are also pages used for the
         * following purposes that are not represented in the memory map
         * from the bootloader:
         * 3) HULK BSS
         * 4) HULK stack
         * 5) bootloader page table pages
         */
        size_t usable_memory;
        size_t physical_address_limit;
        ulong[2][2] reserved = [
            [header.bootinfo.hulk_phys, LinkerAddresses.hulk_binary_size],
            [cast(ulong)header.bootinfo.fb.buffer, header.bootinfo.fb.height * header.bootinfo.fb.stride * 4u],
        ];
        for (size_t ri = 0u; ri < reserved.length; ri++)
        {
            reserved[ri][1] += reserved[ri][0];
        }
        for (size_t bii = 0u; bii < header.bootinfo.memory_map_count; bii++)
        {
            if ((header.bootinfo.memory_map[bii].type == BootInfo.MemoryRegion.Type.Bootloader) ||
                (header.bootinfo.memory_map[bii].type == BootInfo.MemoryRegion.Type.Conventional))
            {
                ulong phys = header.bootinfo.memory_map[bii].base;
                ulong phys_end = phys + header.bootinfo.memory_map[bii].size;
                if (phys_end > physical_address_limit)
                {
                    physical_address_limit = phys_end;
                }
                usable_memory += header.bootinfo.memory_map[bii].size;
                while (phys < phys_end)
                {
                    bool is_reserved = false;
                    for (size_t ri = 0u; ri < reserved.length; ri++)
                    {
                        if ((reserved[ri][0] <= phys) && (phys < reserved[ri][1]))
                        {
                            is_reserved = true;
                            break;
                        }
                    }
                    if (!is_reserved)
                    {
                        hippo.free_page(phys);
                    }
                    phys += PAGE_SIZE;
                }
            }
        }

        /*
         * Now that we have available physical pages to allocate from, we can
         * build new page tables to replace the bootloader page tables.
         */
        build_page_tables(header, physical_address_limit);

        /*
         * After we have switched to the newly constructed page tables, we
         * can iterate through and free the bootloader page tables. They are
         * most likely already mapped, because we just identity mapped every
         * page up to the physical_address_limit determined above. But just
         * in case the bootloader used the last pages of RAM for its page
         * table pages, we make sure to map them as we traverse them.
         */
        size_t reclaimed_bootloader_page_table_memory =
            reclaim_bootloader_page_table_pages(bootloader_pt_base) << 12u;  /* pages to bytes (4 KiB pages) */
        usable_memory += reclaimed_bootloader_page_table_memory;

        usable_memory += LinkerAddresses.hulk_bss_size;
        usable_memory += header.stack_size;
        klog.writefln("Usable memory: %uKB", usable_memory >> 10u);
        klog.writefln("Kernel size: %uKB", (LinkerAddresses.hulk_binary_size + LinkerAddresses.hulk_bss_size + header.stack_size) >> 10u);
    }

    public static void map(T, U)(T virtual, U physical, ulong flags)
    {
        PageTable * pt = m_pt_base;
        for (size_t level = 0; level < 4u; level++)

@@ -108,7 +231,7 @@ struct hurl
            }
            else
            {
                *ppte = PageTableEntry(physical, flags | PT_PRESENT);
                *ppte = PageTableEntry(cast(ulong)physical, flags | PT_PRESENT);
            }
        }
    }
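
(Why map() became a template, shown as a hypothetical usage sketch: the
reclaim pass hands it PageTable pointers, while other callers pass plain
ulong addresses, and the cast to ulong now happens once inside map().)

    hurl.map(pt, pt, PT_WRITABLE | PT_NO_EXECUTE);  // PageTable *, PageTable *
    hurl.map(virt, phys, PT_WRITABLE);              // ulong, ulong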

src/hulk/pagetable.d

@@ -136,8 +136,11 @@ static assert(PageTableEntry.sizeof == 8u);
 */
struct PageTable
{
    /** Number of page table entries in a page table. */
    enum size_t N_ENTRIES = 512u;

    /** Page table entries. */
    private PageTableEntry[512] entries;
    private PageTableEntry[N_ENTRIES] entries;

    /**
     * Access the PageTableEntry for the given address and page table level.
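
(The accessor mentioned in that last comment is outside the hunk, but the
usual x86-64 index math for such an accessor looks like this; a sketch under
that assumption, with level 0 being the root table.)

    size_t entry_index(ulong virt, size_t level)
    {
        // 9 bits of the virtual address select the entry at each level:
        // bits 39..47 at level 0 down to bits 12..20 at level 3.
        return cast(size_t)((virt >> (39u - (9u * level))) & 0x1FFu);
    }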