/**
 * HURL, the HOS Unreal Region Locator.
 *
 * HURL provides virtual memory management for HULK.
 */
module hulk.hurl;

public import hulk.pagetable;

import hulk.cpu;
import hulk.hippo;
import hulk.memory;
import hulk.klog;
import hulk.header;
import hulk.linker_addresses;

/** HULK virtual base address. */
enum ulong HULK_VIRTUAL_BASE_ADDRESS = 0xFFFF_8000_0000_0000u;

/** HULK virtual stack top address. */
enum ulong HULK_VIRTUAL_STACK_TOP_ADDRESS = 0xFFFF_A000_0000_0000u;

/**
 * HULK virtual framebuffer address.
 *
 * NOTE(review): deliberately(?) equal to HULK_VIRTUAL_STACK_TOP_ADDRESS.
 * The stack occupies [top - stack_size, top) and the framebuffer
 * [top, top + fb_size), so the two mappings are adjacent rather than
 * overlapping — but there is no unmapped guard page between the stack
 * and the framebuffer, so a stack-adjacent overrun would scribble on
 * the framebuffer instead of faulting. Confirm this is intentional.
 */
enum ulong HULK_VIRTUAL_FRAMEBUFFER_ADDRESS = 0xFFFF_A000_0000_0000u;

struct hurl
{
    /** Root (top-level) page table; its physical address is loaded into CR3. */
    private static __gshared PageTable * m_pt_base;

    /**
     * Initialize HURL.
     *
     * Builds the kernel page tables (identity map of physical RAM plus the
     * high-half kernel image, stack and framebuffer mappings) and switches
     * to them by writing CR3.
     *
     * @param header HULK header; supplies the physical placement of the
     *               kernel image, BSS, stack and framebuffer via
     *               header.bootinfo, and the stack size.
     */
    public static void initialize(HulkHeader * header)
    {
        /* Turn on NXE (no execute enable) flag in the EFER MSR so that
         * PT_NO_EXECUTE mappings actually fault on instruction fetch. */
        wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);

        m_pt_base = allocate_pt();

        /* Identity map all physical RAM (writable, non-executable). */
        map_range(0u, 0u, hippo.physical_address_limit,
                  PT_WRITABLE | PT_NO_EXECUTE);

        ulong phys_address = header.bootinfo.hulk_phys;
        ulong virt_address = HULK_VIRTUAL_BASE_ADDRESS;

        /* Map HULK header region (read-only data). */
        map_range(virt_address, phys_address,
                  LinkerAddresses.hulk_header_size, PT_NO_EXECUTE);
        phys_address += LinkerAddresses.hulk_header_size;
        virt_address += LinkerAddresses.hulk_header_size;

        /* Map HULK text region (read-only, executable). */
        map_range(virt_address, phys_address,
                  LinkerAddresses.hulk_text_size, 0u);
        phys_address += LinkerAddresses.hulk_text_size;
        virt_address += LinkerAddresses.hulk_text_size;

        /* Map HULK rodata region (read-only, non-executable). */
        map_range(virt_address, phys_address,
                  LinkerAddresses.hulk_rodata_size, PT_NO_EXECUTE);
        phys_address += LinkerAddresses.hulk_rodata_size;
        virt_address += LinkerAddresses.hulk_rodata_size;

        /* Map HULK data region (writable, non-executable). */
        map_range(virt_address, phys_address,
                  LinkerAddresses.hulk_data_size,
                  PT_WRITABLE | PT_NO_EXECUTE);
        virt_address += LinkerAddresses.hulk_data_size;

        /* Map HULK BSS region. Note the BSS has its own physical base
         * (bss_phys) rather than continuing from the image, so
         * phys_address is not advanced here. */
        map_range(virt_address, header.bootinfo.bss_phys,
                  LinkerAddresses.hulk_bss_size,
                  PT_WRITABLE | PT_NO_EXECUTE);

        /* Map HULK stack so it ends exactly at the virtual stack top. */
        map_range(HULK_VIRTUAL_STACK_TOP_ADDRESS - header.stack_size,
                  header.bootinfo.stack_phys, header.stack_size,
                  PT_WRITABLE | PT_NO_EXECUTE);

        /* Map HULK framebuffer. The size assumes 4 bytes per pixel with
         * `stride` counted in pixels — TODO(review): confirm stride units
         * against the bootinfo framebuffer definition. */
        map_range(HULK_VIRTUAL_FRAMEBUFFER_ADDRESS,
                  cast(ulong)header.bootinfo.fb.buffer,
                  header.bootinfo.fb.height * header.bootinfo.fb.stride * 4u,
                  PT_WRITABLE | PT_NO_EXECUTE);

        /* Switch to the new address space. */
        write_cr3(cast(ulong)m_pt_base);
    }

    /**
     * Map a single 4 KiB page.
     *
     * Walks the 4-level page table from the root, allocating intermediate
     * tables on demand, and installs the leaf entry at the last level.
     *
     * @param virtual  Page-aligned virtual address to map.
     * @param physical Page-aligned physical address to map it to.
     * @param flags    Page table flags (PT_PRESENT is OR'd in automatically;
     *                 intermediate tables are created writable + present).
     *
     * NOTE(review): an already-present leaf entry is silently overwritten,
     * and no TLB invalidation (invlpg) is performed — callers mapping after
     * initialize() has loaded CR3 must flush the TLB themselves.
     */
    public static void map(ulong virtual, ulong physical, ulong flags)
    {
        PageTable * pt = m_pt_base;

        for (size_t level = 0; level < 4u; level++)
        {
            PageTableEntry * ppte = pt.entry(virtual, level);

            if (level < 3u)
            {
                /* Intermediate level: descend, allocating if absent. */
                PageTableEntry pte = *ppte;

                if (pte.present)
                {
                    pt = pte.follow();
                }
                else
                {
                    PageTable * next_pt = allocate_pt();
                    *ppte = PageTableEntry(next_pt, PT_WRITABLE | PT_PRESENT);
                    pt = next_pt;
                }
            }
            else
            {
                /* Leaf level: install the final mapping. */
                *ppte = PageTableEntry(physical, flags | PT_PRESENT);
            }
        }
    }

    /**
     * Map a contiguous range of pages.
     *
     * @param virtual  Starting virtual address (assumed page-aligned —
     *                 TODO(review): confirm; no alignment is enforced here).
     * @param physical Starting physical address.
     * @param length   Length of the range in bytes.
     * @param flags    Page table flags applied to every page.
     */
    public static void map_range(size_t virtual, size_t physical,
                                 size_t length, ulong flags)
    {
        size_t end = virtual + length;

        while (virtual < end)
        {
            map(virtual, physical, flags);
            virtual += PAGE_SIZE;
            physical += PAGE_SIZE;
        }
    }

    /**
     * Identity map a range (virtual == physical for every page).
     *
     * The start address is rounded down to a page boundary so the full
     * range [address, address + length) is covered.
     *
     * @param address Starting address (need not be page-aligned).
     * @param length  Length of the range in bytes.
     * @param flags   Page table flags applied to every page.
     */
    public static void identity_map_range(size_t address, size_t length,
                                          ulong flags)
    {
        size_t end = address + length;

        for (size_t page = address & ~0xFFFu; page < end; page += PAGE_SIZE)
        {
            map(page, page, flags);
        }
    }

    /**
     * Log the page table walk for an address, for debugging.
     *
     * Prints each level's entry until a non-present or huge entry
     * terminates the walk.
     *
     * @param address Virtual address to look up.
     */
    public static void debug_lookup(void * address)
    {
        klog.writefln("Debugging page table lookup of 0x%x", address);

        PageTable * pt = m_pt_base;

        for (size_t level = 0; level < 4u; level++)
        {
            PageTableEntry pte = *pt.entry(address, level);

            klog.writefln("Level %u, index %u, entry = 0x%x",
                          level, pt.index(address, level), pte);

            /* Stop on non-present entries, and on huge mappings BEFORE
             * calling follow(): a huge entry holds a physical frame, not a
             * next-level table pointer, so follow() must not be applied
             * to it. */
            if (!pte.present || pte.huge)
            {
                break;
            }

            pt = pte.follow();
        }
    }

    /**
     * Allocate and zero one page table.
     *
     * @return Pointer to a freshly allocated, zero-filled page table
     *         (one physical page; cleared in 8-byte units).
     */
    private static PageTable * allocate_pt()
    {
        PageTable * pt = cast(PageTable *)hippo.allocate_page();
        memset64(pt, 0u, PAGE_SIZE / 8u);
        return pt;
    }
}