HIPPO: store memory regions in addition to individual free pages

Josh Holtrop 2025-02-21 12:31:03 -05:00
parent 30078417b6
commit b2497d1ee0
2 changed files with 141 additions and 24 deletions

@@ -7,21 +7,38 @@
  */
 module hulk.hippo;
+import hulk.hurl;
 import hulk.klog;
 struct Hippo
 {
     /**
-     * Linked list node entry for a physical page.
+     * Linked list node entry for a single page.
      */
     private static struct PhysicalPage
     {
         PhysicalPage * next;
     }
+    /**
+     * Linked list node entry for a region.
+     */
+    private static struct Region
+    {
+        ulong size;
+        Region * next;
+    }
     /**
      * Linked list of free physical pages.
      */
     private static __gshared PhysicalPage * free_pages;
+    /**
+     * Linked list of free regions.
+     */
+    private static __gshared Region * regions;
     /**
      * Number of free physical pages.
      */
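
A note on the new Region type: the node is intrusive, living in the first bytes of the free span it describes, so tracking a region costs no memory beyond the region itself. A minimal user-space sketch of the same trick (the GC-allocated buffer standing in for a span of physical memory is an assumption for illustration):

import std.stdio;

struct Region
{
    ulong size;
    Region * next;
}

void main()
{
    /* Pretend this 64 KiB buffer is a span of free physical memory. */
    auto buffer = new ubyte[](65536);

    /* The header describing the span is written into the span itself. */
    Region * r = cast(Region *)buffer.ptr;
    r.size = buffer.length;
    r.next = null;

    writefln("region at %s covers %dKB", r, r.size >> 10);
}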
@@ -48,22 +65,82 @@ struct Hippo
     public static void * allocate_page()
     {
         PhysicalPage * pp;
-        if (free_pages != null)
+        if (free_pages !is null)
         {
             pp = free_pages;
             free_pages = free_pages.next;
             m_n_free_pages--;
         }
+        else if (regions !is null)
+        {
+            pp = cast(PhysicalPage *)(cast(ulong)regions + regions.size - PAGE_SIZE);
+            regions.size -= PAGE_SIZE;
+            if (regions.size == 0)
+            {
+                regions = regions.next;
+            }
+            m_n_free_pages--;
+        }
         return cast(void *)pp;
     }
+    /**
+     * Free a physical memory region.
+     */
+    public static void free_region(T)(T start, ulong size)
+    {
+        if (regions is null)
+        {
+            /* The free regions list is empty. Append this new region. */
+            regions = cast(Region *)start;
+            regions.size = size;
+            regions.next = null;
+        }
+        else if (size < regions.size)
+        {
+            /* The new region is smaller than the first one in the list.
+             * Add the new region to the beginning of the list. */
+            Region * new_region = cast(Region *)start;
+            new_region.size = size;
+            new_region.next = regions;
+            regions = new_region;
+        }
+        else
+        {
+            /* Find the last free region that is smaller than this one. */
+            Region * region = regions;
+            while (region.next !is null && region.next.size < size)
+            {
+                region = region.next;
+            }
+            /* Add the new region after this entry. */
+            Region * new_region = cast(Region *)start;
+            new_region.size = size;
+            new_region.next = region.next;
+            region.next = new_region;
+        }
+        m_n_free_pages += size >> 12;
+    }
     /**
      * Get the number of free pages.
      *
      * @return The number of free pages.
      */
-    public @property size_t n_free_pages() const
+    public static @property size_t n_free_pages()
     {
         return m_n_free_pages;
     }
+    public static void dump_regions()
+    {
+        Region * region = regions;
+        while (region !is null)
+        {
+            Klog.writefln("HIPPO free region: %p %uKB", region, region.size >> 10);
+            region = region.next;
+        }
+    }
 }
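
Taken together: allocate_page still prefers the singles list, and only then carves the last page off the head region; free_region keeps the region list sorted ascending by size, so small fragments are consumed before large contiguous spans get broken up. A condensed user-space model of those two paths (GC-allocated arrays stand in for physical memory; the PAGE_SIZE of 4096 follows from the `size >> 12` page count above):

import std.stdio;

enum PAGE_SIZE = 4096;

struct Region
{
    ulong size;
    Region * next;
}

Region * regions;
ulong n_free_pages;

/* Model of Hippo.free_region: insert keeping the list sorted by size. */
void free_region(void * start, ulong size)
{
    Region * new_region = cast(Region *)start;
    new_region.size = size;
    if (regions is null || size < regions.size)
    {
        /* Empty list, or smaller than the head: prepend. */
        new_region.next = regions;
        regions = new_region;
    }
    else
    {
        /* Find the last region smaller than this one and insert after it. */
        Region * region = regions;
        while (region.next !is null && region.next.size < size)
        {
            region = region.next;
        }
        new_region.next = region.next;
        region.next = new_region;
    }
    n_free_pages += size / PAGE_SIZE;
}

/* Model of the region fallback in Hippo.allocate_page: carve one page
 * off the end of the head (smallest) region. */
void * allocate_page()
{
    if (regions is null)
    {
        return null;
    }
    void * page = cast(void *)(cast(ulong)regions + regions.size - PAGE_SIZE);
    regions.size -= PAGE_SIZE;
    if (regions.size == 0)
    {
        regions = regions.next;
    }
    n_free_pages--;
    return page;
}

void main()
{
    auto big = new ubyte[](8 * PAGE_SIZE);
    auto small = new ubyte[](2 * PAGE_SIZE);
    free_region(big.ptr, big.length);
    free_region(small.ptr, small.length); /* sorts ahead of the 8-page region */
    writefln("free pages: %d", n_free_pages); // 10
    void * p = allocate_page();
    /* The page comes from the tail of the 2-page region, leaving the
     * larger region intact. */
    assert(p is cast(void *)(small.ptr + PAGE_SIZE));
    writefln("free pages: %d", n_free_pages); // 9
}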

@@ -157,13 +157,14 @@ struct Hurl
         size_t usable_memory;
         size_t physical_address_limit;
         const(size_t) fb_size = round_up_power_2(header.bootinfo.fb.height * header.bootinfo.fb.stride * 4u, PAGE_SIZE);
-        ulong[2][2] reserved = [
+        ulong[2][2] reserved_regions = [
             [header.bootinfo.hulk_phys, LinkerAddresses.hulk_binary_size],
             [cast(ulong)header.bootinfo.fb.buffer, fb_size],
         ];
-        for (size_t ri = 0u; ri < reserved.length; ri++)
+        for (size_t ri = 0u; ri < reserved_regions.length; ri++)
         {
-            reserved[ri][1] += reserved[ri][0];
+            Klog.writefln("Reserved region %p %uKB", reserved_regions[ri][0], reserved_regions[ri][1] >> 10);
+            reserved_regions[ri][1] += reserved_regions[ri][0];
         }
         for (size_t bii = 0u; bii < header.bootinfo.memory_map_count; bii++)
         {
@@ -171,29 +172,14 @@ struct Hurl
                 (header.bootinfo.memory_map[bii].type == BootInfo.MemoryRegion.Type.Conventional))
             {
                 ulong phys = header.bootinfo.memory_map[bii].base;
-                ulong phys_end = phys + header.bootinfo.memory_map[bii].size;
+                ulong size = header.bootinfo.memory_map[bii].size;
+                ulong phys_end = phys + size;
                 if (phys_end > physical_address_limit)
                 {
                     physical_address_limit = phys_end;
                 }
                 usable_memory += header.bootinfo.memory_map[bii].size;
-                while (phys < phys_end)
-                {
-                    bool is_reserved = false;
-                    for (size_t ri = 0u; ri < reserved.length; ri++)
-                    {
-                        if ((reserved[ri][0] <= phys) && (phys < reserved[ri][1]))
-                        {
-                            is_reserved = true;
-                            break;
-                        }
-                    }
-                    if (!is_reserved)
-                    {
-                        Hippo.free_page(phys);
-                    }
-                    phys += PAGE_SIZE;
-                }
+                add_region(phys, size, reserved_regions);
             }
         }
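
The payoff of the add_region call: the old loop touched every page of every conventional region individually, so a single 512 MiB region meant 512 MiB / 4 KiB = 131072 free_page calls at boot, while the new path hands the same span to Hippo in one call, plus one recursion per reserved boundary it straddles.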
@@ -223,6 +209,60 @@ struct Hurl
         usable_memory += fb_size;
         Klog.writefln("Usable memory: %uKB", usable_memory >> 10u);
         Klog.writefln("Kernel size: %uKB", (LinkerAddresses.hulk_binary_size + LinkerAddresses.hulk_bss_size + header.stack_size) >> 10u);
+        Klog.writefln("Free pages: %u", Hippo.n_free_pages);
+        // Hippo.dump_regions();
     }
+    /**
+     * Add a region of available physical memory.
+     *
+     * @param start Start address of region.
+     * @param size Size of region.
+     * @param reserved_regions List of reserved regions to exclude.
+     */
+    private static void add_region(ulong start, ulong size, ulong[2][] reserved_regions)
+    {
+        ulong end = start + size;
+        bool split;
+        /* Skip empty regions. */
+        if (size == 0)
+        {
+            return;
+        }
+        for (size_t rri = 0; rri < reserved_regions.length; rri++)
+        {
+            /* Skip this region if it is entirely within a reserved region. */
+            if (reserved_regions[rri][0] <= start && reserved_regions[rri][1] >= end)
+            {
+                return;
+            }
+            /* Check if the region is partly reserved but the beginning is free. */
+            if (reserved_regions[rri][0] > start && reserved_regions[rri][0] < end)
+            {
+                split = true;
+                add_region(start, reserved_regions[rri][0] - start, reserved_regions);
+            }
+            /* Check if the region is partly reserved but the end is free. */
+            if (reserved_regions[rri][1] > start && reserved_regions[rri][1] < end)
+            {
+                split = true;
+                add_region(reserved_regions[rri][1], end - reserved_regions[rri][1], reserved_regions);
+            }
+            if (split)
+            {
+                return;
+            }
+        }
+        /* If a region makes it here, it did not overlap with any reserved
+         * region, so the entire region can be used. */
+        // Klog.writefln("HURL: freeing region %p %uKB", start, size >> 10);
+        Hippo.free_region(start, size);
+    }
     public static void map(ulong virtual, ulong physical, ulong flags)
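
The recursion in add_region deserves a worked example: a reserved block strictly inside a free span triggers both partial-overlap branches, and each recursive call re-checks its sub-span against the full reserved list before handing it to Hippo.free_region. A condensed user-space model (the addresses are hypothetical, and writefln stands in for the Hippo call):

import std.stdio;

/* Condensed model of Hurl.add_region: carve the span [start, start+size)
 * around the reserved [base, end) pairs, reporting what survives. */
void add_region(ulong start, ulong size, ulong[2][] reserved_regions)
{
    ulong end = start + size;
    if (size == 0)
    {
        return;
    }
    foreach (rr; reserved_regions)
    {
        if (rr[0] <= start && rr[1] >= end)
        {
            return; /* entirely reserved */
        }
        bool split;
        if (rr[0] > start && rr[0] < end)
        {
            split = true;
            add_region(start, rr[0] - start, reserved_regions);
        }
        if (rr[1] > start && rr[1] < end)
        {
            split = true;
            add_region(rr[1], end - rr[1], reserved_regions);
        }
        if (split)
        {
            return;
        }
    }
    writefln("free region: %#x +%dKB", start, size >> 10);
}

void main()
{
    /* Hypothetical layout: 1 MiB free at 16 MiB, with 64 KiB reserved
     * starting 256 KiB in. */
    ulong[2][] reserved = [[0x1040000, 0x1050000]];
    add_region(0x1000000, 0x100000, reserved);
    /* Prints the two surviving pieces:
     *   free region: 0x1000000 +256KB
     *   free region: 0x1050000 +704KB
     */
}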