// vmm.c
// Author: Josh Holtrop
// Date: 09/30/03
// Rewritten from scratch: 12/23/03
// Modified: 09/12/05
#include "hos_defines.h"
#include "kernel.h"
#include "mm/vmm.h"
#include "lang/lang.h"
#include "mm/mm.h"
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start);
void *vmm_getFreeChunk(u32_t size);
void vmm_removeHeapEntry(u32_t queue, HeapEntry_t *he);
int vmm_moreCore(u32_t size);
int vmm_coalesceEntry(u32_t queue, HeapEntry_t *newHE);
void vmm_heb_init(HeapEntryBlock_t *heb);
void vmm_addToQueue(u32_t queue, HeapEntry_t *preceeding, HeapEntry_t *he);
int vmm_countHeapEntries(HeapEntry_t *he);
HeapEntry_t *vmm_followChain(HeapEntry_t *he);
HeapEntry_t *vmm_getUnusedEntry();
HeapEntry_t *vmm_stripUnusedEntry();
extern mb_info_t mb_info_block;
extern mb_module_t mb_modules[MAX_MODULES];
extern u32_t mm_freepages;
HeapEntryQueue_t heapEntryQueues[VMM_HE_TYPES]; // linked queues of HeapEntry objects
HeapEntry_t heapEntryHeadNodes[VMM_HE_TYPES]; // head nodes for linked queues
HeapEntry_t heapEntryTailNodes[VMM_HE_TYPES]; // tail nodes for linked queues
HeapEntryBlock_t initialHEB; // block for initial 256 HeapEntry objects
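
/*
 * Overview of the allocator's bookkeeping (inferred from how the structures
 * are used below; the actual definitions live in mm/vmm.h, which is not shown
 * in this file, so the exact field layout is an assumption):
 *
 *   - Four queues of HeapEntry_t objects, indexed by VMM_HE_HOLE, VMM_HE_FREE,
 *     VMM_HE_USED and VMM_HE_UNUSED.  Each queue is a doubly linked list with
 *     a static head and tail sentinel node; the tail nodes are zero-initialized
 *     globals, so traversal loops stop when they reach a node whose 'next' is
 *     NULL (the tail).
 *   - HOLE entries describe unmapped virtual address ranges; the largest hole
 *     acts as the "wilderness" from which pages and heap growth are carved.
 *   - FREE entries describe mapped-but-unallocated heap ranges, USED entries
 *     describe live kmalloc()/vmm_palloc() allocations, and UNUSED entries are
 *     spare descriptors waiting to be handed out by vmm_getUnusedEntry().
 *   - A HeapEntryBlock_t appears to hold 256 HeapEntry_t descriptors
 *     (vmm_heb_init() links entry[0]..entry[255]); initialHEB seeds the
 *     UNUSED queue at boot.
 */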

// This is the initialization procedure for the Virtual Memory Manager.
// It sets up the heap for dynamic memory allocation and virtual page allocation.
void vmm_init()
{
    int i;
    for (i = 0; i < mb_info_block.mods_count; i++) // page in the kernel modules
    {
        vmm_map_range((void*)mb_modules[i].mod_start,
                      (void*)mb_modules[i].mod_end - 1,
                      mb_modules[i].mod_start - VIRT_OFFSET);
    }
    for (i = 0; i < VMM_HE_TYPES; i++)
    {
        heapEntryQueues[i].count = 0;
        heapEntryQueues[i].head = &heapEntryHeadNodes[i];
        heapEntryHeadNodes[i].next = &heapEntryTailNodes[i];
        heapEntryTailNodes[i].prev = &heapEntryHeadNodes[i];
    }
    vmm_heb_init(&initialHEB);
    vmm_addToQueue(VMM_HE_UNUSED, &heapEntryHeadNodes[VMM_HE_UNUSED], &initialHEB.entry[0]);
    HeapEntry_t *wilderness = vmm_stripUnusedEntry();
    wilderness->base = (void *) HEAP_START;
    wilderness->length = HEAP_LENGTH;
    vmm_addToQueue(VMM_HE_HOLE, &heapEntryHeadNodes[VMM_HE_HOLE], wilderness);
}

/* Allocate a physical page and map the virtual address to it.
   Returns 0 on success or -1 on failure. */
int vmm_map(void *virt)
{
    u32_t dum;
    return vmm_map_addr(virt, &dum);
}

/* Same as vmm_map(), but also returns the allocated physical address
   through *phys_addr. */
int vmm_map_addr(void *virt, u32_t *phys_addr)
{
    if (mm_freepages < 10)
        return -1;
    return vmm_map1((u32_t)virt, *phys_addr = mm_palloc());
}
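
/*
 * Note on the page-table addressing used below (a summary of what the code
 * appears to rely on, rather than anything documented in this file): the last
 * page-directory entry evidently maps the page directory onto itself, so that:
 *
 *   - 0xFFFFF000              is a window onto the page directory itself
 *                             (pageTables[pde] is the PDE for 'virt'), and
 *   - 0xFFC00000 + pde * 4096 is a window onto the page table that PDE points
 *                             to, so the PTE for 'virt' can be written through
 *                             0xFFC00000 | (pde << 12) | (pte << 2).
 *
 * invlpg_() is assumed to be the wrapper for the x86 INVLPG instruction,
 * flushing the TLB entry for the page containing its argument.
 */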

// This function maps a virtual address to a physical address using the page directory / page table
int vmm_map1(u32_t virt, u32_t physical)
{
    virt &= 0xFFFFF000;
    physical &= 0xFFFFF000;
    u32_t pde = virt >> 22;
    u32_t pte = (virt & 0x003FF000) >> 12;
    u32_t *pageTables = (u32_t *)0xFFFFF000; // this is the location of the page directory
    if (!(pageTables[pde] & 0x01)) // the page directory entry is not present, we must allocate a page table
    {
        u32_t newpagetable;
        if (!(newpagetable = mm_palloc()))
            return -1; // out of physical memory
        pageTables[pde] = newpagetable | 0x03;
        invlpg_(0xFFFFF000 + (pde << 2));
        invlpg_(virt); // in case it was cached, so we can fill the page table safely
        memsetd((void*)(0xFFC00000 | (pde << 12)), 0, 1024); // zero out the new page table
    }
    *(u32_t *)(0xFFC00000 | (pde << 12) | (pte << 2)) = physical | 0x03;
    invlpg_((0xFFC00000 | (pde << 12) | (pte << 2)));
    invlpg_(virt);
    return 0;
}

// This function maps a variable number of pages in a row
int vmm_mapn(u32_t virt, u32_t physical, u32_t n)
{
    while (n > 0)
    {
        if (vmm_map1(virt, physical))
            return 1; // error mapping page
        virt += 4096;
        physical += 4096;
        n--;
    }
    return 0;
}

// This function removes the virtual address's entry in the
// page directory / page table
void vmm_unmap1(u32_t virt)
{
    *(u32_t *)(0xFFC00000 |
               ((virt & 0xFFC00000) >> 10) |
               ((virt & 0x003FF000) >> 10)) = 0;
    invlpg_(virt);
}
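
/*
 * Worked example of the PTE address computation above (illustrative only):
 * for virt = 0xC0123456, pde = 0x300 and pte = 0x123, so
 *
 *   (virt & 0xFFC00000) >> 10 = 0x300000   (pde << 12)
 *   (virt & 0x003FF000) >> 10 = 0x48C      (pte << 2)
 *
 * and the PTE is cleared through the virtual address
 * 0xFFC00000 | 0x300000 | 0x48C = 0xFFF0048C.
 */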

// This function removes multiple pages' entries
void vmm_unmapn(u32_t virt, u32_t n)
{
    while (n > 0)
    {
        vmm_unmap1(virt);
        virt += 4096;
        n--;
    }
}

// This function maps an entire address range into memory
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
{
    if (virt_end < virt_start)
        return -1; // invalid region
    // compare page frames rather than raw addresses so the page containing
    // virt_end is mapped even when virt_start is not page-aligned
    while (((u32_t)virt_start & 0xFFFFF000) <= ((u32_t)virt_end & 0xFFFFF000))
    {
        if (vmm_map1((u32_t)virt_start, phys_start))
            return -2; // out of memory
        virt_start += 4096;
        phys_start += 4096;
    }
    return 0;
}
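
/*
 * kmalloc() below rounds every request up to a multiple of
 * VMM_MALLOC_GRANULARITY (defined elsewhere; the value is not visible in this
 * file).  Assuming, purely for illustration, a granularity of 16 bytes:
 *
 *   kmalloc(13) -> 13 + 16 - (13 % 16) = 16-byte chunk
 *   kmalloc(0)  -> bumped up to 16 bytes (the 09/12/05 fix below), so two
 *                  zero-byte requests can no longer be handed the same chunk
 */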

// kernel virtual memory allocator
void *kmalloc(u32_t size)
{
    k_enter_critical();
    if (size % VMM_MALLOC_GRANULARITY)
        size = size + VMM_MALLOC_GRANULARITY -
               (size % VMM_MALLOC_GRANULARITY);
    /* added 09/12/05 by josh - fixed bug returning size 0 segments
       multiple times */
    if (size < VMM_MALLOC_GRANULARITY)
        size = VMM_MALLOC_GRANULARITY;
    void *attempt = vmm_getFreeChunk(size);
    if (attempt)
    {
        k_leave_critical();
        return attempt;
    }
    if (vmm_moreCore(size))
    {
        k_leave_critical();
        return NULL; // we could not get any more heap memory
    }
    attempt = vmm_getFreeChunk(size);
    k_leave_critical();
    return attempt;
}
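
/*
 * Typical use from elsewhere in the kernel (illustrative sketch only;
 * 'foo_t' and 'foo' are hypothetical names, not part of this file):
 *
 *     foo_t *foo = (foo_t *)kmalloc(sizeof(foo_t));
 *     if (!foo)
 *         return -1;          // allocation failed
 *     ...
 *     kfree(foo);             // releases the chunk back to the FREE queue
 */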

// kernel virtual memory de-allocator
int kfree(void *addr)
{
    k_enter_critical();
    HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
    while (he->next)
    {
        if (he->base == addr)
        {
            vmm_removeHeapEntry(VMM_HE_USED, he);
            if (vmm_coalesceEntry(VMM_HE_FREE, he))
                // no adjacent free chunk found - keep this entry on the FREE queue
                vmm_addToQueue(VMM_HE_FREE, heapEntryQueues[VMM_HE_FREE].head, he);
            else
                // merged into an existing free chunk - recycle the descriptor
                vmm_addToQueue(VMM_HE_UNUSED, heapEntryQueues[VMM_HE_UNUSED].head, he);
            break;
        }
        he = (HeapEntry_t *)he->next;
    }
    k_leave_critical();
    return 0;
}

// This function allocates a virtual page and maps it to a physical page
void *vmm_palloc()
{
    u32_t dum;
    return vmm_palloc_addr(&dum);
}

void *vmm_palloc_addr(u32_t *phys_addr)
{
    k_enter_critical();
    HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
    HeapEntry_t *wilderness = he;
    while (he->next)
    {
        if (he->length == 4096)
        {
            vmm_removeHeapEntry(VMM_HE_HOLE, he);
            vmm_addToQueue(VMM_HE_USED, &heapEntryHeadNodes[VMM_HE_USED], he);
            if (vmm_map_addr(he->base, phys_addr))
                he->base = NULL;
            k_leave_critical();
            return he->base;
        }
        if (he->length > wilderness->length)
            wilderness = he;
        he = (HeapEntry_t *)he->next;
    }
    if (wilderness->length < 0x00010000) // leave 16 pages free
    {
        k_leave_critical();
        return NULL;
    }
    wilderness->length -= 4096; // strip 4k from the top
    he = vmm_getUnusedEntry();
    he->base = wilderness->base + wilderness->length;
    he->length = 4096;
    vmm_addToQueue(VMM_HE_USED, &heapEntryHeadNodes[VMM_HE_USED], he);
    if (vmm_map_addr(he->base, phys_addr))
        he->base = NULL;
    k_leave_critical();
    return he->base;
}
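
/*
 * Illustrative use of vmm_palloc_addr() (hypothetical caller, not from this
 * file): a driver that needs both the virtual mapping and the backing
 * physical address of a page, e.g. to hand the physical address to hardware:
 *
 *     u32_t phys;
 *     void *buf = vmm_palloc_addr(&phys);
 *     if (!buf)
 *         return -1;          // out of pages
 *     // ... program a device with 'phys', access the page through 'buf' ...
 *     vmm_pfree(buf);         // unmap and release the physical page
 */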

// This function frees a previously-allocated virtual page
int vmm_pfree(void *addr)
{
    u32_t pbase = *(u32_t *)(0xFFC00000 |
                             (((u32_t)addr & 0xFFC00000) >> 10) |
                             (((u32_t)addr & 0x003FF000) >> 10));
    if (vmm_unmapp(addr))
        return -1;
    mm_pfree(pbase & 0xFFFFF000); // strip the PTE flag bits before freeing the frame
    return 0;
}

// This function unmaps a page previously allocated with vmm_palloc()
// and returns its virtual range to the HOLE queue
int vmm_unmapp(void *addr)
{
    if (!addr)
        return -2;
    k_enter_critical();
    HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
    while (he->next)
    {
        if (he->base == addr) // found the page to free
        {
            vmm_removeHeapEntry(VMM_HE_USED, he);
            vmm_unmap1((u32_t)he->base);
            vmm_addToQueue(VMM_HE_HOLE, &heapEntryHeadNodes[VMM_HE_HOLE], he);
            k_leave_critical();
            return 0;
        }
        he = he->next;
    }
    k_leave_critical();
    return -1; // page not found
}

// This function allocates and zeros memory for the given number of objects,
// given the size of each object
void *kcalloc(u32_t number, u32_t size)
{
    void *mem = kmalloc(number * size);
    if (!mem)
        return NULL; // could not get memory
    memset(mem, 0, number * size);
    return mem;
}

// This function re-allocates already-allocated memory, preserving the old
// contents (up to the smaller of the old and new sizes)
void *krealloc(void *orig, unsigned int newSize)
{
    void *newMem;
    if ((newMem = kmalloc(newSize)))
    {
        HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
        while (he->next)
        {
            if (he->base == orig)
            {
                memcpy(newMem, orig, (he->length < newSize ? he->length : newSize));
                kfree(orig);
                return newMem;
            }
            he = (HeapEntry_t *)he->next;
        }
        kfree(newMem);
        return NULL; // base address not found
    }
    else
        return NULL; // could not get mem for new chunk
}
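
/*
 * vmm_getFreeChunk() below implements what amounts to a best-fit search of
 * the FREE queue: an exact-size match is taken immediately; otherwise the
 * smallest chunk larger than the request is remembered and, after the scan,
 * split into a USED entry of exactly 'size' bytes plus a remainder that stays
 * on the FREE queue.  (This is a description of the code as written, not of
 * any external specification.)
 */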

// This function returns the base address of a free chunk of virtual memory - called from kmalloc()
void *vmm_getFreeChunk(u32_t size)
{
    HeapEntry_t *he = heapEntryQueues[VMM_HE_FREE].head->next;
    HeapEntry_t *good = NULL;
    while (he->next) // he is not the tail node
    {
        if (he->length == size)
        {
            vmm_removeHeapEntry(VMM_HE_FREE, he);
            vmm_addToQueue(VMM_HE_USED, heapEntryQueues[VMM_HE_USED].head, he);
            return he->base;
        }
        if (good)
        {
            if ((he->length > size) && (he->length < good->length))
                good = he;
        }
        else
        {
            if (he->length > size)
                good = he;
        }
        he = (HeapEntry_t *)he->next;
    }
    if (!good)
        return NULL;
    HeapEntry_t *newHE = vmm_getUnusedEntry();
    newHE->base = good->base;
    newHE->length = size;
    newHE->next = NULL;
    newHE->prev = NULL;
    good->base += size;
    good->length -= size;
    vmm_addToQueue(VMM_HE_USED, heapEntryQueues[VMM_HE_USED].head, newHE);
    return newHE->base;
}
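
/*
 * vmm_moreCore() below grows the heap: it computes a page count of
 * (size >> 12) + 2, maps that many new pages at the base of the largest HOLE
 * ("wilderness") entry, advances the hole past them, and files the new range
 * on the FREE queue, coalescing with an adjacent free chunk when possible.
 * For example, a 5000-byte request becomes (5000 >> 12) + 2 = 3 pages, i.e.
 * 12 KiB of new heap.
 */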

// This function retrieves more core memory for the virtual memory allocator to allocate
int vmm_moreCore(u32_t size)
{
    int pages = (size >> 12) + 2;
    size = pages << 12;
    if ((mm_freepages - 5) < pages)
        return -1; // out of physical memory
    HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
    HeapEntry_t *wilderness = he;
    while (he->next)
    {
        if (he->length > wilderness->length)
            wilderness = he;
        he = (HeapEntry_t *)he->next;
    }
    if (wilderness->length <= size)
        return -2; // out of virtual memory
    int i;
    void *virt = wilderness->base;
    for (i = 0; i < pages; i++)
    {
        vmm_map(virt); // note: vmm_map()'s return value is not checked here
        virt += 4096;
    }
    HeapEntry_t *newHE = vmm_getUnusedEntry();
    newHE->base = wilderness->base;
    newHE->length = size;
    newHE->next = 0;
    newHE->prev = 0;
    wilderness->base += size;
    wilderness->length -= size;
    if (vmm_coalesceEntry(VMM_HE_FREE, newHE)) // returns 0 on success (coalesced into an existing entry)
        vmm_addToQueue(VMM_HE_FREE, heapEntryQueues[VMM_HE_FREE].head, newHE);
    else
        vmm_addToQueue(VMM_HE_UNUSED, heapEntryQueues[VMM_HE_UNUSED].head, newHE);
    return 0;
}
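
/*
 * Example of the coalescing rule below (addresses are purely illustrative):
 * an existing FREE entry covering [0xD0000000, 0xD0002000) and a new entry
 * covering [0xD0002000, 0xD0003000) are merged into a single entry with
 * base 0xD0000000 and length 0x3000, and vmm_coalesceEntry() returns 0 so
 * the caller can recycle the now-redundant descriptor.
 */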

// This function coalesces two heap entries into one
int vmm_coalesceEntry(u32_t queue, HeapEntry_t *newHE)
{
    HeapEntry_t *existing = heapEntryQueues[queue].head->next;
    while (existing->next)
    {
        if ((existing->base + existing->length) == newHE->base)
        {
            existing->length += newHE->length;
            return 0;
        }
        else if ((newHE->base + newHE->length) == existing->base)
        {
            existing->base = newHE->base;
            existing->length += newHE->length;
            return 0;
        }
        existing = (HeapEntry_t *)existing->next;
    }
    return -1; // an entry to coalesce with was not found
}

// This function removes a heap entry from a queue
void vmm_removeHeapEntry(u32_t queue, HeapEntry_t *he)
{
    ((HeapEntry_t *)he->prev)->next = he->next;
    ((HeapEntry_t *)he->next)->prev = he->prev;
    heapEntryQueues[queue].count--;
    he->next = NULL;
    he->prev = NULL;
}

// This function initializes a Heap Entry Block, linking its entries into a chain
void vmm_heb_init(HeapEntryBlock_t *heb)
{
    int a;
    for (a = 0; a < 255; a++)
    {
        heb->entry[a].next = &heb->entry[a+1];
        heb->entry[a+1].prev = &heb->entry[a];
    }
    heb->entry[0].prev = NULL;
    heb->entry[255].next = NULL;
}

// This function adds a chain of HeapEntry structures to a queue, inserting it
// immediately after 'preceeding'.  'preceeding' must not be the tail node
// (insert at most one element before the tail), or the tail's NULL 'next'
// pointer will be dereferenced.
void vmm_addToQueue(u32_t queue, HeapEntry_t *preceeding, HeapEntry_t *he)
{
    heapEntryQueues[queue].count += vmm_countHeapEntries(he);
    HeapEntry_t *last = vmm_followChain(he);
    last->next = preceeding->next;
    he->prev = preceeding;
    ((HeapEntry_t *)last->next)->prev = last;
    preceeding->next = he;
}

// This function returns how many HeapEntry objects are in a chain,
// starting from (and including) the given object
int vmm_countHeapEntries(HeapEntry_t *he)
{
    int count = 0;
    while (he)
    {
        count++;
        he = (HeapEntry_t *)he->next;
    }
    return count;
}

// This function follows a chain of HeapEntry objects and returns a pointer to the last one
HeapEntry_t *vmm_followChain(HeapEntry_t *he)
{
    while (he->next)
        he = (HeapEntry_t *)he->next;
    return he;
}
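
/*
 * vmm_getUnusedEntry() below has to solve a small bootstrapping problem: the
 * allocator needs spare HeapEntry descriptors to describe the heap, but those
 * descriptors themselves come from heap pages.  When the UNUSED queue runs
 * low (fewer than five spare entries), it carves one page off the wilderness
 * hole, maps it, formats it as a fresh HeapEntryBlock_t with vmm_heb_init(),
 * and appends all of its entries to the UNUSED queue before handing one out.
 */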

// This function breaks an unused chunk from its queue and returns a pointer to it
// 09/10/05 nasty bug fixed by josh, wasn't adding unused entries when we ran out of them
HeapEntry_t *vmm_getUnusedEntry()
{
    if (heapEntryQueues[VMM_HE_UNUSED].count < 5)
    {
        HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
        HeapEntry_t *wilderness = he;
        while (he)
        {
            if ((he->length) > (wilderness->length))
                wilderness = he;
            he = (HeapEntry_t *)he->next;
        }
        if (wilderness->length < 10000)
        {
            k_check(-1, "Kernel panic: out of virtual memory\n");
        }
        wilderness->length -= 4096; // strip 4k from the top
        HeapEntryBlock_t *newHEB = wilderness->base + wilderness->length;
        vmm_map(newHEB);
        vmm_heb_init(newHEB);
        vmm_addToQueue(VMM_HE_UNUSED, heapEntryTailNodes[VMM_HE_UNUSED].prev, &newHEB->entry[0]);
    }
    return vmm_stripUnusedEntry();
}

// Return pointer to an unused HeapEntry object, ASSUMES THERE IS ONE PRESENT IN QUEUE
HeapEntry_t *vmm_stripUnusedEntry()
{
    HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].head->next;
    heapEntryQueues[VMM_HE_UNUSED].head->next = he->next;
    if (! he->next)
        k_check(1, "kernel panic: he->next is NULL\n");
    ((HeapEntry_t *)he->next)->prev = he->prev;
    heapEntryQueues[VMM_HE_UNUSED].count--;
    he->next = 0;
    he->prev = 0;
    return he;
}