281 lines
7.0 KiB
C
281 lines
7.0 KiB
C
// vmm.c
|
|
// Author: Josh Holtrop
|
|
// Date: 09/30/03
|
|
// Rewritten from scratch: 12/23/03
|
|
// Modified: 07/13/04
|
|
|
|
#include "hos_defines.h"
|
|
#include "kernel.h"
|
|
#include "mm/vmm.h"
|
|
#include "asmfuncs.h"
|
|
#include "mm/mm.h"
|
|
|
|
u32_t vmm_map(void *virt);
|
|
int vmm_map1(unsigned int virt, unsigned int physical);
|
|
int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n);
|
|
void vmm_unmap1(unsigned int virt);
|
|
void vmm_unmapn(unsigned int virt, unsigned int n);
|
|
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start);
|
|
void vmm_heb_init(HeapEntryBlock_t *heb);
|
|
void vmm_addToQueue(u32_t queue, HeapEntry_t *he);
|
|
int vmm_countHeapEntries(HeapEntry_t *he);
|
|
HeapEntry_t *vmm_followChain(HeapEntry_t *he);
|
|
HeapEntry_t *vmm_getUnusedEntry();
|
|
HeapEntry_t *vmm_stripUnusedEntry();
|
|
|
|
//void *calloc(unsigned int number, unsigned int size);
|
|
|
|
extern mb_info_t mb_info_block;
|
|
extern mb_module_t mb_modules[MAX_MODULES];
|
|
extern u32_t mm_freepages;
|
|
|
|
HeapEntryQueue_t heapEntryQueues[VMM_HE_TYPES]; //sorted, linked queue of HeapEntry objects
|
|
HeapEntryBlock_t initialHEB; //block for initial 256 HeapEntry objects
|
|
|
|
|
|
// This is the initialization procedure for the Virtual Memory Manager
|
|
// It sets up the heap for dynamic memory allocation and virtual page allocation
|
|
void vmm_init()
|
|
{
|
|
int i;
|
|
for (i = 0; i < mb_info_block.mods_count; i++) //page in the kernel modules
|
|
vmm_map_range((void*)mb_modules[i].mod_start, (void*)mb_modules[i].mod_end- 1, mb_modules[i].mod_start - VIRT_OFFSET);
|
|
vmm_heb_init(&initialHEB);
|
|
vmm_addToQueue(VMM_HE_UNUSED, &initialHEB.entry[0]);
|
|
HeapEntry_t *wilderness = vmm_stripUnusedEntry();
|
|
wilderness->base = (void *) HEAP_START;
|
|
wilderness->length = HEAP_LENGTH;
|
|
wilderness->next = 0;
|
|
vmm_addToQueue(VMM_HE_HOLE, wilderness);
|
|
}
|
|
|
|
|
|
/* Allocate a physical page and map the virtual address to it, return physical address allocated or NULL */
|
|
u32_t vmm_map(void *virt)
|
|
{
|
|
u32_t phys = mm_palloc();
|
|
if (!phys);
|
|
return NULL;
|
|
vmm_map1((u32_t)virt, phys);
|
|
return phys;
|
|
}
|
|
|
|
|
|
// Map one 4 KiB virtual page to a physical page via the page directory /
// page tables, creating (and zeroing) the page table if needed.
// Returns 0 on success, 1 if a page table could not be allocated.
// The page tables are reached through the recursive page-directory mapping:
// the directory itself is visible at 0xFFFFF000 and page table `pde` at
// 0xFFC00000 | (pde << 12).
int vmm_map1(unsigned int virt, unsigned int physical)
{
    unsigned int pde = virt >> 22;                  // page-directory index (top 10 bits)
    unsigned int pte = (virt & 0x003FF000) >> 12;   // page-table index (middle 10 bits)
    unsigned int *pageTables = (unsigned int *)0xFFFFF000; //this is the location of the page directory
    if (!(pageTables[pde] & 0x01)) //the page directory entry is not present, we must allocate a page table
    {
        u32_t newpagetable;
        if (!(newpagetable = mm_palloc()))
            return 1; //out of physical memory
        // 0x03 = present | writable
        pageTables[pde] = newpagetable | 0x03;
        invlpg_(virt); //in case it was cached, so we can fill page table safely
        memsetd((void*)(0xFFC00000 | (pde << 12)), 0, 1024); //zero out new page table
    }
    // Write the PTE through the recursive mapping: frame address plus
    // present | writable bits.
    *(unsigned int *)(0xFFC00000 | (pde << 12) | (pte << 2)) = (physical & 0xFFFFF000) | 0x03;
    invlpg_(virt);  // flush any stale TLB entry for this virtual address
    return 0;
}
|
|
|
|
|
|
// Map n consecutive pages, starting at the given virtual and physical
// addresses.  Returns 0 on success, 1 if any page failed to map.
int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n)
{
    unsigned int i;
    for (i = 0; i < n; i++)
    {
        if (vmm_map1(virt + (i << 12), physical + (i << 12)))
            return 1; // error mapping page
    }
    return 0;
}
|
|
|
|
|
|
// Clear the page-table entry for one virtual page and flush its TLB entry.
// The PTE is located through the recursive page-directory mapping at
// 0xFFC00000 (same addressing scheme as vmm_map1).
void vmm_unmap1(unsigned int virt)
{
    unsigned int pde = virt >> 22;          // page-directory index
    unsigned int pte = (virt >> 12) & 0x3FF; // page-table index
    unsigned int *entry =
        (unsigned int *)(0xFFC00000 | (pde << 12) | (pte << 2));
    *entry = 0;
    invlpg_(virt);
}
|
|
|
|
|
|
// Unmap n consecutive pages starting at the given virtual address.
void vmm_unmapn(unsigned int virt, unsigned int n)
{
    unsigned int i;
    for (i = 0; i < n; i++)
        vmm_unmap1(virt + (i << 12));
}
|
|
|
|
|
|
// Map the virtual range [virt_start, virt_end) page by page onto physical
// memory starting at phys_start.  Returns 0 on success, 1 for an invalid
// region (end before start), 2 when out of physical memory.
// FIX: arithmetic and relational comparison on void* is a GCC extension
// (undefined in ISO C); do the stepping in u32_t instead -- behavior is
// identical in this flat 32-bit address space.
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
{
    u32_t virt = (u32_t)virt_start;
    u32_t end = (u32_t)virt_end;

    if (end < virt)
        return 1; // invalid region
    while (virt < end)
    {
        if (vmm_map1(virt, phys_start))
            return 2; // out of memory
        virt += 4096;
        phys_start += 4096;
    }
    return 0;
}
|
|
|
|
|
|
// Kernel heap allocator -- NOT YET IMPLEMENTED.
// Currently a stub: it enters and leaves the critical section without
// doing any work and always returns NULL, regardless of `size`.
void *kmalloc(u32_t size)
{
    k_enter_critical();

    // TODO: carve `size` bytes out of a HOLE entry and track it as USED.

    k_leave_critical();
    return NULL;
}
|
|
|
|
|
|
// Virtual page allocator -- INCOMPLETE.
// Walks to the end of the HOLE queue but does not yet carve a page out of
// it; always returns NULL.
void *vmm_palloc()
{
    k_enter_critical();
    HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].start;
    // NOTE(review): vmm_followChain dereferences unconditionally -- this
    // faults if the HOLE queue is empty (he == NULL); confirm init order.
    he = vmm_followChain(he);

    // TODO: split a page off `he` and return its virtual address.

    k_leave_critical();
    return NULL;
}
|
|
|
|
|
|
// This function allocates and zeros memory for the given number of objects,
|
|
// given the size of each object
|
|
/*
|
|
void *calloc(unsigned int number, unsigned int size)
|
|
{
|
|
void *mem = malloc(number * size);
|
|
if (!mem)
|
|
return NULL; //could not get memory
|
|
memset(mem, 0, number * size);
|
|
return mem;
|
|
}*/
|
|
|
|
|
|
// This function initialzes a Heap Entry Block to entries linked together
|
|
void vmm_heb_init(HeapEntryBlock_t *heb)
|
|
{
|
|
int a;
|
|
for (a = 0; a < 255; a++)
|
|
heb->entry[a].next = &heb->entry[a+1];
|
|
heb->entry[255].next = NULL;
|
|
}
|
|
|
|
|
|
// This function adds a HeapEntry structure list to the appropriate place in the queue
|
|
void vmm_addToQueue(u32_t queue, HeapEntry_t *he)
|
|
{
|
|
if (heapEntryQueues[queue].start == NULL) //queue is empty
|
|
{
|
|
heapEntryQueues[queue].start = he;
|
|
heapEntryQueues[queue].count = vmm_countHeapEntries(he);
|
|
return;
|
|
}
|
|
|
|
switch (queue)
|
|
{
|
|
HeapEntry_t *otherhe;
|
|
case VMM_HE_UNUSED: // don't sort at all, add to end
|
|
otherhe = vmm_followChain(heapEntryQueues[queue].start);
|
|
otherhe->next = he;
|
|
heapEntryQueues[queue].count += vmm_countHeapEntries(he);
|
|
break;
|
|
case VMM_HE_USED: // sort by base address
|
|
otherhe = heapEntryQueues[queue].start;
|
|
while (otherhe->next)
|
|
{
|
|
if (((HeapEntry_t *)otherhe->next)->base > he->base)
|
|
break;
|
|
otherhe = otherhe->next;
|
|
}
|
|
he->next = otherhe->next;
|
|
otherhe->next = he;
|
|
heapEntryQueues[queue].count++;
|
|
break;
|
|
case VMM_HE_HOLE: // sort by length
|
|
case VMM_HE_FREE:
|
|
otherhe = heapEntryQueues[queue].start;
|
|
while (otherhe->next)
|
|
{
|
|
if (((HeapEntry_t *)otherhe->next)->length > he->length)
|
|
break;
|
|
otherhe = otherhe->next;
|
|
}
|
|
he->next = otherhe->next;
|
|
otherhe->next = he;
|
|
heapEntryQueues[queue].count++;
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
// This function returns how many HeapEntry objects are in a queue starting from the object given
|
|
int vmm_countHeapEntries(HeapEntry_t *he)
|
|
{
|
|
int count = 0;
|
|
while (he)
|
|
{
|
|
count++;
|
|
he = (HeapEntry_t *)he->next;
|
|
}
|
|
return count;
|
|
}
|
|
|
|
|
|
// This function follows a chain of HeapEntry objects and returns a pointer to the last one
|
|
HeapEntry_t *vmm_followChain(HeapEntry_t *he)
|
|
{
|
|
while (he->next)
|
|
he = (HeapEntry_t *)he->next;
|
|
return he;
|
|
}
|
|
|
|
|
|
// This function breaks an unused chunk from its queue and returns a pointer to it
|
|
HeapEntry_t *vmm_getUnusedEntry()
|
|
{
|
|
if (heapEntryQueues[VMM_HE_UNUSED].count < 5)
|
|
{
|
|
HeapEntry_t *wilderness = vmm_followChain(heapEntryQueues[VMM_HE_HOLE].start);
|
|
wilderness->length -= 4096;
|
|
HeapEntryBlock_t *newHEB = wilderness->base + wilderness->length;
|
|
vmm_map(newHEB);
|
|
vmm_heb_init(newHEB);
|
|
HeapEntry_t *newDesc = vmm_stripUnusedEntry();
|
|
newDesc->base = newHEB;
|
|
newDesc->length = 4096;
|
|
vmm_addToQueue(VMM_HE_USED, newDesc);
|
|
}
|
|
return vmm_stripUnusedEntry();
|
|
}
|
|
|
|
|
|
// Return pointer to an unused HeapEntry object, ASSUMES THERE IS ONE PRESENT IN QUEUE
|
|
HeapEntry_t *vmm_stripUnusedEntry()
|
|
{
|
|
HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].start;
|
|
heapEntryQueues[VMM_HE_UNUSED].start = he->next;
|
|
heapEntryQueues[VMM_HE_UNUSED].count--;
|
|
he->next = 0;
|
|
return he;
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|