hos/kernel/mm/vmm.c

// vmm.c
// Author: Josh Holtrop
// Date: 09/30/03
// Rewritten from scratch: 12/23/03
// Modified: 07/13/04
#include "hos_defines.h"
#include "mm/vmm.h"
#include "asmfuncs.h"
#include "mm/mm.h"
extern mb_info_t mb_info_block;
extern mb_module_t mb_modules[MAX_MODULES];
extern u32_t mm_freepages;
HeapEntryQueue_t heapEntryQueues[VMM_HE_TYPES]; //sorted, linked queue of HeapEntry objects
HeapEntryBlock_t initialHEB; //block for initial 256 HeapEntry objects
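
/* Heap bookkeeping used below: HeapEntry_t objects are carved out of fixed
 * HeapEntryBlock_t blocks of 256 entries and chained together, and each chain
 * lives on one of the VMM_HE_TYPES queues in heapEntryQueues[]. */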
// This is the initialization procedure for the Virtual Memory Manager
// It sets up the heap for dynamic memory allocation and virtual page allocation
void vmm_init()
{
    int i;
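    /* Map each boot module so its address resolves to the physical location
     * the boot loader left it at (VIRT_OFFSET is presumably the kernel's
     * virtual base, so phys = virt - VIRT_OFFSET). */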
    for (i = 0; i < mb_info_block.mods_count; i++) //page in the kernel modules
        vmm_map_range((void*)mb_modules[i].mod_start, (void*)mb_modules[i].mod_end - 1, mb_modules[i].mod_start - VIRT_OFFSET);
}

/* Allocate a physical page and map the virtual address to it, return physical address allocated or NULL */
u32_t vmm_map(void *virt)
{
    u32_t phys = mm_palloc();
    if (!phys)
        return NULL;
    vmm_map1((u32_t)virt, phys);
    return phys;
}

// This function maps a virtual address to a physical address using the page directory / page table
void vmm_map1(unsigned int virt, unsigned int physical)
{
    unsigned int pde = virt >> 22;
    unsigned int pte = (virt & 0x003FF000) >> 12;
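    /* Worked example (illustrative address): virt = 0xC0101000 gives
     * pde = 0xC0101000 >> 22 = 0x300 and pte = (0xC0101000 & 0x003FF000) >> 12 = 0x101. */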
    unsigned int *pageTables = (unsigned int *)0xFFFFF000; //this is the location of the page directory
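    /* The 0xFFC00000-0xFFFFFFFF window relies on the recursive-mapping trick:
     * the last page directory entry points back at the page directory itself
     * (presumably set up by the boot code), so page table 'pde' is visible at
     * 0xFFC00000 + (pde << 12) and the directory itself at 0xFFFFF000. */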
    if (!(pageTables[pde] & 0x01)) //the page directory entry does not exist, we must allocate a page for it
    {
        u32_t newpagetable = mm_palloc();
        pageTables[pde] = newpagetable | 0x03;
        invlpg_(virt); //in case it was cached, so we can fill page table safely
        unsigned int *newpteptr = (unsigned int *)(0xFFC00000 | (pde << 12)); //points to first unsigned int of newly allocated page table
        int a;
        for (a = 0; a < 1024; a++)
            *newpteptr++ = 0;
    }
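    /* Write the page table entry: keep the 4 KiB-aligned frame address and set
     * the present and read/write flags (0x03). */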
    *(unsigned int *)(0xFFC00000 | (pde << 12) | (pte << 2)) = (physical & 0xFFFFF000) | 0x03;
    invlpg_(virt);
}

// This function maps a variable number of pages in a row
void vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n)
{
    while (n > 0)
    {
        vmm_map1(virt, physical);
        virt += 4096;
        physical += 4096;
        n--;
    }
}

// This function removes the virtual address's entry in the page directory / page table
void vmm_unmap1(unsigned int virt)
{
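    /* Same PDE/PTE arithmetic as in vmm_map1, folded into one expression:
     * (virt & 0xFFC00000) >> 10 equals pde << 12 (selects the page table page)
     * and (virt & 0x003FF000) >> 10 equals pte << 2 (byte offset of the entry). */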
    *(unsigned int *)(0xFFC00000 | ((virt & 0xFFC00000) >> 10) | ((virt & 0x003FF000) >> 10)) = 0;
    invlpg_(virt);
}

// This function removes multiple pages' entries
void vmm_unmapn(unsigned int virt, unsigned int n)
{
    while (n > 0)
    {
        vmm_unmap1(virt);
        virt += 4096;
        n--;
    }
}

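// This function maps each page from virt_start up to virt_end to consecutive
// physical pages starting at phys_start; returns 0 on success or 1 for an invalid range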
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
{
    if (virt_end < virt_start)
        return 1; //invalid region
    while (virt_start < virt_end)
    {
        vmm_map1((u32_t)virt_start, phys_start);
        virt_start += 4096;
        phys_start += 4096;
    }
    return 0;
}

// This function allocates and zeros memory for the given number of objects,
// given the size of each object
/*
void *calloc(unsigned int number, unsigned int size)
{
    void *mem = malloc(number * size);
    if (!mem)
        return NULL; //could not get memory
    memset(mem, 0, number * size);
    return mem;
}*/

// This function initializes a Heap Entry Block so that its entries are linked together
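// A fresh block's 256 entries end up chained through their next pointers,
// ready to be handed out one at a time.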
void vmm_heb_init(HeapEntryBlock_t *heb)
{
    int a;
    for (a = 0; a < 255; a++)
        heb->entry[a].next = &heb->entry[a+1];
    heb->entry[255].next = NULL;
}

// This function adds a HeapEntry structure list to the appropriate place in the queue
void vmm_addToQueue(u32_t queue, HeapEntry_t *he)
{
    if (heapEntryQueues[queue].start == NULL) //queue is empty
    {
        heapEntryQueues[queue].start = he;
        heapEntryQueues[queue].count = vmm_countHeapEntries(he);
        return;
    }
    // TODO
}

// This function returns how many HeapEntry objects are in a queue starting from the object given
int vmm_countHeapEntries(HeapEntry_t *he)
{
    int count = 0;
    while (he)
    {
        count++;
        he = (HeapEntry_t *)he->link;
    }
    return count;
}

// This function follows a chain of HeapEntry objects and returns a pointer to the last one
HeapEntry_t *vmm_followChain(HeapEntry_t *he)
{
    while (he->link)
        he = (HeapEntry_t *)he->link;
    return he;
}

// This function breaks an unused chunk from its queue and returns a pointer to it
HeapEntry_t *vmm_getUnusedChunk()
{
    if (heapEntryQueues[VMM_HE_UNUSED].count < 5)
    {
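        /* Presumably the count is inflated here so this refill branch is not
         * re-entered if vmm_palloc() itself needs heap entries (the
         * "INFINITE LOOP?" note below flags that risk). */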
        heapEntryQueues[VMM_HE_UNUSED].count += 256; //not good idea
        HeapEntryBlock_t *newHEB = vmm_palloc(); //INFINITE LOOP?
        vmm_heb_init(newHEB);
        vmm_addToQueue(VMM_HE_UNUSED, &newHEB->entry[0]);
        heapEntryQueues[VMM_HE_UNUSED].count -= 256;
    }
    HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].start;
    heapEntryQueues[VMM_HE_UNUSED].start = (HeapEntry_t *)he->link;
    heapEntryQueues[VMM_HE_UNUSED].count--;
    return he;
}