Import backup from 2004-07-29

Josh Holtrop 2004-07-29 22:00:00 -04:00
parent 11da8cb162
commit 6d23c76e84
10 changed files with 145 additions and 101 deletions

View File

@@ -12,9 +12,8 @@ all:
# make -C rmmod
clean:
make -C kernel clean
make -C include clean
# make -C rmmod clean
- make -C kernel clean
- make -C rmmod clean
- rm *~ hos.flp

BIN
hos.flp

Binary file not shown.

View File

@@ -96,6 +96,7 @@ void k_init()
void isr(u32_t num)
{
criticalCounter++;
for (;;)
{
(*(char*)(CONSOLE_MEMORY+158))++;
@@ -104,8 +105,10 @@ void isr(u32_t num)
{
}
criticalCounter--;
}
void k_enter_critical() // functions for implementing "atomic actions"
{
disable_ints();
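The k_enter_critical / k_leave_critical pair implements nestable "atomic actions" by disabling interrupts and tracking depth in criticalCounter. A minimal sketch of that pattern, assuming an enable_ints() counterpart to the disable_ints() call visible above:

/* Sketch: nestable critical sections via interrupt disabling.
 * enable_ints() is assumed as the counterpart of disable_ints(). */
extern void disable_ints(void);
extern void enable_ints(void);
static int criticalCounter = 0;     /* nesting depth of critical sections */

void enter_critical_sketch(void)
{
    disable_ints();                 /* interrupts stay off while depth is nonzero */
    criticalCounter++;
}

void leave_critical_sketch(void)
{
    if (--criticalCounter == 0)
        enable_ints();              /* only the outermost leave re-enables */
}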

View File

@@ -61,12 +61,9 @@ void mm_init()
// specified in 'base' for a length of 'pages' pages
void mm_pfreen(u32_t base, u32_t pages)
{
u32_t a;
u32_t max = base + (pages << 12);
for (a = base; a < max; a += 4096)
{
mm_pfree(a);
}
pages = base + (pages << 12); //convert page count to an end address
for (; base < pages; base += 4096)
mm_pfree(base);
}
@@ -78,7 +75,7 @@ void mm_pfree(u32_t base)
u32_t bitNumber = (base >> 12) & 0x07; //pageNumber % 8;
page_bitmap[byteNumber] = page_bitmap[byteNumber] & ((0x01 << bitNumber) ^ 0xFF);
mm_freepages++;
if (mm_first_free_byte > byteNumber)
if (byteNumber < mm_first_free_byte)
mm_first_free_byte = byteNumber;
}
@@ -87,12 +84,9 @@ void mm_pfree(u32_t base)
// specified in 'base' for a length of 'pages' pages
void mm_preserven(u32_t base, u32_t pages)
{
u32_t a;
u32_t max = base + (pages << 12);
for (a = base; a < max; a += 4096)
{
mm_preserve(a);
}
pages = base + (pages << 12); //convert page count to an end address
for (; base < pages; base += 4096)
mm_preserve(base);
}
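Both helpers above reduce to single-page operations on a bitmap that tracks one bit per 4 KiB physical page. A standalone sketch of the byte/bit index arithmetic mm_pfree uses (the bitmap size is an assumption; the real one depends on installed RAM):

/* Sketch of the page-bitmap index arithmetic. */
#define PAGE_SHIFT 12                    /* 4096-byte pages */
static unsigned char page_bitmap[4096];  /* assumed size: covers 128 MiB */

void bitmap_free_page(unsigned int base) /* clear the bit: page is free */
{
    unsigned int page = base >> PAGE_SHIFT;
    unsigned int byteNumber = page >> 3;        /* page / 8 */
    unsigned int bitNumber  = page & 0x07;      /* page % 8 */
    page_bitmap[byteNumber] &= (unsigned char)~(1 << bitNumber);
}

void bitmap_reserve_page(unsigned int base) /* set the bit: page is in use */
{
    unsigned int page = base >> PAGE_SHIFT;
    page_bitmap[page >> 3] |= (unsigned char)(1 << (page & 0x07));
}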

View File

@@ -2,7 +2,7 @@
// Author: Josh Holtrop
// Date: 09/30/03
// Rewritten from scratch: 12/23/03
// Modified: 07/13/04
// Modified: 07/29/04
#include "hos_defines.h"
#include "kernel.h"
@@ -10,14 +10,14 @@
#include "asmfuncs.h"
#include "mm/mm.h"
u32_t vmm_map(void *virt);
int vmm_map(void *virt);
int vmm_map1(unsigned int virt, unsigned int physical);
int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n);
void vmm_unmap1(unsigned int virt);
void vmm_unmapn(unsigned int virt, unsigned int n);
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start);
void vmm_heb_init(HeapEntryBlock_t *heb);
void vmm_addToQueue(u32_t queue, HeapEntry_t *he);
void vmm_addToQueue(u32_t queue, HeapEntry_t *preceeding, HeapEntry_t *he);
int vmm_countHeapEntries(HeapEntry_t *he);
HeapEntry_t *vmm_followChain(HeapEntry_t *he);
HeapEntry_t *vmm_getUnusedEntry();
@@ -29,8 +29,9 @@ extern mb_info_t mb_info_block;
extern mb_module_t mb_modules[MAX_MODULES];
extern u32_t mm_freepages;
HeapEntryQueue_t heapEntryQueues[VMM_HE_TYPES]; // sorted, linked queue of HeapEntry objects
HeapEntryQueue_t heapEntryQueues[VMM_HE_TYPES]; // linked queue of HeapEntry objects
HeapEntry_t heapEntryHeadNodes[VMM_HE_TYPES]; // head nodes for linked queues
HeapEntry_t heapEntryTailNodes[VMM_HE_TYPES]; // tail nodes for linked queues
HeapEntryBlock_t initialHEB; // block for initial 256 HeapEntry objects
@@ -40,33 +41,38 @@ void vmm_init()
{
int i;
for (i = 0; i < mb_info_block.mods_count; i++) //page in the kernel modules
vmm_map_range((void*)mb_modules[i].mod_start, (void*)mb_modules[i].mod_end- 1, mb_modules[i].mod_start - VIRT_OFFSET);
vmm_map_range((void*)mb_modules[i].mod_start, (void*)mb_modules[i].mod_end - 1, mb_modules[i].mod_start - VIRT_OFFSET);
for (i = 0; i < VMM_HE_TYPES; i++)
{
heapEntryQueues[i].head = &heapEntryHeadNodes[i];
heapEntryHeadNodes[i].next = &heapEntryTailNodes[i];
heapEntryTailNodes[i].prev = &heapEntryHeadNodes[i];
}
vmm_heb_init(&initialHEB);
vmm_addToQueue(VMM_HE_UNUSED, &initialHEB.entry[0]);
vmm_addToQueue(VMM_HE_UNUSED, &heapEntryHeadNodes[VMM_HE_UNUSED], &initialHEB.entry[0]);
HeapEntry_t *wilderness = vmm_stripUnusedEntry();
wilderness->base = (void *) HEAP_START;
wilderness->length = HEAP_LENGTH;
vmm_addToQueue(VMM_HE_HOLE, wilderness);
vmm_addToQueue(VMM_HE_HOLE, &heapEntryHeadNodes[VMM_HE_HOLE], wilderness);
}
/* Allocate a physical page and map the virtual address to it; return 0 on success or -1 if physical memory is running low */
u32_t vmm_map(void *virt)
int vmm_map(void *virt)
{
u32_t phys = mm_palloc();
if (!phys)
return NULL;
vmm_map1((u32_t)virt, phys);
return phys;
if (mm_freepages < 10)
return -1;
vmm_map1((u32_t)virt, mm_palloc());
return 0;
}
// This function maps a virtual address to a physical address using the page directory / page table
int vmm_map1(unsigned int virt, unsigned int physical)
int vmm_map1(u32_t virt, u32_t physical)
{
unsigned int pde = virt >> 22;
unsigned int pte = (virt & 0x003FF000) >> 12;
unsigned int *pageTables = (unsigned int *)0xFFFFF000; //this is the location of the page directory
u32_t pde = virt >> 22;
u32_t pte = (virt & 0x003FF000) >> 12;
u32_t *pageTables = (u32_t *)0xFFFFF000; //this is the location of the page directory
if (!(pageTables[pde] & 0x01)) //the page directory entry is not present, we must allocate a page table
{
u32_t newpagetable;
@@ -76,14 +82,14 @@ int vmm_map1(unsigned int virt, unsigned int physical)
invlpg_(virt); //in case it was cached, so we can fill page table safely
memsetd((void*)(0xFFC00000 | (pde << 12)), 0, 1024); //zero out new page table
}
*(unsigned int *)(0xFFC00000 | (pde << 12) | (pte << 2)) = (physical & 0xFFFFF000) | 0x03;
*(u32_t *)(0xFFC00000 | (pde << 12) | (pte << 2)) = (physical & 0xFFFFF000) | 0x03;
invlpg_(virt);
return 0;
}
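The 0xFFFFF000 and 0xFFC00000 constants come from the recursive page-directory technique: the last page-directory entry points at the directory itself, so every page table appears in the top 4 MiB of the virtual address space and the directory itself appears as the final page. A hedged, arithmetic-only illustration of where vmm_map1 finds the entry for a given address:

#include <stdio.h>

/* Sketch: locating a PTE under a recursive page directory.
 * Assumes the self-map layout implied above: tables visible at 0xFFC00000,
 * directory at 0xFFFFF000. Pure arithmetic; touches no real paging. */
int main(void)
{
    unsigned int virt = 0xC0123000;                 /* example address */
    unsigned int pde = virt >> 22;                  /* page-directory index */
    unsigned int pte = (virt & 0x003FF000) >> 12;   /* page-table index */
    unsigned int entry = 0xFFC00000 | (pde << 12) | (pte << 2);
    printf("pde=%u pte=%u entry at 0x%08X\n", pde, pte, entry);
    /* prints: pde=768 pte=291 entry at 0xFFF0048C */
    return 0;
}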
// This function maps a variable number of pages in a row
int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n)
int vmm_mapn(u32_t virt, u32_t physical, u32_t n)
{
while (n > 0)
{
@@ -98,15 +104,15 @@ int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n)
// This function removes the virtual address's entry in the page directory / page table
void vmm_unmap1(unsigned int virt)
void vmm_unmap1(u32_t virt)
{
*(unsigned int *)(0xFFC00000 | ((virt & 0xFFC00000) >> 10) | ((virt & 0x003FF000) >> 10)) = 0;
*(u32_t *)(0xFFC00000 | ((virt & 0xFFC00000) >> 10) | ((virt & 0x003FF000) >> 10)) = 0;
invlpg_(virt);
}
// This function removes multiple pages' entries
void vmm_unmapn(unsigned int virt, unsigned int n)
void vmm_unmapn(u32_t virt, u32_t n)
{
while (n > 0)
{
@@ -117,14 +123,15 @@ void vmm_unmapn(unsigned int virt, unsigned int n)
}
// This function maps an entire address range into memory
int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
{
if (virt_end < virt_start)
return 1; // invalid region
return -1; // invalid region
while (virt_start < virt_end)
{
if (vmm_map1((u32_t)virt_start, phys_start))
return 2; // out of memory
return -2; // out of memory
virt_start += 4096;
phys_start += 4096;
}
@@ -132,6 +139,8 @@ int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
}
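This revision also moves vmm_map_range to negative error codes (-1 invalid region, -2 out of memory), matching the new int-returning vmm_map. A hedged caller-side sketch mirroring the module-mapping loop in vmm_init; k_panic() is a hypothetical error handler, not part of this codebase:

/* Sketch: handling the new negative return codes. */
extern void k_panic(const char *msg);   /* hypothetical */

void map_module_sketch(mb_module_t *mod)
{
    int rc = vmm_map_range((void *)mod->mod_start,
                           (void *)mod->mod_end - 1,
                           mod->mod_start - VIRT_OFFSET);
    if (rc == -1)
        k_panic("module region ends before it starts");
    else if (rc == -2)
        k_panic("out of physical memory while mapping module");
}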
// kernel virtual memory allocator
void *kmalloc(u32_t size)
{
k_enter_critical();
@@ -141,28 +150,86 @@ void *kmalloc(u32_t size)
}
// kernel virtual memory de-allocator
int kfree(void *addr)
{
k_enter_critical();
k_leave_critical();
return 0;
}
// This function allocates a virtual page and maps it to a physical page
void *vmm_palloc()
{
k_enter_critical();
HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].start;
if (he->length == 4096)
HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
HeapEntry_t *wilderness = he;
while (he)
{
heapEntryQueues[VMM_HE_HOLE].start = he->next;
heapEntryQueues[VMM_HE_HOLE].count--;
he->next = 0;
vmm_addToQueue(VMM_HE_USED, he);
return he->base;
if (he->length == 4096)
{
((HeapEntry_t *)he->prev)->next = he->next;
((HeapEntry_t *)he->next)->prev = he->prev;
heapEntryQueues[VMM_HE_HOLE].count--;
he->next = NULL;
he->prev = NULL;
vmm_addToQueue(VMM_HE_USED, &heapEntryHeadNodes[VMM_HE_USED], he);
vmm_map(he->base);
k_leave_critical();
return he->base;
}
if (he->length > wilderness->length)
wilderness = he;
he = (HeapEntry_t *)he->next;
}
if (wilderness->length < 0x00010000) //leave 16 pages free
{
k_leave_critical();
return NULL;
}
wilderness->length -= 4096; //strip 4k from the top
he = vmm_getUnusedEntry();
he->base = wilderness->base + wilderness->length;
he->length = 4096;
vmm_addToQueue(VMM_HE_USED, &heapEntryHeadNodes[VMM_HE_USED], he);
vmm_map(he->base);
k_leave_critical();
return NULL;
return he->base;
}
// This function frees a previously-allocated virtual page
int vmm_pfree(void *addr)
{
k_enter_critical();
HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
while (he)
{
if (he->base == addr) //found the page to free
{
((HeapEntry_t *)he->prev)->next = he->next;
((HeapEntry_t *)he->next)->prev = he->prev;
heapEntryQueues[VMM_HE_USED].count--;
he->next = NULL;
he->prev = NULL;
vmm_unmap1((u32_t)he->base);
vmm_addToQueue(VMM_HE_HOLE, &heapEntryHeadNodes[VMM_HE_HOLE], he);
k_leave_critical();
return 0;
}
he = he->next;
}
k_leave_critical();
return -1; // page not found
}
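Together, vmm_palloc and the new vmm_pfree give the kernel a page-granular allocator over the hole/used queues. A short hedged usage sketch (the zeroing step is illustrative only):

/* Sketch: allocate, use, and release one 4 KiB kernel page. */
void page_roundtrip_sketch(void)
{
    void *page = vmm_palloc();  /* NULL once the wilderness drops below 16 pages */
    if (page == NULL)
        return;
    memsetd(page, 0, 1024);     /* zero 1024 dwords = 4096 bytes */
    /* ... use the page ... */
    if (vmm_pfree(page) < 0)
    {
        /* -1: address was not found in the used queue */
    }
}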
// This function allocates and zeros memory for the given number of objects,
// given the size of each object
/*
void *calloc(unsigned int number, unsigned int size)
void *calloc(u32_t number, u32_t size)
{
void *mem = malloc(number * size);
if (!mem)
@@ -172,64 +239,34 @@ void *calloc(unsigned int number, unsigned int size)
}*/
// This function initializes a Heap Entry Block so that its entries are linked together
void vmm_heb_init(HeapEntryBlock_t *heb)
{
int a;
for (a = 0; a < 255; a++)
{
heb->entry[a].next = &heb->entry[a+1];
heb->entry[a+1].prev = &heb->entry[a];
}
heb->entry[0].prev = NULL;
heb->entry[255].next = NULL;
}
// This function adds a HeapEntry structure list to the appropriate place in the queue
void vmm_addToQueue(u32_t queue, HeapEntry_t *he)
// This function adds a HeapEntry chain to the queue, immediately following 'preceeding'
void vmm_addToQueue(u32_t queue, HeapEntry_t *preceeding, HeapEntry_t *he)
{
if (heapEntryQueues[queue].start == NULL) //queue is empty
{
heapEntryQueues[queue].start = he;
heapEntryQueues[queue].count = vmm_countHeapEntries(he);
return;
}
switch (queue)
{
HeapEntry_t *prevhe;
case VMM_HE_UNUSED: // don't sort at all, add to end
prevhe = vmm_followChain(heapEntryQueues[queue].start);
prevhe->next = he;
heapEntryQueues[queue].count += vmm_countHeapEntries(he);
break;
case VMM_HE_USED: // sort by base address
prevhe = heapEntryQueues[queue].start;
while (prevhe->next)
{
if (((HeapEntry_t *)prevhe->next)->base > he->base)
break;
prevhe = prevhe->next;
}
he->next = prevhe->next;
prevhe->next = he;
heapEntryQueues[queue].count++;
break;
case VMM_HE_HOLE: // sort by length
case VMM_HE_FREE:
prevhe = heapEntryQueues[queue].start;
while (prevhe->next)
{
if (((HeapEntry_t *)prevhe->next)->length > he->length)
break;
prevhe = prevhe->next;
}
he->next = prevhe->next;
prevhe->next = he;
heapEntryQueues[queue].count++;
break;
}
heapEntryQueues[queue].count += vmm_countHeapEntries(he);
HeapEntry_t *last = vmm_followChain(he);
last->next = preceeding->next;
he->prev = preceeding;
((HeapEntry_t *)last->next)->prev = last;
preceeding->next = he;
}
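The rewrite relies on the head/tail sentinel nodes created in vmm_init: since every queue always contains at least those two nodes, a chain can be spliced in after any 'preceeding' node with four pointer writes and no empty-queue special case. The same splice, reduced to a single node in a standalone sketch (field types are assumptions):

typedef struct node { struct node *next, *prev; } node_t; /* assumed shape */

/* Sketch: sentinel-based insertion; works even on an "empty" queue
 * holding only the head and tail sentinels. */
void insert_after_sketch(node_t *preceeding, node_t *n)
{
    n->next = preceeding->next;
    n->prev = preceeding;
    preceeding->next->prev = n;
    preceeding->next = n;
}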
// This function returns how many HeapEntry objects are in a queue starting from the object given
// This function returns how many HeapEntry objects are in a queue, starting from (and including) the object given
int vmm_countHeapEntries(HeapEntry_t *he)
{
int count = 0;
@@ -256,15 +293,22 @@ HeapEntry_t *vmm_getUnusedEntry()
{
if (heapEntryQueues[VMM_HE_UNUSED].count < 5)
{
HeapEntry_t *wilderness = vmm_followChain(heapEntryQueues[VMM_HE_HOLE].start);
wilderness->length -= 4096;
HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
HeapEntry_t *wilderness = he;
while (he)
{
if ((he->length) > (wilderness->length))
wilderness = he;
he = (HeapEntry_t *)he->next;
}
wilderness->length -= 4096; //strip 4k from the top
HeapEntryBlock_t *newHEB = wilderness->base + wilderness->length;
vmm_map(newHEB);
vmm_heb_init(newHEB);
vmm_addToQueue(VMM_HE_UNUSED, &heapEntryHeadNodes[VMM_HE_UNUSED], &newHEB->entry[0]); //link the fresh entries into the unused queue
HeapEntry_t *newDesc = vmm_stripUnusedEntry();
HeapEntry_t *newDesc = vmm_stripUnusedEntry(); //descriptor for the new HEB
newDesc->base = newHEB;
newDesc->length = 4096;
vmm_addToQueue(VMM_HE_USED, newDesc);
vmm_addToQueue(VMM_HE_USED, heapEntryTailNodes[VMM_HE_USED].prev, newDesc);
}
return vmm_stripUnusedEntry();
}
@@ -273,10 +317,12 @@ HeapEntry_t *vmm_getUnusedEntry()
// Return pointer to an unused HeapEntry object, ASSUMES THERE IS ONE PRESENT IN QUEUE
HeapEntry_t *vmm_stripUnusedEntry()
{
HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].start;
heapEntryQueues[VMM_HE_UNUSED].start = he->next;
HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].head->next;
heapEntryQueues[VMM_HE_UNUSED].head->next = he->next;
((HeapEntry_t *)he->next)->prev = he->prev;
heapEntryQueues[VMM_HE_UNUSED].count--;
he->next = 0;
he->prev = 0;
return he;
}

View File

@@ -33,13 +33,15 @@ typedef struct {
typedef struct {
int count;
HeapEntry_t *start;
HeapEntry_t *head;
} HeapEntryQueue_t;
void vmm_init();
void *kmalloc(u32_t size);
int kfree(void *addr);
void *vmm_palloc();
int vmm_pfree(void *addr);
#endif
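The HeapEntry_t definition itself is not part of this diff; judging from its uses above (base, length, and next/prev accessed through casts), it presumably looks roughly like the sketch below — an inference from usage, not the actual header:

/* Sketch: inferred shape of HeapEntry_t. */
typedef struct {
    void *base;     /* start of the described virtual region */
    u32_t length;   /* region length in bytes (4096 for one page) */
    void *next;     /* forward link; cast to HeapEntry_t* at use sites */
    void *prev;     /* backward link, new in this revision */
} HeapEntry_t;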