diff --git a/kernel/functions.h b/kernel/functions.h index 025c6be..c1c5ce4 100644 --- a/kernel/functions.h +++ b/kernel/functions.h @@ -9,9 +9,9 @@ #include "hos_defines.h" #include "sys/io.h" -extern dword _code; -extern dword _bss; -extern dword _end; +extern u32_t _code; +extern u32_t _bss; +extern u32_t _end; //Enables (SeTs) Interrupt Flag on the processor static inline void enable_ints() @@ -63,18 +63,18 @@ static inline void timer_init() // - this does include the bss section // - this should be 4kb aligned per the linker script // - this is the amount of RAM the kernel code, data, & bss take -static inline dword kernel_size_used() +static inline u32_t kernel_size_used() { - return (dword)(&_end)-(dword)(&_code); + return (u32_t)(&_end)-(u32_t)(&_code); } //Returns the size of the kernel (code & data) // - this does not include the bss section // - this should be 4kb aligned per the linker script // - this should be the size of kernel.bin -static inline dword kernel_size() +static inline u32_t kernel_size() { - return (dword)(&_bss)-(dword)(&_code); + return (u32_t)(&_bss)-(u32_t)(&_code); } //converts a binary-coded-decimal byte to its decimal equivalent diff --git a/kernel/mm/vmm.c b/kernel/mm/vmm.c index 15ba8a4..cac935d 100644 --- a/kernel/mm/vmm.c +++ b/kernel/mm/vmm.c @@ -10,6 +10,21 @@ #include "asmfuncs.h" #include "mm/mm.h" +u32_t vmm_map(void *virt); +int vmm_map1(unsigned int virt, unsigned int physical); +int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n); +void vmm_unmap1(unsigned int virt); +void vmm_unmapn(unsigned int virt, unsigned int n); +int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start); +void vmm_heb_init(HeapEntryBlock_t *heb); +void vmm_addToQueue(u32_t queue, HeapEntry_t *he); +int vmm_countHeapEntries(HeapEntry_t *he); +HeapEntry_t *vmm_followChain(HeapEntry_t *he); +HeapEntry_t *vmm_getUnusedEntry(); +HeapEntry_t *vmm_stripUnusedEntry(); + +//void *calloc(unsigned int number, unsigned int 
size); + extern mb_info_t mb_info_block; extern mb_module_t mb_modules[MAX_MODULES]; extern u32_t mm_freepages; @@ -25,12 +40,12 @@ void vmm_init() int i; for (i = 0; i < mb_info_block.mods_count; i++) //page in the kernel modules vmm_map_range((void*)mb_modules[i].mod_start, (void*)mb_modules[i].mod_end- 1, mb_modules[i].mod_start - VIRT_OFFSET); - vmm_heb_init(initialHEB); - vmm_addToQueue(VMM_HE_UNUSED, initialHEB[0]); + vmm_heb_init(&initialHEB); + vmm_addToQueue(VMM_HE_UNUSED, &initialHEB.entry[0]); HeapEntry_t *wilderness = vmm_stripUnusedEntry(); - wilderness->base = HEAP_START; + wilderness->base = (void *) HEAP_START; wilderness->length = HEAP_LENGTH; - wilderneww->next = 0; + wilderness->next = 0; vmm_addToQueue(VMM_HE_HOLE, wilderness); } @@ -55,7 +70,7 @@ int vmm_map1(unsigned int virt, unsigned int physical) if (!(pageTables[pde] & 0x01)) //the page directory entry is not present, we must allocate a page table { u32_t newpagetable; - if (!(newpagetable = mm_palloc()) + if (!(newpagetable = mm_palloc())) return 1; //out of physical memory pageTables[pde] = newpagetable | 0x03; invlpg_(virt); //in case it was cached, so we can fill page table safely @@ -78,6 +93,7 @@ int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n) physical += 4096; n--; } + return 0; } @@ -119,14 +135,20 @@ int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start) void *kmalloc(u32_t size) { k_enter_critical(); + k_leave_critical(); + return NULL; } void *vmm_palloc() { k_enter_critical(); + HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].start; + he = vmm_followChain(he); + k_leave_critical(); + return NULL; } @@ -163,8 +185,40 @@ void vmm_addToQueue(u32_t queue, HeapEntry_t *he) return; } - // TODO - + switch (queue) + { + HeapEntry_t *otherhe; + case VMM_HE_UNUSED: // don't sort at all, add to end + otherhe = vmm_followChain(heapEntryQueues[queue].start); + otherhe->next = he; + heapEntryQueues[queue].count += vmm_countHeapEntries(he); + break; + case 
VMM_HE_USED: // sort by base address + otherhe = heapEntryQueues[queue].start; + while (otherhe->next) + { + if (((HeapEntry_t *)otherhe->next)->base > he->base) + break; + otherhe = otherhe->next; + } + he->next = otherhe->next; + otherhe->next = he; + heapEntryQueues[queue].count++; + break; + case VMM_HE_HOLE: // sort by length + case VMM_HE_FREE: + otherhe = heapEntryQueues[queue].start; + while (otherhe->next) + { + if (((HeapEntry_t *)otherhe->next)->length > he->length) + break; + otherhe = otherhe->next; + } + he->next = otherhe->next; + otherhe->next = he; + heapEntryQueues[queue].count++; + break; + } } @@ -175,7 +229,7 @@ int vmm_countHeapEntries(HeapEntry_t *he) while (he) { count++; - he = (HeapEntry_t *)he->link; + he = (HeapEntry_t *)he->next; } return count; } @@ -184,8 +238,8 @@ int vmm_countHeapEntries(HeapEntry_t *he) // This function follows a chain of HeapEntry objects and returns a pointer to the last one HeapEntry_t *vmm_followChain(HeapEntry_t *he) { - while (he->link) - he = (HeapEntry_t *)he->link; + while (he->next) + he = (HeapEntry_t *)he->next; return he; } @@ -195,10 +249,16 @@ HeapEntry_t *vmm_getUnusedEntry() { if (heapEntryQueues[VMM_HE_UNUSED].count < 5) { - HeapEntry_t *wilderness = vmm_followChain( - HeapEntryBlock_t *newHEB; + HeapEntry_t *wilderness = vmm_followChain(heapEntryQueues[VMM_HE_HOLE].start); + wilderness->length -= 4096; + HeapEntryBlock_t *newHEB = wilderness->base + wilderness->length; + vmm_map(newHEB); vmm_heb_init(newHEB); - vmm_addToQueue(VMM_HE_UNUSED, newHEB); + vmm_addToQueue(VMM_HE_UNUSED, &newHEB->entry[0]); + HeapEntry_t *newDesc = vmm_stripUnusedEntry(); + newDesc->base = newHEB; + newDesc->length = 4096; + vmm_addToQueue(VMM_HE_USED, newDesc); } return vmm_stripUnusedEntry(); } @@ -210,6 +269,7 @@ HeapEntry_t *vmm_stripUnusedEntry() HeapEntry_t *he = heapEntryQueues[VMM_HE_UNUSED].start; heapEntryQueues[VMM_HE_UNUSED].start = he->next; heapEntryQueues[VMM_HE_UNUSED].count--; + he->next = 0; return he; } diff --git a/kernel/mm/vmm.h b/kernel/mm/vmm.h 
index 13fec1c..cd325d0 100644 --- a/kernel/mm/vmm.h +++ b/kernel/mm/vmm.h @@ -38,14 +38,8 @@ typedef struct { void vmm_init(); -u32_t vmm_map(void *virt); -int vmm_map1(unsigned int virt, unsigned int physical); -int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n); -void vmm_unmap1(unsigned int virt); -void vmm_unmapn(unsigned int virt, unsigned int n); -int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start); -void vmm_heb_init(HeapEntryBlock_t *heb); -//void *calloc(unsigned int number, unsigned int size); +void *kmalloc(u32_t size); +void *vmm_palloc(); #endif