Import backup from 2004-07-31

parent: 6d23c76e84
commit: 07e8a0f4cf
@@ -78,29 +78,27 @@ int k_mbsave(mb_info_t *mbinfo, unsigned int mb_magic)
 /* Main kernel initialization routine */
 void k_init()
 {
+    k_enter_critical();
     mm_init();
     vmm_init();
     if (real_mode_module)
     {
         (*(char*)(CONSOLE_MEMORY+40))++;
     }
-    // u32_t *ptr = malloc(sizeof(u32_t));
-    // *ptr = 42;
-    // while (*ptr == 42);
+    u32_t *ptr = kmalloc(sizeof(u32_t));
+    BOCHS_DEBUG(50, (u32_t)ptr);
+    *ptr = 42;
+    while (*ptr == 42);
+    kfree(ptr);
     // dev_init();
-    for (;;)
-    {
-        (*(char*)CONSOLE_MEMORY)++;
-    }
+    BOCHS_DEBUG(0, 0xdeadbeef);
+    criticalCounter--;
 }
 
 void isr(u32_t num)
 {
     criticalCounter++;
-    for (;;)
-    {
-        (*(char*)(CONSOLE_MEMORY+158))++;
-    }
+    BOCHS_DEBUG(158, 0xbeef1234);
     switch (num)
     {
 
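Note on the hunk above: k_init() now enters a critical section at the top and decrements criticalCounter directly at the bottom, while isr() increments it on entry. Only the edges of that machinery appear in this diff; a minimal sketch of the presumed enter/leave pair, inferred from the enable_ints() call and counter manipulation visible in these hunks (the disable_ints name and everything in k_leave_critical beyond its last two lines are assumptions, not shown in this commit):

    u32_t criticalCounter = 0;

    void k_enter_critical()
    {
        disable_ints();      /* assumed counterpart of enable_ints() */
        criticalCounter++;   /* track nesting depth */
    }

    void k_leave_critical()
    {
        criticalCounter--;   /* assumed; only the function's tail is visible below */
        enable_ints();
    }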
@@ -122,3 +120,10 @@ void k_leave_critical()
     enable_ints();
 }
 
+void BOCHS_DEBUG(u32_t pos, u32_t param)
+{
+    for (;;)
+    {
+        (*(char*)(0xC00B8000+pos))++;
+    }
+}
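BOCHS_DEBUG never returns: it parks the CPU in a spin loop that continually increments the character at offset pos into the text console (0xC00B8000 is evidently the kernel's virtual mapping of VGA text memory at physical 0xB8000), so the last checkpoint reached flickers visibly under Bochs. The param argument is accepted but unused, and since the call never returns, every statement after a BOCHS_DEBUG call in k_init is unreachable as committed. A sketch of a variant that dumps param as well, assuming Bochs is run with its port-0xE9 debug-console hack enabled and that a small outb helper is available (both assumptions; neither is part of this commit):

    static inline void outb(unsigned short port, unsigned char val)
    {
        __asm__ __volatile__ ("outb %0, %1" : : "a"(val), "Nd"(port));
    }

    void BOCHS_DEBUG_HEX(u32_t pos, u32_t param)   /* hypothetical variant */
    {
        int shift;
        for (shift = 28; shift >= 0; shift -= 4)   /* 8 hex digits, MSB first */
            outb(0xE9, "0123456789abcdef"[(param >> shift) & 0xF]);
        outb(0xE9, '\n');
        for (;;)
            (*(char*)(0xC00B8000+pos))++;          /* same visible spin marker */
    }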
@@ -20,5 +20,7 @@ void isr();
 void k_enter_critical(); // functions for implementing "atomic actions"
 void k_leave_critical();
 
+void BOCHS_DEBUG(u32_t pos, u32_t param);
+
 #endif
 
@@ -61,8 +61,8 @@ void mm_init()
 // specified in base for a length of pages pages
 void mm_pfreen(u32_t base, u32_t pages)
 {
-    pages = base + (pages << 12); //convert #pages to #bytes
-    for (; base < pages; base += 4096)
+    //convert pages to max address
+    for (pages = base + (pages << 12); base < pages; base += 4096)
         mm_pfree(base);
 }
 
@@ -84,8 +84,8 @@ void mm_pfree(u32_t base)
 // specified in base for a length of pages pages
 void mm_preserven(u32_t base, u32_t pages)
 {
-    pages = base + (pages << 12); //convert #pages to #bytes
-    for (; base < pages; base += 4096)
+    //convert pages to max address
+    for (pages = base + (pages << 12); base < pages; base += 4096)
         mm_preserve(base);
 }
 
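Both mm.c hunks are the same cleanup: pages is still reused as the loop bound, but the conversion from a page count to an end address now sits in the for initializer, next to the loop it controls, and the comment describes the result rather than a byte count. A sketch of an arguably clearer equivalent that leaves the parameter alone (the end variable is hypothetical; behavior is identical):

    void mm_pfreen(u32_t base, u32_t pages)
    {
        u32_t end = base + (pages << 12);   /* one page = 4096 bytes = 1 << 12 */
        for (; base < end; base += 4096)
            mm_pfree(base);
    }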
kernel/mm/vmm.c (187 changed lines)
@@ -2,7 +2,7 @@
 // Author: Josh Holtrop
 // Date: 09/30/03
 // Rewritten from scratch: 12/23/03
-// Modified: 07/29/04
+// Modified: 07/30/04
 
 #include "hos_defines.h"
 #include "kernel.h"
@@ -16,6 +16,10 @@ int vmm_mapn(unsigned int virt, unsigned int physical, unsigned int n);
 void vmm_unmap1(unsigned int virt);
 void vmm_unmapn(unsigned int virt, unsigned int n);
 int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start);
+void *vmm_getFreeChunk(u32_t size);
+void vmm_removeHeapEntry(u32_t queue, HeapEntry_t *he);
+int vmm_moreCore(u32_t size);
+int vmm_coalesceEntry(u32_t queue, HeapEntry_t *newHE);
 void vmm_heb_init(HeapEntryBlock_t *heb);
 void vmm_addToQueue(u32_t queue, HeapEntry_t *preceeding, HeapEntry_t *he);
 int vmm_countHeapEntries(HeapEntry_t *he);
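The four new declarations reference HeapEntry_t and heapEntryQueues, whose definitions lie outside this diff. From the usage in the hunks below (a head sentinel whose next is the first real entry, a tail detected by he->next being NULL, next/prev links cast to HeapEntry_t * at every use site, a per-queue count, and base/length per entry), the structures presumably look roughly like this sketch (field order, types, and queue indices are inferences, not shown in this commit):

    typedef struct HeapEntry
    {
        void *base;       /* start address of the chunk this entry describes */
        u32_t length;     /* chunk size in bytes */
        void *next;       /* untyped links; call sites cast to (HeapEntry_t *) */
        void *prev;
    } HeapEntry_t;

    typedef struct
    {
        HeapEntry_t *head;   /* sentinel node; head->next is the first entry */
        u32_t count;         /* entries currently in this queue */
    } HeapEntryQueue_t;

    /* Queues referenced in this diff: VMM_HE_FREE, VMM_HE_USED,
     * VMM_HE_HOLE, and VMM_HE_UNUSED. */
    extern HeapEntryQueue_t heapEntryQueues[];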
@@ -23,8 +27,6 @@ HeapEntry_t *vmm_followChain(HeapEntry_t *he);
 HeapEntry_t *vmm_getUnusedEntry();
 HeapEntry_t *vmm_stripUnusedEntry();
 
-//void *calloc(unsigned int number, unsigned int size);
-
 extern mb_info_t mb_info_block;
 extern mb_module_t mb_modules[MAX_MODULES];
 extern u32_t mm_freepages;
@@ -144,9 +146,22 @@ int vmm_map_range(void *virt_start, void *virt_end, u32_t phys_start)
 void *kmalloc(u32_t size)
 {
     k_enter_critical();
+    if (size % VMM_MALLOC_GRANULARITY)
+        size = size + VMM_MALLOC_GRANULARITY - (size % VMM_MALLOC_GRANULARITY);
+    void *attempt = vmm_getFreeChunk(size);
+    if (attempt)
+    {
+        k_leave_critical();
+        return attempt;
+    }
+    if (vmm_moreCore(size))
+    {
+        k_leave_critical();
+        return NULL; //we could not get any more heap memory
+    }
+    attempt = vmm_getFreeChunk(size);
     k_leave_critical();
-    return NULL;
+    return attempt;
 }
 
 
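kmalloc now rounds size up to a multiple of VMM_MALLOC_GRANULARITY, tries the free queue once, grows the heap through vmm_moreCore on a miss, and retries, all inside the critical section. If VMM_MALLOC_GRANULARITY is a power of two (an assumption; its value is not visible in this diff), the two-line rounding can be written branch-free with the usual align-up idiom:

    /* Equivalent to the if/adjust pair above when the granularity is a power of two. */
    size = (size + VMM_MALLOC_GRANULARITY - 1) & ~(VMM_MALLOC_GRANULARITY - 1);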
@@ -154,7 +169,20 @@ void *kmalloc(u32_t size)
 int kfree(void *addr)
 {
     k_enter_critical();
+    HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
+    while (he->next)
+    {
+        if (he->base == addr)
+        {
+            vmm_removeHeapEntry(VMM_HE_USED, he);
+            if (vmm_coalesceEntry(VMM_HE_FREE, he))
+                vmm_addToQueue(VMM_HE_FREE, heapEntryQueues[VMM_HE_FREE].head, he);
+            else
+                vmm_addToQueue(VMM_HE_UNUSED, heapEntryQueues[VMM_HE_UNUSED].head, he);
+            break;
+        }
+        he = (HeapEntry_t *)he->next;
+    }
     k_leave_critical();
     return 0;
 }
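kfree is a linear walk of the used queue: when the entry whose base matches addr is found, it is unlinked and either merged into an adjacent free entry (the now-redundant node being recycled onto the unused queue) or linked into the free queue intact. As written it returns 0 whether or not addr was found; a sketch of a variant that reports the miss, using -1 by analogy with vmm_coalesceEntry (the convention is an assumption, not part of this commit):

    int kfree(void *addr)
    {
        int found = -1;   /* assumed convention: nonzero means addr was not an allocation */
        k_enter_critical();
        HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
        while (he->next)
        {
            if (he->base == addr)
            {
                vmm_removeHeapEntry(VMM_HE_USED, he);
                if (vmm_coalesceEntry(VMM_HE_FREE, he))
                    vmm_addToQueue(VMM_HE_FREE, heapEntryQueues[VMM_HE_FREE].head, he);
                else
                    vmm_addToQueue(VMM_HE_UNUSED, heapEntryQueues[VMM_HE_UNUSED].head, he);
                found = 0;
                break;
            }
            he = (HeapEntry_t *)he->next;
        }
        k_leave_critical();
        return found;
    }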
@@ -170,11 +198,7 @@ void *vmm_palloc()
     {
         if (he->length == 4096)
         {
-            ((HeapEntry_t *)he->prev)->next = he->next;
-            ((HeapEntry_t *)he->next)->prev = he->prev;
-            heapEntryQueues[VMM_HE_HOLE].count--;
-            he->next = NULL;
-            he->prev = NULL;
+            vmm_removeHeapEntry(VMM_HE_HOLE, he);
             vmm_addToQueue(VMM_HE_USED, &heapEntryHeadNodes[VMM_HE_USED], he);
             vmm_map(he->base);
             k_leave_critical();
@@ -228,17 +252,150 @@ int vmm_pfree(void *addr)
 
 // This function allocates and zeros memory for the given number of objects,
 // given the size of each object
-/*
-void *calloc(u32_t number, u32_t size)
+void *kcalloc(u32_t number, u32_t size)
 {
-    void *mem = malloc(number * size);
+    void *mem = kmalloc(number * size);
     if (!mem)
         return NULL; //could not get memory
     memset(mem, 0, number * size);
     return mem;
-}*/
+}
 
 
+// This function re-allocates memory already allocated, preserving the old contents
+// (as long as newSize is greater than oldSize)
+void *krealloc(void *orig, unsigned int newSize)
+{
+    void *newMem;
+    if ((newMem = kmalloc(newSize)))
+    {
+        HeapEntry_t *he = heapEntryQueues[VMM_HE_USED].head->next;
+        while (he->next)
+        {
+            if (he->base == orig)
+            {
+                memcpy(newMem, orig, (he->length < newSize ? he->length : newSize));
+                kfree(orig);
+                return newMem;
+            }
+            he = (HeapEntry_t *)he->next;
+        }
+        kfree(newMem);
+        return NULL; // base address not found
+    }
+    else
+        return NULL; // could not get mem for new chunk
+}
+
+
+// This function returns the base address of a free chunk of virtual memory - called from kmalloc()
+void *vmm_getFreeChunk(u32_t size)
+{
+    HeapEntry_t *he = heapEntryQueues[VMM_HE_FREE].head->next;
+    HeapEntry_t *good = NULL;
+    while (he->next) // he is not the tail node
+    {
+        if (he->length == size)
+        {
+            vmm_removeHeapEntry(VMM_HE_FREE, he);
+            vmm_addToQueue(VMM_HE_USED, heapEntryQueues[VMM_HE_USED].head, he);
+            return he->base;
+        }
+        if (good)
+        {
+            if ((he->length > size) && (he->length < good->length))
+                good = he;
+        }
+        else
+        {
+            if (he->length > size)
+                good = he;
+        }
+        he = (HeapEntry_t *)he->next;
+    }
+    if (good)
+    {
+        HeapEntry_t *newHE = vmm_getUnusedEntry();
+        newHE->base = good->base;
+        newHE->length = size;
+        good->base += size;
+        good->length -= size;
+        vmm_addToQueue(VMM_HE_USED, heapEntryQueues[VMM_HE_USED].head, newHE);
+        return newHE->base;
+    }
+    return NULL;
+}
+
+
+// This function retrieves more core memory for the virtual memory allocator to allocate
+int vmm_moreCore(u32_t size)
+{
+    int pages = (size >> 12) + 2;
+    size = pages << 12;
+    if ((mm_freepages - 5) < pages)
+        return -1; // out of physical memory
+    HeapEntry_t *he = heapEntryQueues[VMM_HE_HOLE].head->next;
+    HeapEntry_t *wilderness = he;
+    while (he->next)
+    {
+        if (he->length > wilderness->length)
+            wilderness = he;
+        he = (HeapEntry_t *)he->next;
+    }
+    if (wilderness->length <= size)
+        return -2; // out of virtual memory
+    int i;
+    void *virt = wilderness->base;
+    for (i = 0; i < pages; i++)
+    {
+        vmm_map(virt);
+        virt += 4096;
+    }
+    HeapEntry_t *newHE = vmm_getUnusedEntry();
+    newHE->base = wilderness->base;
+    newHE->length = size;
+    wilderness->base += size;
+    wilderness->length -= size;
+    if (vmm_coalesceEntry(VMM_HE_FREE, newHE)) // returns 0 on success (coalesced into previous entry)
+        vmm_addToQueue(VMM_HE_FREE, heapEntryQueues[VMM_HE_FREE].head, newHE);
+    else
+        vmm_addToQueue(VMM_HE_UNUSED, heapEntryQueues[VMM_HE_UNUSED].head, newHE);
+    return 0;
+}
+
+
+// This function coalesces two heap entries into one
+int vmm_coalesceEntry(u32_t queue, HeapEntry_t *newHE)
+{
+    HeapEntry_t *existing = heapEntryQueues[queue].head->next;
+    while (existing->next)
+    {
+        if ((existing->base + existing->length) == newHE->base)
+        {
+            existing->length += newHE->length;
+            return 0;
+        }
+        else if ((newHE->base + newHE->length) == existing->base)
+        {
+            existing->base -= newHE->length;
+            return 0;
+        }
+        existing = (HeapEntry_t *)existing->next;
+    }
+    return -1; // an entry to coalesce with was not found
+}
+
+
+// This function removes a heap entry from a queue
+void vmm_removeHeapEntry(u32_t queue, HeapEntry_t *he)
+{
+    ((HeapEntry_t *)he->prev)->next = he->next;
+    ((HeapEntry_t *)he->next)->prev = he->prev;
+    heapEntryQueues[queue].count--;
+    he->next = NULL;
+    he->prev = NULL;
+}
+
+
 // This function initializes a Heap Entry Block to entries linked together
 void vmm_heb_init(HeapEntryBlock_t *heb)
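Three review observations on the new allocator code above:

vmm_getFreeChunk is best-fit: an exact-size entry is handed out whole, otherwise the smallest free entry larger than size is split, with the new piece going onto the used queue. The two branches that track the best candidate collapse into one condition with identical behavior:

    if ((he->length > size) && (!good || he->length < good->length))
        good = he;

vmm_coalesceEntry looks asymmetric: when newHE sits immediately below an existing entry, the code moves existing->base down by newHE->length but never grows existing->length, so the merged block appears to shrink by newHE->length bytes. If that reading is right, the second branch likely needs both adjustments (suspected fix, not in the commit):

    else if ((newHE->base + newHE->length) == existing->base)
    {
        existing->base -= newHE->length;
        existing->length += newHE->length;   /* suspected missing line */
        return 0;
    }

Finally, in vmm_moreCore, mm_freepages is a u32_t while pages is an int, so (mm_freepages - 5) < pages is evaluated in unsigned arithmetic; should mm_freepages ever drop below 5, the subtraction wraps to a huge value and the out-of-physical-memory guard silently passes.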
@@ -2,7 +2,7 @@
 // Author: Josh Holtrop
 // Date: 09/30/03
 // Rewritten from scratch: 12/23/03, 07/13/04
-// Modified: 07/15/04
+// Modified: 07/30/04
 
 #ifndef __HOS_VMM__
 #define __HOS_VMM__ __HOS_VMM__
@@ -42,6 +42,7 @@ void *kmalloc(u32_t size);
 int kfree(void *addr);
 void *vmm_palloc();
 int vmm_pfree(void *addr);
+void *kcalloc(unsigned int number, unsigned int size);
 
 #endif
 
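Two small header observations: vmm.c defines kcalloc with u32_t parameters while this declaration uses unsigned int, which only match if u32_t is a typedef for unsigned int (plausible on i386, but not visible in this diff), and krealloc, added in vmm.c above, receives no declaration here at all. A hypothetical call site exercising the new interfaces, assuming both are in scope:

    u32_t *table = kcalloc(16, sizeof(u32_t));   /* 16 zeroed u32_t slots */
    if (table)
    {
        void *bigger = krealloc(table, 32 * sizeof(u32_t));
        if (bigger)        /* on success the old block has already been kfree'd */
            table = (u32_t *)bigger;
        kfree(table);
    }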
@@ -0,0 +1,2 @@
+clean:
+	- rm *~ *.o *.bin *.lst
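This new two-line file (its name is not shown in this capture, but the content is a make clean rule) uses a leading - so make ignores rm's exit status when there is nothing to delete. A common hardening of the same rule, not part of this commit, also passes -f and marks the target phony so a stray file named clean cannot shadow it:

    .PHONY: clean
    clean:
    	-rm -f *~ *.o *.bin *.lst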