# HG changeset patch
# User John Tsiombikas
# Date 1306523350 -10800
# Node ID a2c6110bd24b27e5e605799dfce6cd52a472a589
# Parent 8ea6debe42656d1dfe1e6daec19ccf511efab047
# Parent bdd9bf70269d32f4063e90b5007e69f758bd6c76
refactored the code a bit, and merged with the malloc implementation branch

diff -r 8ea6debe4265 -r a2c6110bd24b src/intr.c
--- a/src/intr.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/intr.c	Fri May 27 22:09:10 2011 +0300
@@ -95,9 +95,6 @@
 	 * setting up the mapping of IRQs [0, 15] to interrupts [32, 47]
 	 */
 	init_pic(IRQ_OFFSET);
-
-	/* we're done setting up, enable interrupts before returning */
-	enable_intr();
 }
 
 /* set an interrupt handler function for a particular interrupt */
diff -r 8ea6debe4265 -r a2c6110bd24b src/klibc/malloc.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/klibc/malloc.c	Fri May 27 22:09:10 2011 +0300
@@ -0,0 +1,209 @@
+#include <stdlib.h>
+#include "intr.h"
+#include "vm.h"
+#include "panic.h"
+
+#define MAGIC 0xbaadbeef
+
+struct mem_range {
+	uint32_t start;
+	size_t size;
+	struct mem_range *next;
+};
+
+struct alloc_desc {
+	size_t size;
+	uint32_t magic;
+};
+
+static void add_range(struct mem_range *rng);
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high);
+static struct mem_range *alloc_node(void);
+static void free_node(struct mem_range *node);
+
+struct mem_range *free_list;
+struct mem_range *node_pool;
+
+
+void *malloc(size_t sz)
+{
+	void *res = 0;
+	struct mem_range *node, *prev, dummy;
+	int intr_state;
+	struct alloc_desc *desc;
+	size_t alloc_size = sz + sizeof *desc;
+
+	if(!sz) {
+		return 0;
+	}
+
+	/* entering the critical section, do not disturb */
+	intr_state = get_intr_state();
+	disable_intr();
+
+find_range:
+	prev = &dummy;
+	dummy.next = node = free_list;
+	while(node) {
+		/* find a node in the free_list with enough space ... */
+		if(node->size >= alloc_size) {
+			/* insert the allocation descriptor at the beginning */
+			desc = (void*)node->start;
+			desc->size = alloc_size;
+			desc->magic = MAGIC;
+			res = desc + 1;	/* that's what we'll return to the user */
+
+			/* modify the node to reflect the new range after we
+			 * grabbed a part at the beginning...
+			 */
+			node->size -= alloc_size;
+			node->start += alloc_size;
+
+			/* if the range represented by this node now has zero size,
+			 * remove and free the node (it goes into the node_pool)
+			 */
+			if(!node->size) {
+				prev->next = node->next;
+				if(free_list == node) {
+					free_list = node->next;
+				}
+				free_node(node);
+			}
+			break;
+		}
+		prev = node;
+		node = node->next;
+	}
+
+	/* we didn't find a range big enough in the free_list. In that case
+	 * we need to allocate some pages, add them to the free_list and try
+	 * again.
+	 */
+	if(!res) {
+		struct mem_range *range;
+		int pg, pgcount = (PGSIZE - 1 + alloc_size) / PGSIZE;
+
+		if((pg = pgalloc(pgcount, MEM_KERNEL)) == -1) {
+			set_intr_state(intr_state);
+			return 0;
+		}
+
+		range = alloc_node();
+		range->start = PAGE_TO_ADDR(pg);
+		range->size = pgcount * PGSIZE;
+		add_range(range);
+		goto find_range;
+	}
+
+	set_intr_state(intr_state);
+	return res;
+}
+
+void free(void *ptr)
+{
+	int intr_state;
+	struct alloc_desc *desc;
+	struct mem_range *rng;
+
+	if(!ptr) return;
+
+	intr_state = get_intr_state();
+	disable_intr();
+
+	desc = (struct alloc_desc*)ptr - 1;
+	if(desc->magic != MAGIC) {
+		panic("free(%x) magic mismatch, invalid address.\n", (unsigned int)ptr);
+	}
+
+	rng = alloc_node();
+	rng->start = (uint32_t)desc;
+	rng->size = desc->size;
+	add_range(rng);
+
+	set_intr_state(intr_state);
+}
+
+static void add_range(struct mem_range *rng)
+{
+	struct mem_range *node, *prev = 0;
+
+	if(!free_list || free_list->start > rng->start) {
+		rng->next = free_list;
+		free_list = rng;
+
+	} else {
+		node = free_list;
+
+		while(node) {
+			if(!node->next || node->next->start > rng->start) {
+				rng->next = node->next;
+				node->next = rng;
+				prev = node;	/* needed by coalesce after the loop */
+				break;
+			}
+
+			prev = node;
+			node = node->next;
+		}
+	}
+
+	coalesce(prev, rng, rng->next);
+}
+
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high)
+{
+	if(high) {
+		if(mid->start + mid->size >= high->start) {
+			mid->size = high->start + high->size - mid->start;
+			mid->next = high->next;
+			free_node(high);
+		}
+	}
+
+	if(low) {
+		if(low->start + low->size >= mid->start) {
+			low->size = mid->start + mid->size - low->start;
+			low->next = mid->next;
+			free_node(mid);
+		}
+	}
+}
+
+static struct mem_range *alloc_node(void)
+{
+	struct mem_range *node;
+
+	/* no nodes available for reuse...
+	 * grab a page, slice it into nodes, link them up and hang them in the pool */
+	if(!node_pool) {
+		int i, num_nodes, pg;
+		struct mem_range *nodepage;
+
+		pg = pgalloc(1, MEM_KERNEL);
+		if(pg == -1) {
+			panic("failed to allocate page for the malloc node pool\n");
+			return 0;	/* unreachable */
+		}
+
+		nodepage = (struct mem_range*)PAGE_TO_ADDR(pg);
+		num_nodes = PGSIZE / sizeof *nodepage;
+
+		for(i=1; i<num_nodes; i++) {
+			nodepage[i - 1].next = nodepage + i;
+		}
+		nodepage[i - 1].next = 0;
+		node_pool = nodepage;
+	}
+
+	node = node_pool;
+	node_pool = node->next;
+	node->next = 0;
+	return node;
+}
+
+static void free_node(struct mem_range *node)
+{
+	node->next = node_pool;
+	node_pool = node;
+}
diff -r 8ea6debe4265 -r a2c6110bd24b src/klibc/stdlib.h
--- a/src/klibc/stdlib.h	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/klibc/stdlib.h	Fri May 27 22:09:10 2011 +0300
@@ -12,4 +12,8 @@
 void itoa(int val, char *buf, int base);
 void utoa(unsigned int val, char *buf, int base);
 
+/* defined in malloc.c */
+void *malloc(size_t sz);
+void free(void *ptr);
+
 #endif	/* STDLIB_H_ */
diff -r 8ea6debe4265 -r a2c6110bd24b src/main.c
--- a/src/main.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/main.c	Fri May 27 22:09:10 2011 +0300
@@ -5,6 +5,7 @@
 #include "asmops.h"
 #include "segm.h"
 #include "intr.h"
+#include "mem.h"
 #include "vm.h"
 
 static void do_nothing();
@@ -57,7 +58,13 @@
 	/* silence the blasted timer interrupt */
 	interrupt(32, do_nothing);
 
-	init_vm(mbinf);
+	/* initialize the physical memory manager */
+	init_mem(mbinf);
+	/* initialize paging and the virtual memory manager */
+	init_vm();
+
+	/* initialization complete, enable interrupts */
+	enable_intr();
 
 	dbg_print_vm(MEM_USER);
 	dbg_print_vm(MEM_KERNEL);
diff -r 8ea6debe4265 -r a2c6110bd24b src/vm-asm.S
--- a/src/vm-asm.S	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/vm-asm.S	Fri May 27 22:09:10 2011 +0300
@@ -1,6 +1,6 @@
 	.text
 /* enable_paging(void)
- * sets the cr0 bit 31 which enables page translation */
+ * sets bit 31 of cr0 which enables page translation */
 	.globl enable_paging
 enable_paging:
 	movl %cr0, %eax
@@ -9,7 +9,7 @@
 	ret
 
 /* disable_paging(void)
- * clears the cr0 bit 31 */
+ * clears bit 31 of cr0 which disables page translation */
 	.globl disable_paging
 disable_paging:
 	movl %cr0, %eax
@@ -17,6 +17,8 @@
 	movl %eax, %cr0
 	ret
 
+/* get_paging_status(void)
+ * returns 0 if paging is disabled or 1 if it's enabled */
 	.globl get_paging_status
 get_paging_status:
 	movl %cr0, %eax
diff -r 8ea6debe4265 -r a2c6110bd24b src/vm.c
--- a/src/vm.c	Sat Apr 23 00:51:18 2011 +0300
+++ b/src/vm.c	Fri May 27 22:09:10 2011 +0300
@@ -53,13 +53,10 @@
 
 static struct page_range first_node;
 
-void init_vm(struct mboot_info *mb)
+void init_vm(void)
 {
 	uint32_t idmap_end;
 
-	/* initialize the physical memory map and allocator */
-	init_mem(mb);
-
 	/* setup the page tables */
 	pgdir = (uint32_t*)alloc_phys_page();
 	memset(pgdir, 0, PGSIZE);
@@ -83,12 +80,12 @@
 	node_pool = 0;
 
 	first_node.start = ADDR_TO_PAGE(KMEM_START);
-	first_node.end = PAGE_COUNT;
+	first_node.end = ADDR_TO_PAGE(PGTBL_BASE);
 	first_node.next = 0;
 	pglist[MEM_KERNEL] = &first_node;
 
 	pglist[MEM_USER] = alloc_node();
-	pglist[MEM_USER]->start = 0;
+	pglist[MEM_USER]->start = ADDR_TO_PAGE(idmap_end);
 	pglist[MEM_USER]->end = ADDR_TO_PAGE(KMEM_START);
 	pglist[MEM_USER]->next = 0;
 }
@@ -149,7 +146,7 @@
 	if(!(pgdir[diridx] & PG_PRESENT)) {
 		goto err;
 	}
-	pgtbl = (uint32_t*)(pgdir[diridx] & ADDR_PGENT_MASK);
+	pgtbl = PGTBL(diridx);
 
 	if(!(pgtbl[pgidx] & PG_PRESENT)) {
 		goto err;
@@ -168,18 +165,9 @@
 int map_page_range(int vpg_start, int pgcount, int ppg_start, unsigned int attr)
 {
 	int i, phys_pg;
-	uint32_t paddr;
 
 	for(i=0; i<pgcount; i++) {
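
As a quick sanity check of the new allocator, something like the following sketch could be dropped into the kernel. It is illustrative only and not part of the patch: the test_malloc() function, its 2048-byte buffer size, and the idea of calling it from main() after enable_intr() are all made up here; it relies only on malloc(), free() and panic() as introduced above.

#include <stdlib.h>	/* klibc malloc()/free() prototypes */
#include "panic.h"

/* hypothetical smoke test for the kernel heap: allocate a buffer,
 * touch every byte, then hand the range back to the free list
 */
static void test_malloc(void)
{
	int i;
	char *buf = malloc(2048);

	if(!buf) {
		panic("kernel malloc failed\n");
	}
	for(i=0; i<2048; i++) {
		buf[i] = (char)i;	/* a fault here would point to a bad mapping */
	}
	free(buf);	/* the range is coalesced back into the free list */
}

The first call finds the free list empty, so it goes through the pgalloc() path; once the buffer is freed, the coalesced range should satisfy subsequent small allocations without touching the page allocator again.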