kern
diff src/klibc/malloc.c @ 30:a2c6110bd24b
refactored the code a bit, and merged with the malloc implementation branch
author      John Tsiombikas <nuclear@siggraph.org>
date        Fri, 27 May 2011 22:09:10 +0300
parents
children    3ed041d07ae1
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/klibc/malloc.c	Fri May 27 22:09:10 2011 +0300
@@ -0,0 +1,209 @@
+#include <stdlib.h>
+#include "intr.h"
+#include "vm.h"
+#include "panic.h"
+
+#define MAGIC 0xbaadbeef
+
+struct mem_range {
+	uint32_t start;
+	size_t size;
+	struct mem_range *next;
+};
+
+struct alloc_desc {
+	size_t size;
+	uint32_t magic;
+};
+
+static void add_range(struct mem_range *rng);
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high);
+static struct mem_range *alloc_node(void);
+static void free_node(struct mem_range *node);
+
+struct mem_range *free_list;
+struct mem_range *node_pool;
+
+
+void *malloc(size_t sz)
+{
+	void *res = 0;
+	struct mem_range *node, *prev, dummy;
+	int intr_state;
+	struct alloc_desc *desc;
+	size_t alloc_size = sz + sizeof *desc;
+
+	if(!sz) {
+		return 0;
+	}
+
+	/* entering the critical section, do not disturb */
+	intr_state = get_intr_state();
+	disable_intr();
+
+find_range:
+	prev = &dummy;
+	dummy.next = node = free_list;
+	while(node) {
+		/* find a node in the free_list with enough space ... */
+		if(node->size >= alloc_size) {
+			/* insert the allocation descriptor at the beginning */
+			desc = (void*)node->start;
+			desc->size = alloc_size;
+			desc->magic = MAGIC;
+			res = desc + 1;	/* that's what we'll return to the user */
+
+			/* modify the node to reflect the new range after we
+			 * grabbed a part at the beginning...
+			 */
+			node->size -= alloc_size;
+			node->start += alloc_size;
+
+			/* if the range represented by this node now has zero size,
+			 * remove and free the node (it goes into the node_pool)
+			 */
+			if(!node->size) {
+				prev->next = node->next;
+				if(free_list == node) {
+					free_list = node->next;
+				}
+				free_node(node);
+			}
+			break;
+		}
+		prev = node;
+		node = node->next;
+	}
+
+	/* we didn't find a range big enough in the free_list. In that case
+	 * we need to allocate some pages, add them to the free_list and try
+	 * again.
+	 */
+	if(!res) {
+		struct mem_range *range;
+		int pg, pgcount = (PGSIZE - 1 + alloc_size) / PGSIZE;
+
+		if((pg = pgalloc(pgcount, MEM_KERNEL)) == -1) {
+			set_intr_state(intr_state);
+			return 0;
+		}
+
+		range = alloc_node();
+		range->start = PAGE_TO_ADDR(pg);
+		range->size = pg * PGSIZE;
+		add_range(range);
+		goto find_range;
+	}
+
+	set_intr_state(intr_state);
+	return res;
+}
+
+void free(void *ptr)
+{
+	int intr_state;
+	struct alloc_desc *desc;
+	struct mem_range *rng;
+
+	if(!ptr) return;
+
+	intr_state = get_intr_state();
+	disable_intr();
+
+	desc = (struct alloc_desc*)ptr - 1;
+	if(desc->magic != MAGIC) {
+		panic("free(%x) magic missmatch, invalid address.\n", (unsigned int)ptr);
+	}
+
+	rng = alloc_node();
+	rng->start = (uint32_t)desc;
+	rng->size = desc->size;
+	add_range(rng);
+
+	set_intr_state(intr_state);
+}
+
+static void add_range(struct mem_range *rng)
+{
+	struct mem_range *node, *prev = 0;
+
+	if(!free_list || free_list->start > rng->start) {
+		rng->next = free_list;
+		free_list = rng;
+
+	} else {
+		node = free_list;
+
+		while(node) {
+			if(!node->next || node->next->start > rng->start) {
+				rng->next = node->next;
+				node->next = rng;
+				prev = node;	/* needed by coalesce after the loop */
+				break;
+			}
+
+			prev = node;
+			node = node->next;
+		}
+	}
+
+	coalesce(prev, rng, rng->next);
+}
+
+static void coalesce(struct mem_range *low, struct mem_range *mid, struct mem_range *high)
+{
+	if(high) {
+		if(mid->start + mid->size >= high->start) {
+			mid->size = high->size - mid->start;
+			mid->next = high->next;
+			free_node(high);
+		}
+	}
+
+	if(low) {
+		if(low->start + low->size >= mid->start) {
+			low->size = mid->size - low->start;
+			low->next = mid->next;
+			free_node(mid);
+		}
+	}
+}
+
+static struct mem_range *alloc_node(void)
+{
+	struct mem_range *node;
+
+	/* no nodes available for reuse...
+	 * grab a page, slice it into nodes, link them up and hang them in the pool
+	 */
+	if(!node_pool) {
+		int i, num_nodes, pg;
+		struct mem_range *nodepage;
+
+		pg = pgalloc(1, MEM_KERNEL);
+		if(pg == -1) {
+			panic("failed to allocate page for the malloc node pool\n");
+			return 0;	/* unreachable */
+		}
+
+		nodepage = (struct mem_range*)PAGE_TO_ADDR(pg);
+		num_nodes = PGSIZE / sizeof *nodepage;
+
+		for(i=1; i<num_nodes; i++) {
+			nodepage[i - 1].next = nodepage + i;
+		}
+		nodepage[i - 1].next = 0;
+		node_pool = nodepage;
+	}
+
+	node = node_pool;
+	node_pool = node->next;
+	node->next = 0;
+	return node;
+}
+
+static void free_node(struct mem_range *node)
+{
+	node->next = node_pool;
+	node_pool = node;
+}
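
The allocator keeps its bookkeeping in-band: malloc() reserves room for a struct alloc_desc immediately in front of the block it hands out, and free() steps back over that descriptor to recover the block's size and to sanity-check the magic value. The following is a minimal userspace sketch of just that layout, not the kernel code: toy_alloc/toy_free and the static pool are hypothetical stand-ins for the free list and pgalloc(), but the descriptor arithmetic matches the diff.

/* toy_alloc/toy_free: hypothetical userspace reduction of the scheme above.
 * A static buffer stands in for pgalloc'd pages; freed blocks are only
 * magic-checked, never reclaimed, so the focus stays on the
 * descriptor-in-front layout.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdalign.h>

#define MAGIC		0xbaadbeef
#define POOL_SIZE	4096

struct alloc_desc {
	size_t size;
	uint32_t magic;
};

static alignas(max_align_t) unsigned char pool[POOL_SIZE];
static size_t pool_used;

static void *toy_alloc(size_t sz)
{
	struct alloc_desc *desc;
	size_t alloc_size = sz + sizeof *desc;	/* same accounting as in the diff;
						 * a real allocator would also round
						 * this up for alignment */

	if(!sz || pool_used + alloc_size > POOL_SIZE) {
		return 0;
	}
	desc = (struct alloc_desc*)(pool + pool_used);
	pool_used += alloc_size;

	desc->size = alloc_size;
	desc->magic = MAGIC;
	return desc + 1;	/* user memory starts right after the descriptor */
}

static void toy_free(void *ptr)
{
	/* step back over the descriptor, exactly like free() above */
	struct alloc_desc *desc = (struct alloc_desc*)ptr - 1;

	if(desc->magic != MAGIC) {
		fprintf(stderr, "toy_free(%p): magic mismatch\n", ptr);
		abort();
	}
	printf("block of %zu bytes freed (size includes the descriptor)\n", desc->size);
}

int main(void)
{
	char *p = toy_alloc(32);
	toy_free(p);
	return 0;
}

Built as an ordinary C11 program this prints the size of the freed block; the kernel version differs only in where the raw memory comes from (pgalloc'd pages threaded through free_list) and in the interrupt masking around the critical sections.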
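
The coalescing step is where the interval arithmetic is easiest to get wrong, so it is worth writing out. Free ranges are half-open intervals [start, start + size); two neighbours can be merged once the first ends at or past the start of the second, and the merged range then runs from the first start to the second end, so its size is the second range's end minus the first range's start. The sketch below works that through with the same struct mem_range as the diff but a hypothetical helper name; it is my reading of the intended arithmetic, not the committed coalesce(), which computes the merged sizes differently.

/* try_merge: standalone illustration of merging two free ranges.
 * The free list keeps disjoint ranges sorted by start, so when a and b touch
 * (a->start + a->size >= b->start) the merged range is
 * [a->start, b->start + b->size), i.e. its size is the new end minus a->start.
 * Returns nonzero when the merge happened and b's node can be recycled.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct mem_range {
	uint32_t start;
	size_t size;
	struct mem_range *next;
};

int try_merge(struct mem_range *a, struct mem_range *b)
{
	if(a->start + a->size >= b->start) {
		a->size = b->start + b->size - a->start;	/* new end minus new start */
		a->next = b->next;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct mem_range a = { 0x1000, 0x200, 0 };
	struct mem_range b = { 0x1200, 0x300, 0 };

	a.next = &b;
	if(try_merge(&a, &b)) {
		/* expected: start 0x1000, size 0x500 */
		printf("merged: start %#x, size %#zx\n", (unsigned)a.start, a.size);
	}
	return 0;
}

The page-grabbing path in malloc() fits the same picture: the new free range starts at PAGE_TO_ADDR(pg) and covers the pgcount freshly mapped pages, i.e. pgcount * PGSIZE bytes, with pg itself being only the index of the first page that pgalloc() returned.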