kern

annotate src/proc.c @ 68:0a205396e1a0

- added a generic red-black tree data structure
- added a VM map, as a red-black tree of vm_pages, in the process structure
- constructed the vm map for the memory initially passed by the kernel to the first process
author John Tsiombikas <nuclear@mutantstargoat.com>
date Mon, 10 Oct 2011 04:16:01 +0300
parents f44bec97a0ec
children b45e2d5f0ae1
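
The changeset's vm map is built on the new generic red-black tree, with one node per vm_page range hanging off the process structure. As a rough sketch only (vm_page, rbnode and vmmap_find below are illustrative names, not this kernel's actual definitions), such a map keys each range by its starting virtual page number; finding the mapping that covers a given page is then a plain binary-search descent, since a red-black tree is an ordinary BST plus balancing metadata:

/* hypothetical sketch -- not the kernel's real vm map definitions */
struct vm_page {
    int vpage;              /* first virtual page number (tree key) */
    int npages;             /* number of consecutive pages in the range */
    unsigned int flags;     /* protection / copy-on-write flags */
};

struct rbnode {
    struct rbnode *left, *right;
    int red;                /* color bit, only consulted while rebalancing */
    struct vm_page *page;   /* payload: the mapped range */
};

/* return the vm_page whose range contains vpage, or 0 if it's unmapped */
static struct vm_page *vmmap_find(struct rbnode *root, int vpage)
{
    while(root) {
        struct vm_page *vp = root->page;
        if(vpage < vp->vpage) {
            root = root->left;
        } else if(vpage >= vp->vpage + vp->npages) {
            root = root->right;
        } else {
            return vp;
        }
    }
    return 0;
}
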
rev   line source
nuclear@52 1 #include <stdio.h>
nuclear@47 2 #include <string.h>
nuclear@52 3 #include <assert.h>
nuclear@57 4 #include <errno.h>
nuclear@55 5 #include "config.h"
nuclear@42 6 #include "proc.h"
nuclear@42 7 #include "tss.h"
nuclear@45 8 #include "vm.h"
nuclear@47 9 #include "segm.h"
nuclear@47 10 #include "intr.h"
nuclear@47 11 #include "panic.h"
nuclear@51 12 #include "syscall.h"
nuclear@51 13 #include "sched.h"
nuclear@54 14 #include "tss.h"
nuclear@47 15
nuclear@55 16 #define FLAGS_INTR_BIT (1 << 9)
nuclear@47 17
nuclear@54 18 static void start_first_proc(void);
nuclear@54 19
nuclear@55 20 /* defined in proc-asm.S */
nuclear@57 21 uint32_t switch_stack(uint32_t new_stack, uint32_t *old_stack);
nuclear@57 22 void just_forked(void);
nuclear@54 23
nuclear@47 24 /* defined in test_proc.S */
nuclear@47 25 void test_proc(void);
nuclear@47 26 void test_proc_end(void);
nuclear@42 27
nuclear@42 28 static struct process proc[MAX_PROC];
nuclear@56 29
nuclear@56 30 /* cur_pid: pid of the currently executing process.
nuclear@56 31 * when we're in the idle process cur_pid will be 0.
nuclear@56 32 * last_pid: pid of the last real process that was running; this should
nuclear@56 33 * never become 0. Essentially this defines the active kernel stack.
nuclear@56 34 */
nuclear@56 35 static int cur_pid, last_pid;
nuclear@42 36
nuclear@54 37 static struct task_state *tss;
nuclear@54 38
nuclear@54 39
nuclear@42 40 void init_proc(void)
nuclear@42 41 {
nuclear@54 42 int tss_page;
nuclear@51 43
nuclear@54 44 /* allocate a page for the task state segment, to make sure
nuclear@54 45 * it doesn't cross page boundaries
nuclear@54 46 */
nuclear@54 47 if((tss_page = pgalloc(1, MEM_KERNEL)) == -1) {
nuclear@54 48 panic("failed to allocate memory for the task state segment\n");
nuclear@54 49 }
nuclear@55 50 tss = (struct task_state*)PAGE_TO_ADDR(tss_page);
nuclear@54 51
nuclear@54 52 /* the kernel stack segment never changes, so we might as well set it now.
nuclear@54 53 * the only other thing we use in the tss is the kernel stack pointer,
nuclear@54 54 * which is different for each process and is thus managed by context_switch.
nuclear@54 55 */
nuclear@54 56 memset(tss, 0, sizeof *tss);
nuclear@54 57 tss->ss0 = selector(SEGM_KDATA, 0);
nuclear@54 58
nuclear@55 59 set_tss((uint32_t)tss);
nuclear@54 60
nuclear@54 61 /* initialize system call handler (see syscall.c) */
nuclear@51 62 init_syscall();
nuclear@42 63
nuclear@54 64 start_first_proc(); /* XXX never returns */
nuclear@54 65 }
nuclear@54 66
nuclear@54 67 static void start_first_proc(void)
nuclear@54 68 {
nuclear@54 69 struct process *p;
nuclear@54 70 int proc_size_pg, img_start_pg, stack_pg;
nuclear@55 71 uint32_t img_start_addr;
nuclear@54 72 struct intr_frame ifrm;
nuclear@54 73
nuclear@42 74 /* prepare the first process */
nuclear@54 75 p = proc + 1;
nuclear@54 76 p->id = 1;
nuclear@54 77 p->parent = 0; /* no parent for init */
nuclear@42 78
nuclear@55 79 p->ticks_left = TIMESLICE_TICKS;
nuclear@55 80 p->next = p->prev = 0;
nuclear@55 81
nuclear@55 82 /* the first process can keep using the existing page table */
nuclear@55 83 p->ctx.pgtbl_paddr = get_pgdir_addr();
nuclear@55 84
nuclear@42 85 /* allocate a chunk of memory for the process image
nuclear@42 86 * and copy the code of test_proc there.
nuclear@42 87 */
nuclear@51 88 proc_size_pg = (test_proc_end - test_proc) / PGSIZE + 1;
nuclear@45 89 if((img_start_pg = pgalloc(proc_size_pg, MEM_USER)) == -1) {
nuclear@45 90 panic("failed to allocate space for the init process image\n");
nuclear@45 91 }
nuclear@54 92 img_start_addr = PAGE_TO_ADDR(img_start_pg);
nuclear@54 93 memcpy((void*)img_start_addr, test_proc, proc_size_pg * PGSIZE);
nuclear@54 94 printf("copied init process at: %x\n", img_start_addr);
nuclear@47 95
nuclear@47 96 /* allocate the first page of the process stack */
nuclear@47 97 stack_pg = ADDR_TO_PAGE(KMEM_START) - 1;
nuclear@47 98 if(pgalloc_vrange(stack_pg, 1) == -1) {
nuclear@47 99 panic("failed to allocate user stack page\n");
nuclear@47 100 }
nuclear@54 101 p->user_stack_pg = stack_pg;
nuclear@52 102
nuclear@54 103 /* allocate a kernel stack for this process */
nuclear@54 104 if((p->kern_stack_pg = pgalloc(KERN_STACK_SIZE / PGSIZE, MEM_KERNEL)) == -1) {
nuclear@54 105 panic("failed to allocate kernel stack for the init process\n");
nuclear@54 106 }
nuclear@54 107 /* when switching from user space to kernel space, the ss0:esp0 from TSS
nuclear@54 108 * will be used to switch to the per-process kernel stack, so we need to
nuclear@54 109 * set it correctly before switching to user space.
nuclear@54 110 * tss->ss0 is already set in init_proc above.
nuclear@54 111 */
nuclear@54 112 tss->esp0 = PAGE_TO_ADDR(p->kern_stack_pg) + KERN_STACK_SIZE;
nuclear@45 113
nuclear@45 114
nuclear@54 115 /* now we need to fill in the fake interrupt stack frame */
nuclear@54 116 memset(&ifrm, 0, sizeof ifrm);
nuclear@54 117 /* after the privilege switch, this ss:esp will be used in userspace */
nuclear@54 118 ifrm.esp = PAGE_TO_ADDR(stack_pg) + PGSIZE;
nuclear@54 119 ifrm.ss = selector(SEGM_UDATA, 3);
nuclear@54 120 /* instruction pointer at the beginning of the process image */
nuclear@55 121 ifrm.eip = img_start_addr;
nuclear@54 122 ifrm.cs = selector(SEGM_UCODE, 3);
nuclear@54 123 /* make sure the user will run with interrupts enabled */
nuclear@54 124 ifrm.eflags = FLAGS_INTR_BIT;
nuclear@54 125 /* user data selectors should all be the same */
nuclear@54 126 ifrm.ds = ifrm.es = ifrm.fs = ifrm.gs = ifrm.ss;
nuclear@42 127
nuclear@51 128 /* add it to the scheduler queues */
nuclear@55 129 add_proc(p->id);
nuclear@55 130
nuclear@56 131 /* make it current */
nuclear@56 132 set_current_pid(p->id);
nuclear@42 133
nuclear@68 134 /* build the current vm map */
nuclear@68 135 cons_vmmap(&p->vmmap);
nuclear@68 136
nuclear@54 137 /* execute a fake return from interrupt with the fake stack frame */
nuclear@54 138 intr_ret(ifrm);
nuclear@42 139 }
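/* [editorial aside] A hedged sketch of two x86 details the fake frame above
 * relies on: EFLAGS bit 9 is the interrupt-enable flag (the bit FLAGS_INTR_BIT
 * sets), and a segment selector packs a GDT descriptor index together with a
 * requested privilege level. The project's real selector() lives in segm.h and
 * may be defined differently; mk_selector below only illustrates the
 * conventional layout.
 */
#define EFLAGS_IF   (1 << 9)    /* same bit as FLAGS_INTR_BIT above */

static inline uint16_t mk_selector(int gdt_index, int rpl)
{
    /* bits 15..3: descriptor index, bit 2: table indicator (0 = GDT),
     * bits 1..0: requested privilege level (3 = user mode)
     */
    return (uint16_t)((gdt_index << 3) | (rpl & 3));
}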
nuclear@42 140
nuclear@57 141 int fork(void)
nuclear@57 142 {
nuclear@57 143 int i, pid;
nuclear@57 144 struct process *p, *parent;
nuclear@57 145
nuclear@57 146 disable_intr();
nuclear@57 147
nuclear@57 148 /* find a free process slot */
nuclear@57 149 /* TODO don't search up to MAX_PROC if uid != 0 */
nuclear@57 150 pid = -1;
nuclear@57 151 for(i=1; i<MAX_PROC; i++) {
nuclear@57 152 if(proc[i].id == 0) {
nuclear@57 153 pid = i;
nuclear@57 154 break;
nuclear@57 155 }
nuclear@57 156 }
nuclear@57 157
nuclear@57 158 if(pid == -1) {
nuclear@57 159 /* process table full */
nuclear@57 160 return -EAGAIN;
nuclear@57 161 }
nuclear@57 162
nuclear@57 163
nuclear@57 164 p = proc + pid;
nuclear@57 165 parent = get_current_proc();
nuclear@57 166
nuclear@57 167 /* allocate a kernel stack for the new process */
nuclear@57 168 if((p->kern_stack_pg = pgalloc(KERN_STACK_SIZE / PGSIZE, MEM_KERNEL)) == -1) {
nuclear@57 169 return -EAGAIN;
nuclear@57 170 }
nuclear@57 171 p->ctx.stack_ptr = PAGE_TO_ADDR(p->kern_stack_pg) + KERN_STACK_SIZE;
nuclear@57 172 /* we need to copy the current interrupt frame to the new kernel stack so
nuclear@57 173 * that the new process will return to the same point as the parent, just
nuclear@57 174 * after the fork syscall.
nuclear@57 175 */
nuclear@57 176 p->ctx.stack_ptr -= sizeof(struct intr_frame);
nuclear@57 177 memcpy((void*)p->ctx.stack_ptr, get_intr_frame(), sizeof(struct intr_frame));
nuclear@57 178 /* child's return from fork returns 0 */
nuclear@57 179 ((struct intr_frame*)p->ctx.stack_ptr)->regs.eax = 0;
nuclear@57 180
nuclear@59 181 /* we also need the address of just_forked on the stack, so that switch_stack,
nuclear@59 182 * called from context_switch, will return to just_forked when we first switch
nuclear@59 183 * to a newly forked process. just_forked then just calls intr_ret to return to
nuclear@59 184 * userspace with the already constructed interrupt frame (see above).
nuclear@59 185 */
nuclear@57 186 p->ctx.stack_ptr -= 4;
nuclear@57 187 *(uint32_t*)p->ctx.stack_ptr = (uint32_t)just_forked;
nuclear@57 188
nuclear@57 189 /* initialize the rest of the process structure */
nuclear@57 190 p->id = pid;
nuclear@57 191 p->parent = parent->id;
nuclear@57 192 p->next = p->prev = 0;
nuclear@57 193
nuclear@57 194 /* will be copied on write */
nuclear@57 195 p->user_stack_pg = parent->user_stack_pg;
nuclear@57 196
nuclear@57 197 p->ctx.pgtbl_paddr = clone_vm(CLONE_COW);
nuclear@57 198
nuclear@57 199 /* done, now let's add it to the scheduler runqueue */
nuclear@57 200 add_proc(p->id);
nuclear@57 201
nuclear@57 202 return pid;
nuclear@57 203 }
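/* [editorial aside] A hedged usage sketch: assuming a userspace wrapper that
 * issues the fork syscall (not part of this file), the two return paths set
 * up above are consumed in the usual way -- the child sees 0 because eax in
 * its copied interrupt frame was zeroed, while the parent receives the new
 * process id, or a negative errno-style value on failure.
 */
static void example_spawn(void)    /* hypothetical caller, for illustration only */
{
    int pid = fork();

    if(pid == 0) {
        /* child: runs on a copy-on-write clone of the parent's memory */
    } else if(pid > 0) {
        /* parent: pid names the child that was just added to the runqueue */
    } else {
        /* failure, e.g. -EAGAIN when the process table is full */
    }
}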
nuclear@47 204
nuclear@47 205 void context_switch(int pid)
nuclear@42 206 {
nuclear@56 207 static struct process *prev, *new;
nuclear@49 208
nuclear@55 209 assert(get_intr_state() == 0);
nuclear@56 210 assert(pid > 0);
nuclear@56 211 assert(last_pid > 0);
nuclear@55 212
nuclear@56 213 prev = proc + last_pid;
nuclear@54 214 new = proc + pid;
nuclear@52 215
nuclear@56 216 if(last_pid != pid) {
nuclear@57 217 set_current_pid(new->id);
nuclear@47 218
nuclear@56 219 /* switch to the new process' address space */
nuclear@56 220 set_pgdir_addr(new->ctx.pgtbl_paddr);
nuclear@47 221
nuclear@56 222 /* make sure we'll return to the correct kernel stack next time
nuclear@56 223 * we enter from userspace
nuclear@56 224 */
nuclear@56 225 tss->esp0 = PAGE_TO_ADDR(new->kern_stack_pg) + KERN_STACK_SIZE;
nuclear@57 226
nuclear@57 227 /* push all registers onto the stack before switching stacks */
nuclear@57 228 push_regs();
nuclear@57 229
nuclear@57 230 /* XXX: when switching to newly forked processes, this switch_stack call
nuclear@57 231 * WILL NOT RETURN HERE. It will return to just_forked instead. So the
nuclear@57 232 * rest of this function will not run.
nuclear@57 233 */
nuclear@57 234 switch_stack(new->ctx.stack_ptr, &prev->ctx.stack_ptr);
nuclear@57 235
nuclear@57 236 /* restore registers from the new stack */
nuclear@57 237 pop_regs();
nuclear@57 238 } else {
nuclear@57 239 set_current_pid(new->id);
nuclear@56 240 }
nuclear@56 241 }
nuclear@56 242
nuclear@56 243
nuclear@56 244 void set_current_pid(int pid)
nuclear@56 245 {
nuclear@56 246 cur_pid = pid;
nuclear@56 247 if(pid > 0) {
nuclear@56 248 last_pid = pid;
nuclear@56 249 }
nuclear@47 250 }
nuclear@51 251
nuclear@51 252 int get_current_pid(void)
nuclear@51 253 {
nuclear@51 254 return cur_pid;
nuclear@51 255 }
nuclear@51 256
nuclear@51 257 struct process *get_current_proc(void)
nuclear@51 258 {
nuclear@56 259 return cur_pid > 0 ? &proc[cur_pid] : 0;
nuclear@51 260 }
nuclear@51 261
nuclear@51 262 struct process *get_process(int pid)
nuclear@51 263 {
nuclear@51 264 return &proc[pid];
nuclear@51 265 }