#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "config.h"
#include "proc.h"
#include "tss.h"
#include "vm.h"
#include "segm.h"
#include "intr.h"
#include "panic.h"
#include "syscall.h"
#include "sched.h"
#include "kdef.h"

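/* bit 9 of the EFLAGS register is IF, the interrupt enable flag */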
#define FLAGS_INTR_BIT  (1 << 9)

static void start_first_proc(void);

/* defined in proc-asm.S */
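/* switch_stack() stores the outgoing kernel stack pointer in *old_stack and
 * loads new_stack into esp, so execution resumes on whatever return address
 * sits on the new stack. just_forked() is where newly forked children resume
 * (see sys_fork and context_switch below).
 */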
uint32_t switch_stack(uint32_t new_stack, uint32_t *old_stack);
void just_forked(void);

/* defined in test_proc.S */
void test_proc(void);
void test_proc_end(void);

static struct process proc[MAX_PROC];

/* cur_pid: pid of the currently executing process.
 * when we're in the idle process, cur_pid will be 0.
 * last_pid: pid of the last real process that was running; this should
 * never become 0. Essentially it defines the active kernel stack.
 */
static int cur_pid, last_pid;

static struct task_state *tss;

void init_proc(void)
{
    int tss_page;

    /* allocate a page for the task state segment, to make sure
     * it doesn't cross page boundaries
     */
    if((tss_page = pgalloc(1, MEM_KERNEL)) == -1) {
        panic("failed to allocate memory for the task state segment\n");
    }
    tss = (struct task_state*)PAGE_TO_ADDR(tss_page);

    /* the kernel stack segment never changes, so we might as well set it now.
     * the only other thing we use in the tss is the kernel stack pointer,
     * which is different for each process and thus managed by context_switch.
     */
    memset(tss, 0, sizeof *tss);
    tss->ss0 = selector(SEGM_KDATA, 0);

    set_tss((uint32_t)tss);

    /* initialize the system call handler (see syscall.c) */
    init_syscall();

    start_first_proc(); /* XXX never returns */
}

static void start_first_proc(void)
{
    struct process *p;
    int proc_size_pg, img_start_pg, stack_pg;
    uint32_t img_start_addr;
    struct intr_frame ifrm;

    /* prepare the first process */
    p = proc + 1;
    p->id = 1;
    p->parent = 0; /* no parent for init */

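    /* default file creation mask: strip the write bits for group and others */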
    p->umask = 022;

    p->ticks_left = TIMESLICE_TICKS;
    p->next = p->prev = 0;

    /* the first process may keep the existing page table */
    p->ctx.pgtbl_paddr = get_pgdir_addr();

    /* allocate a chunk of memory for the process image
     * and copy the code of test_proc there.
     */
    proc_size_pg = (test_proc_end - test_proc) / PGSIZE + 1;
    if((img_start_pg = pgalloc(proc_size_pg, MEM_USER)) == -1) {
        panic("failed to allocate space for the init process image\n");
    }
    img_start_addr = PAGE_TO_ADDR(img_start_pg);
    memcpy((void*)img_start_addr, test_proc, proc_size_pg * PGSIZE);
    printf("copied init process at: %x\n", img_start_addr);

    /* allocate the first page of the user stack */
    stack_pg = ADDR_TO_PAGE(KMEM_START) - 1;
    if(pgalloc_vrange(stack_pg, 1) == -1) {
        panic("failed to allocate user stack page\n");
    }
    p->user_stack_pg = stack_pg;

    /* allocate a kernel stack for this process */
    if((p->kern_stack_pg = pgalloc(KERN_STACK_SIZE / PGSIZE, MEM_KERNEL)) == -1) {
        panic("failed to allocate kernel stack for the init process\n");
    }
    /* when switching from user space to kernel space, the ss0:esp0 from the TSS
     * will be used to switch to the per-process kernel stack, so we need to
     * set it correctly before switching to user space.
     * tss->ss0 is already set in init_proc above.
     */
    tss->esp0 = PAGE_TO_ADDR(p->kern_stack_pg) + KERN_STACK_SIZE;

    /* now we need to fill in the fake interrupt stack frame */
    memset(&ifrm, 0, sizeof ifrm);
    /* after the privilege switch, this ss:esp will be used in userspace */
    ifrm.esp = PAGE_TO_ADDR(stack_pg) + PGSIZE;
    ifrm.ss = selector(SEGM_UDATA, 3);
    /* instruction pointer at the beginning of the process image */
    ifrm.eip = img_start_addr;
    ifrm.cs = selector(SEGM_UCODE, 3);
    /* make sure the user will run with interrupts enabled */
    ifrm.eflags = FLAGS_INTR_BIT;
    /* user data selectors should all be the same */
    ifrm.ds = ifrm.es = ifrm.fs = ifrm.gs = ifrm.ss;

    /* add it to the scheduler queues */
    add_proc(p->id);

    /* make it current */
    set_current_pid(p->id);

    /* build the current vm map */
    cons_vmmap(&p->vmmap);

    /* execute a fake return from interrupt with the fake stack frame */
    intr_ret(ifrm);
}

int sys_fork(void)
{
    int i, pid;
    struct process *p, *parent;

    disable_intr();

    /* find a free process slot */
    /* TODO don't search up to MAX_PROC if uid != 0 */
    pid = -1;
    for(i=1; i<MAX_PROC; i++) {
        if(proc[i].id == 0) {
            pid = i;
            break;
        }
    }

    if(pid == -1) {
        /* process table full */
        return -EAGAIN;
    }

    p = proc + pid;
    parent = get_current_proc();

    /* copy file table */
    memcpy(p->files, parent->files, sizeof p->files);

    p->umask = parent->umask;

    /* allocate a kernel stack for the new process */
    if((p->kern_stack_pg = pgalloc(KERN_STACK_SIZE / PGSIZE, MEM_KERNEL)) == -1) {
        return -EAGAIN;
    }
    p->ctx.stack_ptr = PAGE_TO_ADDR(p->kern_stack_pg) + KERN_STACK_SIZE;
    /* we need to copy the current interrupt frame to the new kernel stack so
     * that the new process will return to the same point as the parent, just
     * after the fork syscall.
     */
    p->ctx.stack_ptr -= sizeof(struct intr_frame);
    memcpy((void*)p->ctx.stack_ptr, get_intr_frame(), sizeof(struct intr_frame));
    /* the child's return from fork is 0 */
    ((struct intr_frame*)p->ctx.stack_ptr)->regs.eax = 0;

    /* we also need the address of just_forked on the stack, so that switch_stack,
     * called from context_switch, will return to just_forked when we first switch
     * to a newly forked process. just_forked then just calls intr_ret to return to
     * userspace with the already constructed interrupt frame (see above).
     */
    p->ctx.stack_ptr -= 4;
    *(uint32_t*)p->ctx.stack_ptr = (uint32_t)just_forked;
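    /* the child's kernel stack now holds, from higher to lower addresses, a copy
     * of the parent's interrupt frame (with eax forced to 0), then the address of
     * just_forked at ctx.stack_ptr, so the first switch_stack() into this process
     * "returns" into just_forked and drops into userspace right after the fork.
     */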

    /* initialize the rest of the process structure */
    p->id = pid;
    p->parent = parent->id;
    p->child_list = 0;
    p->next = p->prev = 0;

    /* add it to the parent's child list */
    p->sib_next = parent->child_list;
    parent->child_list = p;

    /* will be copied on write */
    p->user_stack_pg = parent->user_stack_pg;

    /* clone the parent's virtual memory */
    clone_vm(p, parent, CLONE_COW);

    /* done, now let's add it to the scheduler runqueue */
    add_proc(p->id);

    return pid;
}

int sys_exit(int status)
{
    struct process *p, *child;

    p = get_current_proc();

    printf("process %d exit(%d)\n", p->id, status);

    /* TODO deliver SIGCHLD to the parent */

    /* find any child processes and make init adopt them */
    child = p->child_list;
    while(child) {
        child->parent = 1;
        child = child->sib_next;
    }

    cleanup_vm(p);

    /* remove it from the runqueue */
    remove_proc(p->id);

    /* make it a zombie until its parent reaps it */
    p->state = STATE_ZOMBIE;
    p->exit_status = (status & _WSTATUS_MASK) | (_WREASON_EXITED << _WREASON_SHIFT);

    /* wake up any processes waiting for it.
     * we're waking up the parent's address, because waitpid waits
     * on its own process struct, not knowing which child will die
     * first.
     */
    wakeup(get_process(p->parent));
    return 0;
}

int sys_waitpid(int pid, int *status, int opt)
{
    struct process *p, *child;

    p = get_current_proc();

restart:
    if(pid <= 0) {
        /* search for zombie children */
        child = p->child_list;
        while(child) {
            if(child->state == STATE_ZOMBIE) {
                break;
            }
            child = child->sib_next;
        }
    } else {
        if(!(child = get_process(pid)) || child->parent != p->id) {
            return -ECHILD;
        }
        if(child->state != STATE_ZOMBIE) {
            child = 0;
        }
    }

    /* found? */
    if(child) {
        int res;
        struct process *prev, dummy;

        if(status) {
            *status = child->exit_status;
        }
        res = child->id;

        /* remove it from our children list */
        dummy.sib_next = p->child_list;
        prev = &dummy;
        while(prev->sib_next) {
            if(prev->sib_next == child) {
                prev->sib_next = child->sib_next;
                break;
            }
            prev = prev->sib_next;
        }
        p->child_list = dummy.sib_next;

        /* invalidate the id */
        child->id = 0;
        return res;
    }

    /* not found, wait or sod off */
    if(!(opt & WNOHANG)) {
        /* wait on our own process struct because
         * we have no way of knowing which child will
         * die first.
         * exit will wake up the parent structure...
         */
        wait(p);
        /* done waiting, restart waitpid */
        goto restart;
    }

    return 0; /* he's not dead jim */
}
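
/* Putting fork/exit/waitpid together, a minimal userspace sketch (assuming the
 * usual libc-style wrappers around these syscalls; not part of this file):
 *
 *   int status, pid = fork();
 *   if(pid == 0) {
 *       exit(5);              // child becomes a zombie holding its exit status
 *   }
 *   waitpid(pid, &status, 0); // parent sleeps in wait(p) until woken by exit,
 *                             // then reaps the child and frees its proc slot
 */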

void context_switch(int pid)
{
    static struct process *prev, *new;

    assert(get_intr_state() == 0);
    assert(pid > 0);
    assert(last_pid > 0);

    prev = proc + last_pid;
    new = proc + pid;

    if(last_pid != pid) {
        set_current_pid(new->id);

        /* switch to the new process' address space */
        set_pgdir_addr(new->ctx.pgtbl_paddr);

        /* make sure we'll return to the correct kernel stack next time
         * we enter from userspace
         */
        tss->esp0 = PAGE_TO_ADDR(new->kern_stack_pg) + KERN_STACK_SIZE;

        /* push all registers onto the stack before switching stacks */
        push_regs();

        /* XXX: when switching to newly forked processes this switch_stack call
         * WILL NOT RETURN HERE. It will return to just_forked instead. So the
         * rest of this function will not run.
         */
        switch_stack(new->ctx.stack_ptr, &prev->ctx.stack_ptr);

        /* restore registers from the new stack */
        pop_regs();
    } else {
        set_current_pid(new->id);
    }
}


void set_current_pid(int pid)
{
    cur_pid = pid;
    if(pid > 0) {
        last_pid = pid;
    }
}

int get_current_pid(void)
{
    return cur_pid;
}

struct process *get_current_proc(void)
{
    return cur_pid > 0 ? &proc[cur_pid] : 0;
}

struct process *get_process(int pid)
{
    struct process *p = proc + pid;
    if(p->id != pid) {
        printf("get_process called with invalid pid: %d\n", pid);
        return 0;
    }
    return p;
}

int sys_getpid(void)
{
    return cur_pid;
}

int sys_getppid(void)
{
    struct process *p = get_current_proc();

    if(!p) {
        return 0;
    }
    return p->parent;
}