cynix

x86 UNIX-like OS
git clone git://git.2f30.org/cynix
Log | Files | Refs | README | LICENSE

tss.c (9506B)


      1 /*
      2  *  core/tss.c
      3  *
      4  *  Copyright (C) 2009 stateless
      5  */
      6 
      7 #include <tss.h>
      8 #include <string.h>
      9 #include <common.h>
     10 #include <heap.h>
     11 #include <errno.h>
     12 #include <syscall.h>
     13 #include <tty.h>
     14 
     15 #define NR_HASH_ENT 64
     16 
     17 extern struct page_directory_t *new_kernel_pdir;
     18 
     19 struct task_t *curr_proc = NULL;
     20 
     21 static struct list_head htsleep[NR_HASH_ENT];
     22 static struct list_head runqueue;
     23 static struct task_t *idle = NULL;
     24 static int tss_init = 0;
     25 static pid_t currpid = 0;
     26 
     27 static uint32_t hash_chan(void *chan);
     28 
     29 static inline pid_t
     30 get_new_pid(void)
     31 {
     32 	int pid;
     33 	uint32_t state;
     34 
     35 	save_flags(&state);
     36 	cli();
     37 	/* I hope this never wraps */
     38 	pid = currpid++;
     39 	load_flags(state);
     40 	return pid;
     41 }
     42 
     43 void
     44 ps(void)
     45 {
     46 	uint32_t state, i;
     47 	struct list_head *iter;
     48 	struct task_t *task;
     49 
     50 	printf("%-12s%-12s%-12s%-12s%-12s\n",
     51 	       "PID",
     52 	       "TYPE",
     53 	       "STATE",
     54 	       "PROC",
     55 	       "PARENT");
     56 	save_flags(&state);
     57 	cli();
     58 	list_for_each(iter, &runqueue) {
     59 		task = list_entry(iter, struct task_t, q_task);
     60 		printf("%-12d%-12s%-12s%-12s%-12s\n",
     61 		       task->pid,
     62 		       (task->flags & KERNEL_PROCESS) ? "KTHREAD" : "USER PROC",
     63 		       (task->state == TASK_ZOMBIE) ? "ZOMBIE" :
     64 		       (task->state == TASK_SLEEPING) ? "SLEEPING" :
     65 		       (task->state == TASK_RUNNABLE) ? "RUNNABLE" : "RUNNING",
     66 		       task->name,
     67 		       (!task->parent) ? "(null)" : task->parent->name);
     68 	}
     69 	for (i = 0; i < NR_HASH_ENT; ++i) {
     70 		list_for_each(iter, &htsleep[i]) {
     71 			task = list_entry(iter, struct task_t, q_task);
     72 			assert(task->state == TASK_SLEEPING);
     73 			printf("%-12d%-12s%-12s%-12s%-12s\n",
     74 			       task->pid,
     75 			       (task->flags & KERNEL_PROCESS) ? "KTRHEAD" : "USER PROC",
     76 			       "SLEEPING",
     77 			       task->name,
     78 			       (!task->parent) ? "(null)" : task->parent->name);
     79 		}
     80 	}
     81 	load_flags(state);
     82 }
     83 
     84 void
     85 idle_fn(void)
     86 {
     87 	tss_init = 1;
     88 	curr_proc->state = TASK_RUNNING;
     89 	sti();
     90 	while (1) {
     91 	}
     92 }
     93 
     94 void
     95 freeze_tasks(void)
     96 {
     97 	struct list_head *iter;
     98 	struct task_t *task;
     99 
    100 	list_for_each(iter, &runqueue) {
    101 		task = list_entry(iter, struct task_t, q_task);
    102 		if (!strcmp(task->name, "kdb")
    103 				|| !strcmp(task->name, "idle")) {
    104 			task->state = TASK_RUNNABLE;
    105 			continue;
    106 		}
    107 		task->old_state = task->state;
    108 		task->state = TASK_SLEEPING;
    109 	}
    110 }
    111 
    112 void
    113 unfreeze_tasks(void)
    114 {
    115 	struct list_head *iter;
    116 	struct task_t *task;
    117 
    118 	list_for_each(iter, &runqueue) {
    119 		task = list_entry(iter, struct task_t, q_task);
    120 		if (strcmp(task->name, "kdb")
    121 				&& strcmp(task->name, "idle"))
    122 			task->state = task->old_state;
    123 	}
    124 }
    125 
/*
 * Round-robin scheduler.  Walks the runqueue front to back:
 *  - the first TASK_RUNNABLE task becomes curr_proc, its address
 *    space is installed via switch_page_dir(), and the walk stops
 *    (the chosen task is NOT rotated — the break skips list_del);
 *  - the task that was TASK_RUNNING is demoted to TASK_RUNNABLE;
 *  - every task examined without being chosen is moved to the tail
 *    (list_del + add_task), which is what produces the rotation.
 * Does nothing until idle_fn() has set tss_init.
 */
void
schedule(void)
{
	struct list_head *iter, *q;
	struct task_t *task;

	if (!tss_init)
		return;

	/* _safe variant: the loop deletes iter while iterating */
	list_for_each_safe(iter, q, &runqueue) {
		task = list_entry(iter, struct task_t, q_task);
		if (task->state == TASK_RUNNABLE) {
			task->state = TASK_RUNNING;
			curr_proc = task;
			switch_page_dir(curr_proc->page_dir);
			break;
		} else if (task->state == TASK_RUNNING)
			task->state = TASK_RUNNABLE;
		/* rotate the examined task to the tail of the queue */
		list_del(iter);
		add_task(task);
	}
}
    148 
    149 int
    150 init_tss(void)
    151 {
    152 	int i;
    153 
    154 	cli();
    155 	INIT_LIST_HEAD(&runqueue);
    156 	for (i = 0; i < NR_HASH_ENT; ++i)
    157 		INIT_LIST_HEAD(&htsleep[i]);
    158 	idle = create_kthread("idle", idle_fn);
    159 	if (IS_ERR(idle))
    160 		return PTR_ERR(idle);
    161 	curr_proc = idle;
    162 	return 0;
    163 }
    164 
/*
 * Launch the current process for the very first time: load its saved
 * stack pointer into esp and jump to the eip stored in the
 * context-switch frame that create_kthread() faked at the top of the
 * task's stack.  Never returns.
 */
void
kick_tss(void)
{
	PUT_ESP(curr_proc->esp);
	jmp(((struct cswitch_frame_t *)curr_proc->esp)->eip);
}
    171 
    172 struct task_t *
    173 create_kthread(const char *name, void *routine) {
    174 	struct task_t *task;
    175 	int ret = -ENOMEM, i;
    176 	size_t len;
    177 
    178 	task = kmalloc(sizeof(*task));
    179 	if (IS_ERR(task)) {
    180 		ret = PTR_ERR(task);
    181 		goto err;
    182 	}
    183 	memset(task, 0, sizeof(*task));
    184 	len = strlen(name) + 1;
    185 	assert(len);
    186 	task->name = kmalloc(len);
    187 	if (IS_ERR(task->name)) {
    188 		ret = PTR_ERR(task->name);
    189 		goto err1;
    190 	}
    191 	strncpy(task->name, name, len);
    192 	task->name[len - 1] = '\0';
    193 	INIT_LIST_HEAD(&task->l_regions);
    194 	task->parent = curr_proc;
    195 	task->pid = get_new_pid();
    196 	task->state = task->old_state = TASK_RUNNABLE;
    197 	task->flags = KERNEL_PROCESS;
    198 	task->cdir = get_root_inode();
    199 	if (!task->cdir) {
    200 		ret = -EIO;
    201 		goto err2;
    202 	}
    203 	for (i = 0; i < NR_MAX_OPEN_FILES; ++i)
    204 		task->fdtable[i] = NULL;
    205 	task->stack = kmalloc(TASK_STACK_SIZE);
    206 	if (IS_ERR(task->stack)) {
    207 		ret = PTR_ERR(task->stack);
    208 		goto err2;
    209 	}
    210 	memset(task->stack, 0x0, TASK_STACK_SIZE);
    211 	task->page_dir = clone_page_dir();
    212 	if (IS_ERR(task->page_dir)) {
    213 		ret = PTR_ERR(task->page_dir);
    214 		goto err3;
    215 	}
    216 	task->cf = (struct cswitch_frame_t *)((uint8_t *)task->stack
    217 					      + TASK_STACK_SIZE
    218 					      - sizeof(struct cswitch_frame_t));
    219 	task->cf->eflags = 0x202;
    220 	task->cf->cs = 0x8;
    221 	task->cf->eip = (uint32_t)routine;
    222 	task->cf->ds = 0x10;
    223 	task->esp = (uint32_t)task->cf;
    224 	attach_tty(task);
    225 	add_task(task);
    226 	return task;
    227 err3:
    228 	kfree(task->stack);
    229 err2:
    230 	kfree(task->name);
    231 err1:
    232 	kfree(task);
    233 err:
    234 	return ERR_PTR(ret);
    235 }
    236 
    237 void
    238 add_task(struct task_t *task)
    239 {
    240 	uint32_t state;
    241 
    242 	save_flags(&state);
    243 	cli();
    244 	list_add_tail(&task->q_task, &runqueue);
    245 	load_flags(state);
    246 }
    247 
/*
 * Unlink a task from whatever queue it is on and free its name,
 * stack and task structure, with interrupts disabled throughout.
 *
 * NOTE(review): the page directory obtained from clone_page_dir()
 * and the fdtable/pipebuf references are not released here — looks
 * like a resource leak; confirm against the page/fs allocators.
 * Presumably this must never be called on curr_proc, since the
 * stack being freed could be the one currently executing — verify
 * at the call sites (e.g. waitpid reaping zombies).
 */
void
remove_task(struct task_t *task)
{
	uint32_t state;

	save_flags(&state);
	cli();
	list_del(&task->q_task);
	kfree(task->name);
	kfree(task->stack);
	kfree(task);
	load_flags(state);
}
    261 
    262 int
    263 fork(void)
    264 {
    265 	int ret = -ENOMEM, i;
    266 	uint32_t offset;
    267 	struct task_t *task, *parent;
    268 	size_t len;
    269 
    270 	parent = curr_proc;
    271 	task = kmalloc(sizeof(*task));
    272 	if (IS_ERR(task)) {
    273 		ret = PTR_ERR(task);
    274 		goto err;
    275 	}
    276 	memset(task, 0, sizeof(*task));
    277 	len = strlen(parent->name) + 1;
    278 	assert(len);
    279 	task->name = kmalloc(len);
    280 	if (IS_ERR(task->name)) {
    281 		ret = PTR_ERR(task->name);
    282 		goto err1;
    283 	}
    284 	strncpy(task->name, parent->name, len);
    285 	task->name[len - 1] = '\0';
    286 	INIT_LIST_HEAD(&task->l_regions);
    287 	task->parent = parent;
    288 	task->pid = get_new_pid();
    289 	task->state = task->old_state = TASK_RUNNABLE;
    290 	task->flags = parent->flags;
    291 	task->uid = parent->uid;
    292 	task->gid = parent->gid;
    293 	task->fuid = parent->fuid;
    294 	task->fgid = parent->fgid;
    295 	task->cdir = parent->cdir;
    296 	for (i = 0; i < NR_MAX_OPEN_FILES; ++i) {
    297 		task->fdtable[i] = parent->fdtable[i];
    298 		if (task->fdtable[i]->f_state == FILE_ALLOC)
    299 			++task->fdtable[i]->f_refcount;
    300 	}
    301 	for (i = 0; i < NR_MAX_OPEN_FILES; ++i)
    302 		task->pipebufs[i] = parent->pipebufs[i];
    303 	task->stack = kmalloc(TASK_STACK_SIZE);
    304 	if (IS_ERR(task->stack)) {
    305 		ret = PTR_ERR(task->stack);
    306 		goto err2;
    307 	}
    308 	memset(task->stack, 0, TASK_STACK_SIZE);
    309 	task->page_dir = clone_page_dir();
    310 	if (IS_ERR(task->page_dir)) {
    311 		ret = PTR_ERR(task->page_dir);
    312 		goto err3;
    313 	}
    314 	memcpy(task->stack, parent->stack, TASK_STACK_SIZE);
    315 	/*
    316 	 * perhaps by patching all ebps in the parent stack, the child will be able to
    317 	 * return properly even if SYS_FORK is not inlined
    318 	 */
    319 	offset = (uint32_t)((uint8_t *)parent->stack
    320 			    + TASK_STACK_SIZE
    321 			    - parent->esp);
    322 	task->cf = (struct cswitch_frame_t *)((uint8_t *)task->stack
    323 					      + TASK_STACK_SIZE
    324 					      - offset
    325 					      - sizeof(struct cswitch_frame_t));
    326 	*task->cf = *parent->cf;
    327 	offset = (uint32_t)((uint8_t *)parent->stack
    328 			    + TASK_STACK_SIZE
    329 			    - parent->cf->ebp);
    330 	task->cf->ebp = (uint32_t)((uint8_t *)task->stack
    331 				   + TASK_STACK_SIZE
    332 				   - offset);
    333 	task->cf->eax = 0;
    334 	task->esp = (uint32_t)task->cf;
    335 	/* TODO: create userspace stuff */
    336 	attach_tty(task);
    337 	add_task(task);
    338 	return task->pid;
    339 err3:
    340 	kfree(task->stack);
    341 err2:
    342 	kfree(task->name);
    343 err1:
    344 	kfree(task);
    345 err:
    346 	return ret;
    347 }
    348 
    349 /* TODO: test this one */
    350 pid_t
    351 waitpid(pid_t pid, int *status, int options)
    352 {
    353 	struct list_head *iter, *q;
    354 	struct task_t *task;
    355 	pid_t retpid;
    356 	int haschld, pidexists;
    357 
    358 	assert(options == WNOHANG);
    359 	if (options != WNOHANG)
    360 		return -EINVAL;
    361 
    362 	haschld = 0;
    363 	pidexists = (pid == -1) ? 1 : 0;
    364 	for (;;) {
    365 		list_for_each_safe(iter, q, &runqueue) {
    366 			task = list_entry(iter, struct task_t, q_task);
    367 			if (pid != -1 && task->pid == pid)
    368 				pidexists = 1;
    369 			if (task->parent != curr_proc)
    370 				continue;
    371 			haschld = 1;
    372 			if (task->state != TASK_ZOMBIE)
    373 				continue;
    374 			if (status) *status = (int)task->status;
    375 			retpid = task->pid;
    376 			remove_task(task);
    377 			goto out;
    378 		}
    379 		if (!pidexists)
    380 			return -ECHILD;
    381 		if (options == WNOHANG) {
    382 			if (haschld)
    383 				return 0;
    384 			return -ECHILD;
    385 		}
    386 	}
    387 out:
    388 	return retpid;
    389 }
    390 
    391 static uint32_t
    392 hash_chan(void *chan)
    393 {
    394 	uint32_t *c = chan, hash = 0, i;
    395 
    396 	for (i = 0; i < 32; ++i)
    397 		hash = (*c & (1lu << i)) + (hash << 6) + (hash << 16) - hash;
    398 	return hash;
    399 }
    400 
    401 /* can be called as follows:
    402  * syscall -> suspend_task (from a syscall)
    403  * suspend_task (normally)
    404  * NOTE: suspend_task will fail if it is called
    405  * from nested syscalls (syscall depth > 1)
    406  */
    407 void
    408 suspend_task(void *channel)
    409 {
    410 	uint32_t i;
    411 
    412 	curr_proc->state = TASK_SLEEPING;
    413 	list_del(&curr_proc->q_task);
    414 	i = hash_chan(channel) % NR_HASH_ENT;
    415 	list_add_tail(&curr_proc->q_task, &htsleep[i]);
    416 	schedule();
    417 }
    418 
/*
 * Wake every task sleeping in the hash bucket for `channel`: each
 * one is marked TASK_RUNNABLE and moved back onto the runqueue.
 * Always returns 0.
 *
 * NOTE(review): the whole bucket is drained, and buckets are shared
 * by all channels that hash to the same slot — so tasks sleeping on
 * a *different* channel can be woken too.  Looks like sleepers are
 * expected to tolerate spurious wakeups; confirm at the call sites
 * of suspend_task().
 */
int
resume_task(void *channel)
{
	uint32_t i;
	struct list_head *iter, *q;
	struct task_t *task;

	i = hash_chan(channel) % NR_HASH_ENT;
	list_for_each_safe(iter, q, &htsleep[i]) {
		task = list_entry(iter, struct task_t, q_task);
		assert(task->state == TASK_SLEEPING);
		task->state = TASK_RUNNABLE;
		list_del(&task->q_task);
		list_add(&task->q_task, &runqueue);
	}
	return 0;
}
    436 
    437 uid_t
    438 getuid(void)
    439 {
    440 	return curr_proc->uid;
    441 }
    442 
    443 int
    444 setuid(__attribute__ ((unused)) uid_t uid)
    445 {
    446 	/* TODO: perform additional checks */
    447 	curr_proc->uid = uid; /* w00t w00t */
    448 	return 0;
    449 }
    450 
    451 gid_t
    452 getgid(void)
    453 {
    454 	return curr_proc->gid;
    455 }
    456 
    457 int
    458 setgid(__attribute__ ((unused)) gid_t gid)
    459 {
    460 	/* TODO: perform additional checks */
    461 	curr_proc->gid = gid;
    462 	return 0;
    463 }
    464