tinythread

tiny threading library for linux
git clone git://git.2f30.org/tinythread

thread.c (5606B)


/* See LICENSE file for copyright and license details. */
#define _GNU_SOURCE /* clone(2), CLONE_* and MAP_ANONYMOUS; may also be set by the build */
#include <sys/wait.h>
#include <sys/mman.h>
#include <sched.h>
#include <signal.h> /* SIGCHLD */
#include <unistd.h>
#include <errno.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "list.h"
#include "thread.h"

struct thread {
	struct list_head list;
	void *stack;
	pid_t pid;
	int (*fn)(void *);
	void *args;
};

struct thread_ctx {
	/* Configuration parameters for the library */
	struct thread_config thread_config;
	/* Lock used internally to ensure the core is protected */
	spinlock_t lock;
	/* List of all registered threads */
	struct list_head thread_list;
};

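/* Allocate and initialise a library context.  Stack and guard sizes taken
 * from tc are rounded up to whole pages; with tc == NULL the defaults are
 * a 16-page stack and a one-page guard.  Returns NULL and stores -ENOMEM
 * in *rval on allocation failure. */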
struct thread_ctx *
thread_init(const struct thread_config *tc, int *rval)
{
	struct thread_ctx *tctx;
	long page_size;
	size_t stack_size;
	size_t guard_size;

	tctx = calloc(1, sizeof *tctx);
	if (!tctx) {
		if (rval)
			*rval = -ENOMEM;
		return NULL;
	}

	spinlock_init(&tctx->lock);
	INIT_LIST_HEAD(&tctx->thread_list);

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0)
		page_size = 4096;

	if (tc) {
		/* Ensure stack size and guard size are
		 * page aligned */
		stack_size = (tc->stack_size +
			      (page_size - 1)) & ~(page_size - 1);
		guard_size = (tc->guard_size +
			      (page_size - 1)) & ~(page_size - 1);
		tctx->thread_config.stack_size = stack_size;
		tctx->thread_config.guard_size = guard_size;
	} else {
		tctx->thread_config.stack_size = page_size * 16;
		tctx->thread_config.guard_size = page_size;
	}

	return tctx;
}

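/* Create a thread running fn(arg) on a freshly mmap'd stack with a
 * PROT_NONE guard region at its low end.  The child shares the address
 * space, file descriptors, filesystem context, signal handlers and SysV
 * semaphore adjustments with the caller.  Returns the new thread's pid,
 * or a negative errno value on failure. */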
pid_t
thread_register(struct thread_ctx *tctx, int (*fn)(void *), void *arg)
{
	struct thread *t;
	pid_t ret;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	t = malloc(sizeof *t);
	if (!t) {
		release(&tctx->lock);
		return -ENOMEM;
	}

	t->stack = mmap(0, tctx->thread_config.stack_size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (t->stack == MAP_FAILED) {
		ret = -errno;
		goto free_thread;
	}

	/* Set the guard page at the end of the stack */
	ret = mprotect(t->stack, tctx->thread_config.guard_size, PROT_NONE);
	if (ret < 0) {
		ret = -errno;
		goto free_stack;
	}

	t->pid = clone(fn, (char *)t->stack + tctx->thread_config.stack_size,
		       SIGCHLD | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
		       CLONE_VM | CLONE_SYSVSEM, arg, NULL, NULL, NULL);
	if (t->pid < 0) {
		ret = -errno;
		goto free_stack;
	}

	t->fn = fn;
	t->args = arg;

	list_add_tail(&t->list, &tctx->thread_list);
	release(&tctx->lock);

	return t->pid;

free_stack:
	munmap(t->stack, tctx->thread_config.stack_size);
free_thread:
	free(t);
	release(&tctx->lock);
	return ret;
}

/* Called with lock acquired */
static void
thread_remove(struct thread_ctx *tctx, pid_t pid)
{
	struct list_head *iter, *q;
	struct thread *tmp;

	/* A hash table here would be better for many threads */
	list_for_each_safe(iter, q, &tctx->thread_list) {
		tmp = list_entry(iter, struct thread, list);
		if (tmp->pid == pid) {
			list_del(&tmp->list);
			munmap(tmp->stack, tctx->thread_config.stack_size);
			free(tmp);
			break;
		}
	}
}

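/* Reap a single thread.  Blocks until any child exits; if it is the
 * requested pid, its stack and bookkeeping are released and its exit
 * status returned.  Otherwise -EAGAIN is returned. */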
int
thread_wait(struct thread_ctx *tctx, pid_t pid)
{
	pid_t p;
	int status;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	p = wait(&status);
	if (p < 0) {
		release(&tctx->lock);
		return -errno;
	}
	if (p == pid) {
		thread_remove(tctx, p);
		release(&tctx->lock);
		return WEXITSTATUS(status);
	}
	release(&tctx->lock);

	return -EAGAIN;
}

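/* Wait until the thread with the given pid exits, reaping other children
 * along the way, and return its exit status or a negative errno value. */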
int
thread_wait_blocking(struct thread_ctx *tctx, pid_t pid)
{
	pid_t p;
	int status;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	while (1) {
		p = wait(&status);
		if (p < 0) {
			release(&tctx->lock);
			return -errno;
		}
		if (p == pid) {
			thread_remove(tctx, p);
			break;
		}
	}
	release(&tctx->lock);

	return WEXITSTATUS(status);
}

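/* Block until every registered thread has exited, releasing each one's
 * stack and bookkeeping.  Returns 0 on success or a negative errno value. */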
int
thread_wait_all_blocking(struct thread_ctx *tctx)
{
	pid_t pid;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	while (!list_empty(&tctx->thread_list)) {
		pid = wait(NULL);
		if (pid < 0) {
			release(&tctx->lock);
			return -errno;
		}
		thread_remove(tctx, pid);
	}
	release(&tctx->lock);

	return 0;
}

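/* Free the context: unmap the stacks of all still-registered threads and
 * release their bookkeeping.  No waiting is performed. */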
void
thread_exit(struct thread_ctx *tctx)
{
	struct list_head *iter, *q;
	struct thread *tmp;

	if (!tctx)
		return;

	acquire(&tctx->lock);
	while (!list_empty(&tctx->thread_list)) {
		/* A hash table here would be better for many threads */
		list_for_each_safe(iter, q, &tctx->thread_list) {
			tmp = list_entry(iter, struct thread, list);
			list_del(&tmp->list);
			munmap(tmp->stack, tctx->thread_config.stack_size);
			free(tmp);
		}
	}
	release(&tctx->lock);
	free(tctx);
}

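/* Threads are created without CLONE_THREAD, so each one is a separate
 * process and getpid() in the child yields its thread id. */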
pid_t
thread_id(void)
{
	return getpid();
}

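/* Fetch the scheduling parameters and policy of a registered thread.
 * Returns 0 on success, a negative errno value on failure, or -EINVAL if
 * pid is not registered. */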
int
thread_get_sched_param(struct thread_ctx *tctx, pid_t pid, int *policy,
		       struct sched_param *param)
{
	struct list_head *iter;
	struct thread *tmp;
	int ret = -EINVAL;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	list_for_each(iter, &tctx->thread_list) {
		tmp = list_entry(iter, struct thread, list);
		if (tmp->pid == pid) {
			ret = sched_getparam(tmp->pid, param);
			if (ret < 0)
				ret = -errno;
			else
				*policy = sched_getscheduler(tmp->pid);
			break;
		}
	}
	release(&tctx->lock);

	return ret;
}

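/* Set the scheduling policy and parameters of a registered thread.
 * Returns 0 on success, a negative errno value on failure, or -EINVAL if
 * pid is not registered. */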
int
thread_set_sched_param(struct thread_ctx *tctx, pid_t pid, int policy,
		       const struct sched_param *param)
{
	struct list_head *iter;
	struct thread *tmp;
	int ret = -EINVAL;

	if (!tctx)
		return -EINVAL;

	acquire(&tctx->lock);
	list_for_each(iter, &tctx->thread_list) {
		tmp = list_entry(iter, struct thread, list);
		if (tmp->pid == pid) {
			ret = sched_setscheduler(tmp->pid, policy, param);
			if (ret < 0)
				ret = -errno;
			break;
		}
	}
	release(&tctx->lock);

	return ret;
}
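
A minimal usage sketch, not part of the repository, showing how the API above
fits together. It assumes thread.h declares the functions defined here and a
struct thread_config with the stack_size and guard_size fields that
thread_init() reads:

/* example.c - hypothetical usage sketch; build together with thread.c */
#include <sys/types.h>
#include <stdio.h>
#include <string.h>

#include "thread.h"

static int
worker(void *arg)
{
	/* Runs in a child process that shares the parent's address space */
	printf("worker %d running as pid %d\n", *(int *)arg, (int)thread_id());
	return 0;
}

int
main(void)
{
	/* Sizes are rounded up to whole pages by thread_init() */
	struct thread_config tc = { .stack_size = 64 * 1024, .guard_size = 4096 };
	struct thread_ctx *tctx;
	int ids[4], rval, i;
	pid_t pid;

	tctx = thread_init(&tc, &rval);	/* or thread_init(NULL, NULL) for the defaults */
	if (!tctx)
		return 1;

	for (i = 0; i < 4; i++) {
		ids[i] = i;
		pid = thread_register(tctx, worker, &ids[i]);
		if (pid < 0)
			fprintf(stderr, "thread_register: %s\n", strerror(-pid));
	}

	thread_wait_all_blocking(tctx);	/* reap every registered thread */
	thread_exit(tctx);		/* unmap stacks, free the context */
	return 0;
}

Because each thread is a separate process sharing the parent's memory, a
worker's return value becomes its process exit status, which thread_wait()
and thread_wait_blocking() report via WEXITSTATUS().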