voron

experimental ARM OS
git clone git://git.2f30.org/voron

sched.c (6377B)


#include <kernel.h>
#include <list.h>
#include <dmtimer.h>
#include <sched.h>
#include <mmu.h>
#include <irq.h>
#include <p_modes.h>

#define MAX_HASH_ENT	64

struct task_struct *current = NULL;
static struct list_head task_list_head;
static struct list_head ht_sleep[MAX_HASH_ENT];
static uatomic_t ms_counter = UATOMIC_INIT(0);
static int __sched_enabled = 0;

static pid_t
get_new_pid(void)
{
	static uatomic_t currpid = UATOMIC_INIT(0);
	return (pid_t)uatomic_add_return(1, &currpid);
}

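/* hash a wait-channel id; callers reduce the result modulo
 * MAX_HASH_ENT to pick an ht_sleep bucket */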
static inline u32
hash_chan(u32 channel)
{
	int i;
	u32 hash = 0;

	for (i = 0; i < 32; ++i)
		hash = (channel & (1 << i)) + (hash << 8) + (hash << 12) - hash;

	return hash;
}

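/* every kthread returns here (its lr is set in kthread_create):
 * mark the task terminated and let the scheduler reap it */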
static void
task_remove(void)
{
	current->state = TASK_TERMINATE;
	schedule();
}

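/* create a kernel thread that executes routine(arg) on its own
 * PAGE_SIZE stack; returns 0 on success or -ENOMEM */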
int
kthread_create(void (*routine)(void *), void *arg)
{
	struct task_struct *task;

	task = kmalloc(sizeof(*task));
	if (!task)
		return -ENOMEM;

	/* allocate stack */
	task->stack_alloc = kmalloc(PAGE_SIZE);
	if (!task->stack_alloc) {
		kfree(task);
		return -ENOMEM;
	}

	task->state = TASK_RUNNABLE;
	task->pid = get_new_pid();
	memset(&task->regs, 0, sizeof(task->regs));
	/* set thread stack */
	task->regs.sp = (u32)task->stack_alloc;
	task->regs.sp += PAGE_SIZE;
	/* set argument */
	task->regs.r0 = (u32)arg;
	/* set the function that the new thread will execute;
	 * we must add 4 because irq_ex will subtract 4 */
	task->regs.pc = (u32)routine;
	task->regs.pc += 4;
	/* set return address */
	task->regs.lr = (u32)task_remove;
	/* thread will run in System mode */
	task->regs.cpsr = CPS_SYS;

	/* add it to the scheduler's task list */
	if (sched_is_enabled()) {
		sched_disable();
		list_add(&task->list, &task_list_head);
		sched_enable();
	} else
		list_add(&task->list, &task_list_head);

	return 0;
}

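/* voluntarily give up the CPU: raise SGI 1 to run the scheduler and
 * spin on wfi until this task has been picked to run again */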
void
schedule(void)
{
	if (current)
		current->scheduled = 0;
	/* trigger SGI */
	irq_trigger_sgi(1);
	/* make sure that we have been rescheduled */
	while (1) {
		if (current && current->state != TASK_TERMINATE &&
		    current->scheduled)
			break;
		asm volatile("wfi" : : : "memory");
	}
}

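/* executed when no task is runnable: wait for the next interrupt */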
static void
__idle(void)
{
	while (1)
		asm volatile("wfi" : : : "memory");
}

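/* install the chosen task's register frame into the exception frame
 * that irq_ex restores on return */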
static inline void
__switch_to(struct regs *regs, struct task_struct *new_curr)
{
	if (!new_curr) {
		/* if we don't have any process
		 * make irq_ex return to __idle */
		regs->pc = (u32)__idle;
		/* we must add 4 because irq_ex subtracts 4 */
		regs->pc += 4;
	} else
		*regs = new_curr->regs;
	current = new_curr;
	/* data synchronization barrier */
	dsb();
	/* clear exclusive address access */
	asm volatile("clrex" : : : "memory");
}

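/* core scheduler, runs in IRQ context: save the interrupted task's
 * registers, pick the next runnable (or due-to-wake) task round-robin
 * starting after current, park suspended tasks in ht_sleep, reap
 * terminated ones and switch to the chosen task (or __idle) */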
static void
sched(struct regs *regs)
{
	struct list_head *iter, *curr_list;
	struct task_struct *task, *new_curr;

	if (list_empty(&task_list_head))
		return;

	if (current) {
		current->scheduled = 1;
		if (current->state != TASK_TERMINATE)
			current->regs = *regs;
		curr_list = &current->list;
	} else
		curr_list = &task_list_head;

	new_curr = NULL;

	list_for_each(iter, curr_list) {
		if (iter == &task_list_head)
			continue;

		task = list_entry(iter, struct task_struct, list);

		if (task->state == TASK_SLEEPING &&
		    task->sleep_reason == SLEEPR_SLEEP &&
		    task->wakeup_ms <= uatomic_read(&ms_counter)) {
			new_curr = task;
			new_curr->state = TASK_RUNNING;
			break;
		} else if (task->state == TASK_RUNNABLE) {
			new_curr = task;
			new_curr->state = TASK_RUNNING;
			break;
		}
	}

	if (current) {
		if (current->state == TASK_SLEEPING &&
		    current->sleep_reason == SLEEPR_SUSPEND) {
			int i = hash_chan(current->sleep_chan) % MAX_HASH_ENT;
			list_del(&current->list);
			list_add(&current->list, &ht_sleep[i]);
		} else if (current->state == TASK_RUNNING) {
			if (!new_curr)
				new_curr = current;
			else
				current->state = TASK_RUNNABLE;
		} else if (current->state == TASK_TERMINATE) {
			list_del(&current->list);
			kfree(current->stack_alloc);
			kfree(current);
		}
	}

	__switch_to(regs, new_curr);
}

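/* the enable/disable pair toggles __sched_enabled with IRQs masked so
 * the update cannot race the timer and SGI handlers */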
void
sched_enable()
{
	u32 flags = irq_get_flags();
	irq_disable();
	writel(1, &__sched_enabled);
	irq_set_flags(flags);
}

void
sched_disable()
{
	u32 flags = irq_get_flags();
	irq_disable();
	writel(0, &__sched_enabled);
	irq_set_flags(flags);
}

int
sched_is_enabled()
{
	return readl(&__sched_enabled);
}

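/* block the current task on a wait channel until resume_tasks(channel)
 * wakes it up */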
void
suspend_task(u32 channel)
{
	sched_disable();
	current->sleep_chan = channel;
	current->sleep_reason = SLEEPR_SUSPEND;
	current->state = TASK_SLEEPING;
	sched_enable();
	schedule();
}

void
suspend_task_no_schedule(u32 channel)
{
	/* caller *must* call sched_disable()
	 * before this function */
	current->sleep_chan = channel;
	current->sleep_reason = SLEEPR_SUSPEND;
	current->state = TASK_SLEEPING;
	/* caller *must* call sched_enable()
	 * and then schedule() after this function */
}

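/* wake every task sleeping on the given channel: move it from its
 * ht_sleep bucket back onto the runnable task list */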
void
resume_tasks(u32 channel)
{
	struct list_head *iter, *n;
	struct task_struct *task;
	int i, sched_e;

	i = hash_chan(channel) % MAX_HASH_ENT;
	list_for_each_safe(iter, n, &ht_sleep[i]) {
		task = list_entry(iter, struct task_struct, list);
		if (task->sleep_chan == channel) {
			task->state = TASK_RUNNABLE;
			sched_e = sched_is_enabled();
			if (sched_e)
				sched_disable();
			list_del(iter);
			list_add(iter, &task_list_head);
			if (sched_e)
				sched_enable();
		}
	}
}

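/* sleep for the given number of seconds; the periodic scheduler tick
 * wakes the task once ms_counter reaches wakeup_ms */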
void
sleep(u32 seconds)
{
	sched_disable();
	current->wakeup_ms = uatomic_read(&ms_counter) + seconds * 1000;
	current->sleep_reason = SLEEPR_SLEEP;
	current->state = TASK_SLEEPING;
	sched_enable();
	/* schedule */
	schedule();
}

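/* like sleep() but in milliseconds; delays shorter than the scheduler
 * tick are rounded up to SCHED_INT_MS */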
void
msleep(u32 milliseconds)
{
	sched_disable();
	/* TODO: if ms is smaller than SCHED_INT_MS
	 * do a loop and don't schedule */
	if (milliseconds < SCHED_INT_MS)
		milliseconds = SCHED_INT_MS;
	current->wakeup_ms = uatomic_read(&ms_counter) + milliseconds;
	current->sleep_reason = SLEEPR_SLEEP;
	current->state = TASK_SLEEPING;
	sched_enable();
	schedule();
}

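/* periodic dmtimer callback: advance the millisecond counter and run
 * the scheduler */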
static void
sched_handler(__unused int timer_id, struct regs *regs)
{
	uatomic_add(SCHED_INT_MS, &ms_counter);
	if (sched_is_enabled())
		sched(regs);
}

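/* SGI 1 handler used by schedule() to force a reschedule between
 * timer ticks */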
static void
manual_sched_handler(__unused u32 irq_num, struct regs *regs)
{
	if (sched_is_enabled())
		sched(regs);
}

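/* constructor: set up the task list and sleep hash table, register the
 * SGI and periodic timer handlers, then enable scheduling */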
__attribute__((__constructor__))
void
sched_init(void)
{
	size_t i;

	INIT_LIST_HEAD(&task_list_head);
	for (i = 0; i < ARRAY_SIZE(ht_sleep); i++)
		INIT_LIST_HEAD(&ht_sleep[i]);

	irq_register(1, manual_sched_handler);
	dmtimer_register(1, sched_handler, SCHED_INT_MS);
	sched_enable();
}