commit 4afc94ef43c676b8a0addd1850a6dfa571b674d7
parent 92fb432cc5ecfbae847aa23a16abbe77904889b2
Author: oblique <psyberbits@gmail.com>
Date: Sun, 28 Oct 2012 02:05:34 +0300
improve scheduler and add sleep(), msleep()
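
The scheduler now keeps a millisecond tick counter, ms_counter, which
sched() advances by SCHED_INT_MS on every timer interrupt. sleep() and
msleep() put the current task into TASK_SLEEP until the counter reaches
the task's wakeup_ms; schedule() now spins on wfi until the tick has
actually fired, and a new __idle() loop runs when nothing is runnable.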
Diffstat:
4 files changed, 102 insertions(+), 35 deletions(-)
diff --git a/include/kernel.h b/include/kernel.h
@@ -11,6 +11,7 @@
#include <regs.h>
#include <alloc.h>
#include <debug.h>
+#include <atomic.h>
#define __unused __attribute__((__unused__))
diff --git a/include/sched.h b/include/sched.h
@@ -5,8 +5,10 @@
#include <spinlock.h>
#include <list.h>
-typedef u32 pid_t;
+/* scheduler interval in milliseconds */
+#define SCHED_INT_MS 10
+typedef u32 pid_t;
typedef enum {
TASK_TERMINATE,
@@ -23,6 +25,7 @@ struct task_struct {
struct list_head list;
spinlock_t *lock;
void *stack_alloc;
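+ /* ms_counter value at which a sleeping task becomes runnable */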
+ u32 wakeup_ms;
};
@@ -36,6 +39,8 @@ current_task(void)
#define current (*current_task())
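+/* block the current task; actual resolution is the SCHED_INT_MS tick */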
+void sleep(u32 seconds);
+void msleep(u32 milliseconds);
int kthread_create(void (*routine)(void *), void *arg);
void schedule(void);
diff --git a/kernel/kmain.c b/kernel/kmain.c
@@ -8,8 +8,10 @@ static void
thread_func(void *arg)
{
u32 n = (u32)arg;
- while (1)
+ while (1) {
kprintf("thread %d\n", n);
+ msleep(500);
+ }
}
void
diff --git a/kernel/sched.c b/kernel/sched.c
@@ -9,6 +9,7 @@
struct task_struct *curr_task = NULL;
static struct list_head task_list_head;
static spinlock_t task_struct_lock = SPINLOCK_INIT;
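+/* monotonic millisecond counter, advanced by SCHED_INT_MS on every scheduler tick */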
+static uatomic_t ms_counter = UATOMIC_INIT(0);
static pid_t
get_new_pid(void)
@@ -77,54 +78,112 @@ kthread_create(void (*routine)(void *), void *arg)
void
schedule(void)
{
- /* trigger scheduler timer */
- dmtimer_trigger(1);
- while(1)
+ u32 ms = uatomic_read(&ms_counter);
+ /* loop until the scheduler has actually run; sched() is what advances ms_counter */
+ while (ms == uatomic_read(&ms_counter)) {
+ /* trigger scheduler timer */
+ dmtimer_trigger(1);
+ /* wait for interrupt */
asm volatile("wfi");
+ }
}
static void
+__idle(void)
+{
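+ /* no runnable task: halt the core until the next interrupt */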
+ while (1)
+ asm volatile("wfi");
+}
+
+static void
+__switch_to(struct regs *regs, struct task_struct *new_curr)
+{
+ if (!new_curr) {
+ /* if we don't have any runnable task,
+ * make irq_ex return to __idle */
+ regs->pc = (u32)&__idle;
+ /* add 4 because irq_ex subtracts 4 from the saved pc */
+ regs->pc += 4;
+ } else
+ *regs = new_curr->regs;
+ current = new_curr;
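+ /* barrier, then clear the exclusive monitor so a preempted
+ * ldrex/strex pair cannot falsely succeed in the new task */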
+ dsb();
+ asm volatile("clrex");
+}
+
+static void
sched(struct regs *regs)
{
+ struct list_head *iter, *curr_list;
+ struct task_struct *task, *new_curr;
+
+ /* TODO: if the scheduler was triggered by schedule(),
+ * add the real elapsed time instead of a full tick */
+ uatomic_add(SCHED_INT_MS, &ms_counter);
+
if (list_empty(&task_list_head))
return;
if (current) {
- struct list_head *iter;
- struct task_struct *task, *prev;
-
if (current->state != TASK_TERMINATE)
current->regs = *regs;
+ curr_list = &current->list;
+ } else
+ curr_list = &task_list_head;
- prev = current;
- list_for_each(iter, &prev->list) {
- if (iter == &task_list_head)
- continue;
- task = list_entry(iter, struct task_struct, list);
- if (task->state == TASK_RUNNABLE) {
- current = task;
- break;
- }
- }
+ new_curr = NULL;
- if (iter == &prev->list && prev->state != TASK_RUNNING)
- current = NULL;
-
- if (prev->state == TASK_TERMINATE) {
- spinlock_lock(prev->lock);
- list_del(&prev->list);
- spinlock_unlock(prev->lock);
- kfree(prev->stack_alloc);
- kfree(prev);
- } else if (prev != current)
- prev->state = TASK_RUNNABLE;
- } else
- current = list_first_entry(&task_list_head, struct task_struct, list);
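+ /* round-robin: scan from the task after the current one,
+ * wrapping around and skipping the list head */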
+ list_for_each(iter, curr_list) {
+ if (iter == &task_list_head)
+ continue;
- if (current) {
- current->state = TASK_RUNNING;
- *regs = current->regs;
+ task = list_entry(iter, struct task_struct, list);
+
+ if (task->state == TASK_RUNNABLE ||
+ (task->state == TASK_SLEEP &&
+ task->wakeup_ms <= uatomic_read(&ms_counter))) {
+ new_curr = task;
+ new_curr->state = TASK_RUNNING;
+ break;
+ }
}
+
+ if (!new_curr && current && current->state == TASK_RUNNING)
+ new_curr = current;
+
+ if (current && current->state == TASK_TERMINATE) {
+ spinlock_lock(current->lock);
+ list_del(&current->list);
+ spinlock_unlock(current->lock);
+ kfree(current->stack_alloc);
+ kfree(current);
+ } else if (current && current != new_curr && current->state == TASK_RUNNING)
+ current->state = TASK_RUNNABLE;
+
+ __switch_to(regs, new_curr);
+}
+
+void
+sleep(u32 seconds)
+{
+ current->state = TASK_SLEEP;
+ current->wakeup_ms = uatomic_read(&ms_counter) + seconds * 1000;
+ schedule();
+}
+
+void
+msleep(u32 milliseconds)
+{
+ current->state = TASK_SLEEP;
+ /* TODO: if milliseconds is less than SCHED_INT_MS,
+ * busy-wait instead of going through the scheduler */
+ if (milliseconds < SCHED_INT_MS)
+ milliseconds = SCHED_INT_MS;
+ current->wakeup_ms = uatomic_read(&ms_counter) + milliseconds;
+ schedule();
}
static void
@@ -138,5 +197,5 @@ void
sched_init(void)
{
INIT_LIST_HEAD(&task_list_head);
- dmtimer_register(1, sched_handler, 10);
+ dmtimer_register(1, sched_handler, SCHED_INT_MS);
}