diff --git a/cpu/native/irq_cpu.c b/cpu/native/irq_cpu.c
index b899534a3f..145a09ab7f 100644
--- a/cpu/native/irq_cpu.c
+++ b/cpu/native/irq_cpu.c
@@ -292,7 +292,6 @@ void native_irq_handler()
     }
 
     DEBUG("native_irq_handler(): return");
-    _native_in_isr = 0;
     cpu_switch_context_exit();
 }
 
diff --git a/cpu/native/native_cpu.c b/cpu/native/native_cpu.c
index e26d62c063..7b82a4a25c 100644
--- a/cpu/native/native_cpu.c
+++ b/cpu/native/native_cpu.c
@@ -104,7 +104,7 @@ char *thread_stack_init(void (*task_func)(void), void *stack_start, int stacksiz
     return (char *) p;
 }
 
-void cpu_switch_context_exit(void)
+void isr_cpu_switch_context_exit(void)
 {
     ucontext_t *ctx;
 
@@ -119,36 +119,64 @@ void cpu_switch_context_exit(void)
     /* the next context will have interrupts enabled due to ucontext */
     DEBUG("XXX: cpu_switch_context_exit: native_interrupts_enabled = 1;\n");
     native_interrupts_enabled = 1;
+    _native_in_isr = 0;
 
     if (setcontext(ctx) == -1) {
         err(1, "cpu_switch_context_exit(): setcontext():");
     }
 }
 
-void thread_yield()
+void cpu_switch_context_exit()
 {
-    /**
-     * XXX: check whether it is advisable to switch context for sched_run()
-     */
-
-    ucontext_t *oc, *nc;
-
-    DEBUG("thread_yield()\n");
-
-    oc = (ucontext_t *)(active_thread->sp);
-
-    sched_run();
-
-    nc = (ucontext_t *)(active_thread->sp);
-
-    if (nc != oc) {
-        DEBUG("thread_yield(): calling swapcontext(%s)\n\n", active_thread->name);
-
-        if (swapcontext(oc, nc) == -1) {
-            err(1, "thread_yield(): swapcontext()");
+    if (_native_in_isr == 0) {
+        dINT();
+        _native_in_isr = 1;
+        native_isr_context.uc_stack.ss_sp = __isr_stack;
+        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
+        native_isr_context.uc_stack.ss_flags = 0;
+        makecontext(&native_isr_context, isr_cpu_switch_context_exit, 0);
+        if (setcontext(&native_isr_context) == -1) {
+            err(1, "cpu_switch_context_exit: setcontext");
         }
     }
     else {
-        DEBUG("thread_yield(): old = new, returning to context (%s)\n\n", active_thread->name);
+        isr_cpu_switch_context_exit();
+    }
+
+    errx(EXIT_FAILURE, "this should have never been reached!!");
+}
+
+void isr_thread_yield()
+{
+    DEBUG("isr_thread_yield()\n");
+
+    sched_run();
+    ucontext_t *ctx = (ucontext_t *)(active_thread->sp);
+    DEBUG("isr_thread_yield(): switching to(%s)\n\n", active_thread->name);
+
+    native_interrupts_enabled = 1;
+    _native_in_isr = 0;
+    if (setcontext(ctx) == -1) {
+        err(1, "isr_thread_yield(): setcontext()");
+    }
+}
+
+void thread_yield()
+{
+    ucontext_t *ctx = (ucontext_t *)(active_thread->sp);
+    if (_native_in_isr == 0) {
+        _native_in_isr = 1;
+        dINT();
+        native_isr_context.uc_stack.ss_sp = __isr_stack;
+        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
+        native_isr_context.uc_stack.ss_flags = 0;
+        makecontext(&native_isr_context, isr_thread_yield, 0);
+        if (swapcontext(ctx, &native_isr_context) == -1) {
+            err(1, "thread_yield: swapcontext");
+        }
+        eINT();
+    }
+    else {
+        isr_thread_yield();
     }
 }
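
For context, here is a standalone sketch of the ucontext pattern this patch adopts: instead of running the scheduler on the yielding thread's own stack, thread_yield() now swaps onto a dedicated "ISR" stack, runs the scheduler there, and then jumps into whichever thread the scheduler picked. This is not RIOT code; all identifiers (fake_sched, yielding_thread, STACK_SIZE) are made up for illustration, and only the getcontext/makecontext/swapcontext/setcontext calls mirror what the patch does with native_isr_context and __isr_stack.

#include <err.h>
#include <stdio.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t isr_context;   /* runs "scheduler" code on its own stack */
static ucontext_t thread_ctx;    /* the "thread" that yields */
static ucontext_t main_ctx;      /* bootstrap context we return to */

static char isr_stack[STACK_SIZE];
static char thread_stack[STACK_SIZE];

/* Runs on the ISR stack. In the patch this is where sched_run() executes
 * before jumping into active_thread->sp; here we jump straight back to
 * main to keep the sketch short. */
static void fake_sched(void)
{
    puts("scheduler: running on the dedicated ISR stack");
    if (setcontext(&main_ctx) == -1) {
        err(1, "fake_sched: setcontext");
    }
}

/* Mirrors the patched thread_yield(): point the ISR context at its own
 * stack, aim it at the scheduler entry, then swap onto it so the
 * scheduler never runs on this thread's stack. */
static void yielding_thread(void)
{
    puts("thread: yielding");
    isr_context.uc_stack.ss_sp = isr_stack;
    isr_context.uc_stack.ss_size = sizeof(isr_stack);
    isr_context.uc_stack.ss_flags = 0;
    isr_context.uc_link = &main_ctx;
    makecontext(&isr_context, fake_sched, 0);
    if (swapcontext(&thread_ctx, &isr_context) == -1) {
        err(1, "yielding_thread: swapcontext");
    }
    /* A real scheduler would setcontext() back here eventually. */
}

int main(void)
{
    /* makecontext() requires contexts initialized by getcontext(). */
    if (getcontext(&thread_ctx) == -1 || getcontext(&isr_context) == -1) {
        err(1, "getcontext");
    }
    thread_ctx.uc_stack.ss_sp = thread_stack;
    thread_ctx.uc_stack.ss_size = sizeof(thread_stack);
    thread_ctx.uc_link = &main_ctx;
    makecontext(&thread_ctx, yielding_thread, 0);

    if (swapcontext(&main_ctx, &thread_ctx) == -1) {
        err(1, "main: swapcontext");
    }
    puts("main: resumed after the scheduler ran");
    return 0;
}

Note how this mirrors the two call sites in the patch: both cpu_switch_context_exit() and thread_yield() configure native_isr_context on __isr_stack and transfer onto it; the only differences are the entry point passed to makecontext() and whether the calling context is saved for later resumption (swapcontext) or abandoned (setcontext).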