Merge pull request #14772 from maribu/esp_sched_cleanup

cpu/esp: Use API to access sched internals
This commit is contained in:
benpicco 2020-08-20 00:09:35 +02:00 committed by GitHub
commit 157705c0c6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 31 additions and 29 deletions

View File

@ -903,10 +903,10 @@ static void ATTR_GDBINIT install_exceptions(void)
static void ATTR_GDBFN uart_hdlr(void *arg)
{
assert(sched_active_thread != NULL);
assert(sched_active_thread->sp != NULL);
assert(thread_get_active() != NULL);
assert(thread_get_active()->sp != NULL);
XtExcFrame* frame = (XtExcFrame *)sched_active_thread->sp;
XtExcFrame* frame = (XtExcFrame *)thread_get_active()->sp;
int doDebug=0;
int fifolen=0;

View File

@ -397,8 +397,8 @@ static int32_t rand_wrapper(void)
/**
 * @brief   Return the current stack pointer of the active thread.
 *
 * Adapter for the ESP WiFi OSI function table: exposes the RIOT
 * scheduler's active thread stack pointer to the vendor blob.
 *
 * @return  stack pointer of the currently active thread, or 0 if the
 *          scheduler has not started any thread yet
 */
void *osi_task_top_sp(void)
{
    /* use the sched API instead of poking at sched internals directly */
    thread_t *active_thread = thread_get_active();
    return active_thread ? active_thread->sp : 0;
}
const wifi_osi_funcs_t s_wifi_osi_funcs = {

View File

@ -196,7 +196,7 @@ BaseType_t IRAM_ATTR _queue_generic_send(QueueHandle_t xQueue,
}
else {
/* suspend the calling thread to wait for space in the queue */
thread_t *me = (thread_t*)sched_active_thread;
thread_t *me = thread_get_active();
sched_set_status(me, STATUS_SEND_BLOCKED);
/* waiting list is sorted by priority */
thread_add_to_list(&queue->sending, me);
@ -302,7 +302,7 @@ BaseType_t IRAM_ATTR _queue_generic_recv (QueueHandle_t xQueue,
}
else {
/* suspend the calling thread to wait for an item in the queue */
thread_t *me = (thread_t*)sched_active_thread;
thread_t *me = thread_get_active();
sched_set_status(me, STATUS_RECEIVE_BLOCKED);
/* waiting list is sorted by priority */
thread_add_to_list(&queue->receiving, me);

View File

@ -157,7 +157,7 @@ void IRAM_ATTR _lock_acquire(_lock_t *lock)
}
/* if the scheduler is not running, we do not need to lock the mutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return;
}
@ -175,7 +175,7 @@ void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
}
/* if the scheduler is not running, we do not need to lock the rmutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return;
}
@ -193,7 +193,7 @@ int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
}
/* if the scheduler is not running, we do not need to lock the mutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return 0;
}
@ -214,7 +214,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
}
/* if the scheduler is not running, we do not need to lock the rmutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return 0;
}
@ -230,7 +230,7 @@ void IRAM_ATTR _lock_release(_lock_t *lock)
assert(lock != NULL && *lock != 0);
/* if the scheduler is not running, we do not need to unlock the mutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return;
}
@ -242,7 +242,7 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
assert(lock != NULL && *lock != 0);
/* if the scheduler is not running, we do not need to unlock the rmutex */
if (sched_active_thread == NULL) {
if (thread_get_active() == NULL) {
return;
}

View File

@ -262,10 +262,11 @@ void IRAM_ATTR thread_yield_higher(void)
/* yield next task */
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
if (sched_active_thread) {
thread_t *active_thread = thread_get_active();
if (active_thread) {
DEBUG("%u old task %u %s %u\n", system_get_time(),
sched_active_thread->pid, sched_active_thread->name,
sched_active_thread->sp - sched_active_thread-> stack_start);
active_thread->pid, active_thread->name,
active_thread->sp - active_thread-> stack_start);
}
#endif
if (!irq_is_in()) {
@ -285,10 +286,11 @@ void IRAM_ATTR thread_yield_higher(void)
}
#if defined(ENABLE_DEBUG) && defined(DEVELHELP)
if (sched_active_thread) {
active_thread = thread_get_active();
if (active_thread) {
DEBUG("%u new task %u %s %u\n", system_get_time(),
sched_active_thread->pid, sched_active_thread->name,
sched_active_thread->sp - sched_active_thread-> stack_start);
active_thread->pid, active_thread->name,
active_thread->sp - active_thread-> stack_start);
}
#endif
@ -307,7 +309,7 @@ void thread_stack_print(void)
/* Print the current stack to stdout. */
#if defined(DEVELHELP)
volatile thread_t* task = thread_get(sched_active_pid);
thread_t* task = thread_get_active();
if (task) {
char* stack_top = task->stack_start + task->stack_size;
@ -415,7 +417,7 @@ NORETURN void cpu_switch_context_exit(void)
NORETURN void task_exit(void)
{
DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
sched_active_thread ? sched_active_thread->pid : KERNEL_PID_UNDEF);
thread_getpid());
(void) irq_disable();