cpu/esp: Use API to access sched internals

Replace access to `sched_active_task` and `sched_active_pid` with calls to
`thread_getpid()` and `thread_get_active()`.
This commit is contained in:
Marian Buschsieweke 2020-08-17 10:51:10 +02:00
parent 7abcc4242b
commit 35d46e6dc3
No known key found for this signature in database
GPG Key ID: 61F64C6599B1539F
5 changed files with 31 additions and 29 deletions

View File

@@ -903,10 +903,10 @@ static void ATTR_GDBINIT install_exceptions(void)
static void ATTR_GDBFN uart_hdlr(void *arg) static void ATTR_GDBFN uart_hdlr(void *arg)
{ {
assert(sched_active_thread != NULL); assert(thread_get_active() != NULL);
assert(sched_active_thread->sp != NULL); assert(thread_get_active()->sp != NULL);
XtExcFrame* frame = (XtExcFrame *)sched_active_thread->sp; XtExcFrame* frame = (XtExcFrame *)thread_get_active()->sp;
int doDebug=0; int doDebug=0;
int fifolen=0; int fifolen=0;

View File

@@ -64,7 +64,7 @@ static void task_delete_wrapper(void *task_handle)
static void task_yield_wrapper(void) static void task_yield_wrapper(void)
{ {
#ifdef RIOT_VERSION #ifdef RIOT_VERSION
thread_yield_higher(); thread_yield_higher();
#else #else
portYIELD(); portYIELD();
#endif #endif
@@ -73,7 +73,7 @@ static void task_yield_wrapper(void)
static void task_yield_from_isr_wrapper(void) static void task_yield_from_isr_wrapper(void)
{ {
#ifdef RIOT_VERSION #ifdef RIOT_VERSION
thread_yield_higher(); thread_yield_higher();
#else #else
portYIELD(); portYIELD();
#endif #endif
@@ -152,7 +152,7 @@ static bool semphr_take_from_isr_wrapper(void *semphr, int *hptw)
signed portBASE_TYPE ret; signed portBASE_TYPE ret;
ret = xSemaphoreTakeFromISR(semphr, (signed portBASE_TYPE *)hptw); ret = xSemaphoreTakeFromISR(semphr, (signed portBASE_TYPE *)hptw);
return ret == pdPASS ? true : false; return ret == pdPASS ? true : false;
} }
@@ -397,13 +397,13 @@ static int32_t rand_wrapper(void)
void *osi_task_top_sp(void) void *osi_task_top_sp(void)
{ {
extern volatile thread_t *sched_active_thread; thread_t *active_thread = thread_get_active();
return sched_active_thread ? sched_active_thread->sp : 0; return active_thread ? active_thread->sp : 0;
} }
const wifi_osi_funcs_t s_wifi_osi_funcs = { const wifi_osi_funcs_t s_wifi_osi_funcs = {
.version = ESP_WIFI_OS_ADAPTER_VERSION, .version = ESP_WIFI_OS_ADAPTER_VERSION,
.task_create = task_create_wrapper, .task_create = task_create_wrapper,
.task_delete = task_delete_wrapper, .task_delete = task_delete_wrapper,
.task_yield = task_yield_wrapper, .task_yield = task_yield_wrapper,
@@ -411,7 +411,7 @@ const wifi_osi_funcs_t s_wifi_osi_funcs = {
.task_delay = task_delay_wrapper, .task_delay = task_delay_wrapper,
.task_get_current_task = task_get_current_task_wrapper, .task_get_current_task = task_get_current_task_wrapper,
.task_get_max_priority = task_get_max_priority_wrapper, .task_get_max_priority = task_get_max_priority_wrapper,
.task_ms_to_tick = task_ms_to_tick_wrapper, .task_ms_to_tick = task_ms_to_tick_wrapper,
.task_suspend_all = task_suspend_all_wrapper, .task_suspend_all = task_suspend_all_wrapper,
@@ -419,19 +419,19 @@ const wifi_osi_funcs_t s_wifi_osi_funcs = {
.os_init = os_init_wrapper, .os_init = os_init_wrapper,
.os_start = os_start_wrapper, .os_start = os_start_wrapper,
.semphr_create = semphr_create_wrapper, .semphr_create = semphr_create_wrapper,
.semphr_delete = semphr_delete_wrapper, .semphr_delete = semphr_delete_wrapper,
.semphr_take_from_isr = semphr_take_from_isr_wrapper, .semphr_take_from_isr = semphr_take_from_isr_wrapper,
.semphr_give_from_isr = semphr_give_from_isr_wrapper, .semphr_give_from_isr = semphr_give_from_isr_wrapper,
.semphr_take = semphr_take_wrapper, .semphr_take = semphr_take_wrapper,
.semphr_give = semphr_give_wrapper, .semphr_give = semphr_give_wrapper,
.mutex_create = mutex_create_wrapper, .mutex_create = mutex_create_wrapper,
.mutex_delete = mutex_delete_wrapper, .mutex_delete = mutex_delete_wrapper,
.mutex_lock = mutex_lock_wrapper, .mutex_lock = mutex_lock_wrapper,
.mutex_unlock = mutex_unlock_wrapper, .mutex_unlock = mutex_unlock_wrapper,
.queue_create = queue_create_wrapper, .queue_create = queue_create_wrapper,
.queue_delete = queue_delete_wrapper, .queue_delete = queue_delete_wrapper,
.queue_send = queue_send_wrapper, .queue_send = queue_send_wrapper,

View File

@@ -196,7 +196,7 @@ BaseType_t IRAM_ATTR _queue_generic_send(QueueHandle_t xQueue,
} }
else { else {
/* suspend the calling thread to wait for space in the queue */ /* suspend the calling thread to wait for space in the queue */
thread_t *me = (thread_t*)sched_active_thread; thread_t *me = thread_get_active();
sched_set_status(me, STATUS_SEND_BLOCKED); sched_set_status(me, STATUS_SEND_BLOCKED);
/* waiting list is sorted by priority */ /* waiting list is sorted by priority */
thread_add_to_list(&queue->sending, me); thread_add_to_list(&queue->sending, me);
@@ -302,7 +302,7 @@ BaseType_t IRAM_ATTR _queue_generic_recv (QueueHandle_t xQueue,
} }
else { else {
/* suspend the calling thread to wait for an item in the queue */ /* suspend the calling thread to wait for an item in the queue */
thread_t *me = (thread_t*)sched_active_thread; thread_t *me = thread_get_active();
sched_set_status(me, STATUS_RECEIVE_BLOCKED); sched_set_status(me, STATUS_RECEIVE_BLOCKED);
/* waiting list is sorted by priority */ /* waiting list is sorted by priority */
thread_add_to_list(&queue->receiving, me); thread_add_to_list(&queue->receiving, me);

View File

@@ -157,7 +157,7 @@ void IRAM_ATTR _lock_acquire(_lock_t *lock)
} }
/* if scheduler is not running, we have not to lock the mutex */ /* if scheduler is not running, we have not to lock the mutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return; return;
} }
@@ -175,7 +175,7 @@ void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
} }
/* if scheduler is not running, we have not to lock the rmutex */ /* if scheduler is not running, we have not to lock the rmutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return; return;
} }
@@ -193,7 +193,7 @@ int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
} }
/* if scheduler is not running, we have not to lock the mutex */ /* if scheduler is not running, we have not to lock the mutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return 0; return 0;
} }
@@ -214,7 +214,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
} }
/* if scheduler is not running, we have not to lock the rmutex */ /* if scheduler is not running, we have not to lock the rmutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return 0; return 0;
} }
@@ -230,7 +230,7 @@ void IRAM_ATTR _lock_release(_lock_t *lock)
assert(lock != NULL && *lock != 0); assert(lock != NULL && *lock != 0);
/* if scheduler is not running, we have not to unlock the mutex */ /* if scheduler is not running, we have not to unlock the mutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return; return;
} }
@@ -242,7 +242,7 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
assert(lock != NULL && *lock != 0); assert(lock != NULL && *lock != 0);
/* if scheduler is not running, we have not to unlock the rmutex */ /* if scheduler is not running, we have not to unlock the rmutex */
if (sched_active_thread == NULL) { if (thread_get_active() == NULL) {
return; return;
} }

View File

@@ -262,10 +262,11 @@ void IRAM_ATTR thread_yield_higher(void)
/* yield next task */ /* yield next task */
#if defined(ENABLE_DEBUG) && defined(DEVELHELP) #if defined(ENABLE_DEBUG) && defined(DEVELHELP)
if (sched_active_thread) { thread_t *active_thread = thread_get_active();
if (active_thread) {
DEBUG("%u old task %u %s %u\n", system_get_time(), DEBUG("%u old task %u %s %u\n", system_get_time(),
sched_active_thread->pid, sched_active_thread->name, active_thread->pid, active_thread->name,
sched_active_thread->sp - sched_active_thread-> stack_start); active_thread->sp - active_thread-> stack_start);
} }
#endif #endif
if (!irq_is_in()) { if (!irq_is_in()) {
@@ -285,10 +286,11 @@ void IRAM_ATTR thread_yield_higher(void)
} }
#if defined(ENABLE_DEBUG) && defined(DEVELHELP) #if defined(ENABLE_DEBUG) && defined(DEVELHELP)
if (sched_active_thread) { active_thread = thread_get_active();
if (active_thread) {
DEBUG("%u new task %u %s %u\n", system_get_time(), DEBUG("%u new task %u %s %u\n", system_get_time(),
sched_active_thread->pid, sched_active_thread->name, active_thread->pid, active_thread->name,
sched_active_thread->sp - sched_active_thread-> stack_start); active_thread->sp - active_thread-> stack_start);
} }
#endif #endif
@@ -307,7 +309,7 @@ void thread_stack_print(void)
/* Print the current stack to stdout. */ /* Print the current stack to stdout. */
#if defined(DEVELHELP) #if defined(DEVELHELP)
volatile thread_t* task = thread_get(sched_active_pid); thread_t* task = thread_get_active();
if (task) { if (task) {
char* stack_top = task->stack_start + task->stack_size; char* stack_top = task->stack_start + task->stack_size;
@@ -415,7 +417,7 @@ NORETURN void cpu_switch_context_exit(void)
NORETURN void task_exit(void) NORETURN void task_exit(void)
{ {
DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n", DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
sched_active_thread ? sched_active_thread->pid : KERNEL_PID_UNDEF); thread_getpid());
(void) irq_disable(); (void) irq_disable();