Merge pull request #14772 from maribu/esp_sched_cleanup
cpu/esp: Use API to access sched internals
This commit is contained in: commit 157705c0c6
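The change replaces every direct read of the scheduler internals sched_active_thread and sched_active_pid with calls to the public thread API. A minimal sketch of that mapping, assuming RIOT's thread.h interface; the helper function below is hypothetical and only illustrates the replacement, it is not part of the diff:

#include <stdio.h>
#include "thread.h"

/* Hypothetical helper, not from this PR: shows the API the PR switches to.
 * thread_get_active() returns the currently running thread, or NULL while
 * the scheduler is not running; thread_getpid() returns the active PID. */
static void print_active_thread(void)
{
    thread_t *active = thread_get_active();    /* was: sched_active_thread */

    if (active != NULL) {
        /* was: sched_active_pid or sched_active_thread->pid */
        printf("active thread: %d\n", (int)thread_getpid());
    }
}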
cpu/esp8266/vendor/esp-gdbstub/gdbstub.c (vendored), 6 changed lines
@@ -903,10 +903,10 @@ static void ATTR_GDBINIT install_exceptions(void)
 static void ATTR_GDBFN uart_hdlr(void *arg)
 {
-    assert(sched_active_thread != NULL);
-    assert(sched_active_thread->sp != NULL);
+    assert(thread_get_active() != NULL);
+    assert(thread_get_active()->sp != NULL);

-    XtExcFrame* frame = (XtExcFrame *)sched_active_thread->sp;
+    XtExcFrame* frame = (XtExcFrame *)thread_get_active()->sp;

     int doDebug=0;
     int fifolen=0;
@@ -64,7 +64,7 @@ static void task_delete_wrapper(void *task_handle)
 static void task_yield_wrapper(void)
 {
 #ifdef RIOT_VERSION
-    thread_yield_higher();
+    thread_yield_higher();
 #else
     portYIELD();
 #endif
@@ -73,7 +73,7 @@ static void task_yield_wrapper(void)
 static void task_yield_from_isr_wrapper(void)
 {
 #ifdef RIOT_VERSION
-    thread_yield_higher();
+    thread_yield_higher();
 #else
     portYIELD();
 #endif
@@ -152,7 +152,7 @@ static bool semphr_take_from_isr_wrapper(void *semphr, int *hptw)
     signed portBASE_TYPE ret;

     ret = xSemaphoreTakeFromISR(semphr, (signed portBASE_TYPE *)hptw);

     return ret == pdPASS ? true : false;
 }
@@ -397,13 +397,13 @@ static int32_t rand_wrapper(void)

 void *osi_task_top_sp(void)
 {
-    extern volatile thread_t *sched_active_thread;
-    return sched_active_thread ? sched_active_thread->sp : 0;
+    thread_t *active_thread = thread_get_active();
+    return active_thread ? active_thread->sp : 0;
 }

 const wifi_osi_funcs_t s_wifi_osi_funcs = {
     .version = ESP_WIFI_OS_ADAPTER_VERSION,

     .task_create = task_create_wrapper,
     .task_delete = task_delete_wrapper,
     .task_yield = task_yield_wrapper,
@@ -411,7 +411,7 @@ const wifi_osi_funcs_t s_wifi_osi_funcs = {
     .task_delay = task_delay_wrapper,
     .task_get_current_task = task_get_current_task_wrapper,
     .task_get_max_priority = task_get_max_priority_wrapper,

     .task_ms_to_tick = task_ms_to_tick_wrapper,

     .task_suspend_all = task_suspend_all_wrapper,
@@ -419,19 +419,19 @@ const wifi_osi_funcs_t s_wifi_osi_funcs = {

     .os_init = os_init_wrapper,
     .os_start = os_start_wrapper,

     .semphr_create = semphr_create_wrapper,
     .semphr_delete = semphr_delete_wrapper,
     .semphr_take_from_isr = semphr_take_from_isr_wrapper,
     .semphr_give_from_isr = semphr_give_from_isr_wrapper,
     .semphr_take = semphr_take_wrapper,
     .semphr_give = semphr_give_wrapper,

     .mutex_create = mutex_create_wrapper,
     .mutex_delete = mutex_delete_wrapper,
     .mutex_lock = mutex_lock_wrapper,
     .mutex_unlock = mutex_unlock_wrapper,

     .queue_create = queue_create_wrapper,
     .queue_delete = queue_delete_wrapper,
     .queue_send = queue_send_wrapper,
@@ -196,7 +196,7 @@ BaseType_t IRAM_ATTR _queue_generic_send(QueueHandle_t xQueue,
     }
     else {
         /* suspend the calling thread to wait for space in the queue */
-        thread_t *me = (thread_t*)sched_active_thread;
+        thread_t *me = thread_get_active();
         sched_set_status(me, STATUS_SEND_BLOCKED);
         /* waiting list is sorted by priority */
         thread_add_to_list(&queue->sending, me);
@@ -302,7 +302,7 @@ BaseType_t IRAM_ATTR _queue_generic_recv (QueueHandle_t xQueue,
     }
     else {
         /* suspend the calling thread to wait for an item in the queue */
-        thread_t *me = (thread_t*)sched_active_thread;
+        thread_t *me = thread_get_active();
         sched_set_status(me, STATUS_RECEIVE_BLOCKED);
         /* waiting list is sorted by priority */
         thread_add_to_list(&queue->receiving, me);
@@ -157,7 +157,7 @@ void IRAM_ATTR _lock_acquire(_lock_t *lock)
     }

     /* if scheduler is not running, we have not to lock the mutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return;
     }
@@ -175,7 +175,7 @@ void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
     }

     /* if scheduler is not running, we have not to lock the rmutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return;
     }
@@ -193,7 +193,7 @@ int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
     }

     /* if scheduler is not running, we have not to lock the mutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return 0;
     }
@@ -214,7 +214,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
     }

     /* if scheduler is not running, we have not to lock the rmutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return 0;
     }
@@ -230,7 +230,7 @@ void IRAM_ATTR _lock_release(_lock_t *lock)
     assert(lock != NULL && *lock != 0);

     /* if scheduler is not running, we have not to unlock the mutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return;
     }
@@ -242,7 +242,7 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
     assert(lock != NULL && *lock != 0);

     /* if scheduler is not running, we have not to unlock the rmutex */
-    if (sched_active_thread == NULL) {
+    if (thread_get_active() == NULL) {
         return;
     }
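All of the _lock_* hunks above share one guard: the newlib lock stubs return early while the scheduler is not yet running, a state that thread_get_active() now signals by returning NULL. A minimal sketch of that guard, assuming RIOT's mutex API; the wrapper below is hypothetical and only illustrates the pattern:

#include "mutex.h"
#include "thread.h"

/* Hypothetical wrapper, not from this PR: skip locking before the scheduler
 * starts, since no other thread can contend for the mutex yet and blocking
 * is impossible without an active thread. */
static void lock_if_scheduler_running(mutex_t *mtx)
{
    if (thread_get_active() == NULL) {
        return;    /* scheduler not running: nothing to protect against */
    }
    mutex_lock(mtx);
}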
@@ -262,10 +262,11 @@ void IRAM_ATTR thread_yield_higher(void)

     /* yield next task */
 #if defined(ENABLE_DEBUG) && defined(DEVELHELP)
-    if (sched_active_thread) {
+    thread_t *active_thread = thread_get_active();
+    if (active_thread) {
         DEBUG("%u old task %u %s %u\n", system_get_time(),
-              sched_active_thread->pid, sched_active_thread->name,
-              sched_active_thread->sp - sched_active_thread-> stack_start);
+              active_thread->pid, active_thread->name,
+              active_thread->sp - active_thread-> stack_start);
     }
 #endif
     if (!irq_is_in()) {
@@ -285,10 +286,11 @@ void IRAM_ATTR thread_yield_higher(void)
     }

 #if defined(ENABLE_DEBUG) && defined(DEVELHELP)
-    if (sched_active_thread) {
+    active_thread = thread_get_active();
+    if (active_thread) {
         DEBUG("%u new task %u %s %u\n", system_get_time(),
-              sched_active_thread->pid, sched_active_thread->name,
-              sched_active_thread->sp - sched_active_thread-> stack_start);
+              active_thread->pid, active_thread->name,
+              active_thread->sp - active_thread-> stack_start);
     }
 #endif
@@ -307,7 +309,7 @@ void thread_stack_print(void)
     /* Print the current stack to stdout. */

 #if defined(DEVELHELP)
-    volatile thread_t* task = thread_get(sched_active_pid);
+    thread_t* task = thread_get_active();
     if (task) {

         char* stack_top = task->stack_start + task->stack_size;
@@ -415,7 +417,7 @@ NORETURN void cpu_switch_context_exit(void)
 NORETURN void task_exit(void)
 {
     DEBUG("sched_task_exit: ending thread %" PRIkernel_pid "...\n",
-          sched_active_thread ? sched_active_thread->pid : KERNEL_PID_UNDEF);
+          thread_getpid());

     (void) irq_disable();