1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2025-12-18 02:53:52 +01:00

core: add core_mutex_debug to aid debugging deadlocks

Adding `USEMODULE += core_mutex_debug` to your `Makefile` results in
log messages such as

    [mutex] waiting for thread 1 (pc = 0x800024d)

being added whenever `mutex_lock()` blocks. This makes tracing down
deadlocks easier.
This commit is contained in:
Marian Buschsieweke 2022-09-21 13:37:04 +02:00
parent 8b58e55580
commit 3e86e39646
No known key found for this signature in database
GPG Key ID: CB8E3238CE715A94
3 changed files with 79 additions and 8 deletions

View File

@ -39,6 +39,9 @@ config MODULE_CORE_MSG_BUS
help help
Messaging Bus API for inter process message broadcast. Messaging Bus API for inter process message broadcast.
config MODULE_CORE_MUTEX_DEBUG
bool "Aid debugging deadlocks by printing on whom mutex_lock() is waiting"
config MODULE_CORE_MUTEX_PRIORITY_INHERITANCE config MODULE_CORE_MUTEX_PRIORITY_INHERITANCE
bool "Use priority inheritance to mitigate priority inversion for mutexes" bool "Use priority inheritance to mitigate priority inversion for mutexes"

View File

@ -97,6 +97,25 @@
* `MUTEX_LOCK`. * `MUTEX_LOCK`.
* - The scheduler is run, so that if the unblocked waiting thread can * - The scheduler is run, so that if the unblocked waiting thread can
* run now, in case it has a higher priority than the running thread. * run now, in case it has a higher priority than the running thread.
*
* Debugging deadlocks
* -------------------
*
* The module `core_mutex_debug` can be used to print on whom `mutex_lock()`
* is waiting. This information includes the thread ID of the owner and the
* program counter (PC) from where `mutex_lock()` was called. Note that the
* information is only valid if:
*
* - The mutex was locked by a thread, and not e.g. by `MUTEX_INIT_LOCKED`
* - The function `cpu_get_caller_pc()` is implemented for the target
* architecture. (The thread ID will remain valid, though.)
* - The caller PC is briefly 0 when the current owner passes over ownership
* to the next thread, but that thread didn't get CPU time yet to write its
* PC into the data structure. Even worse, on architectures where an aligned
* function-pointer-sized write is not atomic, the value may briefly be
 * bogus. Chances are close to zero that this case is ever hit, and since
 * it only affects debug output, the ostrich algorithm was chosen here.
*
* @{ * @{
* *
* @file * @file
@ -112,6 +131,7 @@
#include <stdint.h> #include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
#include "architecture.h"
#include "kernel_defines.h" #include "kernel_defines.h"
#include "list.h" #include "list.h"
#include "thread.h" #include "thread.h"
@ -130,7 +150,8 @@ typedef struct {
* @internal * @internal
*/ */
list_node_t queue; list_node_t queue;
#if defined(DOXYGEN) || defined(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) #if defined(DOXYGEN) || defined(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) \
|| defined(MODULE_CORE_MUTEX_DEBUG)
/** /**
* @brief The current owner of the mutex or `NULL` * @brief The current owner of the mutex or `NULL`
* @note Only available if module core_mutex_priority_inheritance * @note Only available if module core_mutex_priority_inheritance
@ -141,6 +162,18 @@ typedef struct {
* this will have the value of `NULL`. * this will have the value of `NULL`.
*/ */
kernel_pid_t owner; kernel_pid_t owner;
#endif
#if defined(DOXYGEN) || defined(MODULE_CORE_MUTEX_DEBUG)
/**
* @brief Program counter of the call to @ref mutex_lock that most
* recently acquired this mutex
*
* This is used when the module `core_mutex_debug` is used to debug
* deadlocks and is non-existing otherwise
*/
uinttxtptr_t owner_calling_pc;
#endif
#if defined(DOXYGEN) || defined(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
/** /**
* @brief Original priority of the owner * @brief Original priority of the owner
* @note Only available if module core_mutex_priority_inheritance * @note Only available if module core_mutex_priority_inheritance

View File

@ -47,9 +47,18 @@
* function into both @ref mutex_lock and @ref mutex_lock_cancelable is, * function into both @ref mutex_lock and @ref mutex_lock_cancelable is,
* therefore, beneficial for the majority of applications. * therefore, beneficial for the majority of applications.
*/ */
static inline __attribute__((always_inline)) void _block(mutex_t *mutex, static inline __attribute__((always_inline))
unsigned irq_state) void _block(mutex_t *mutex,
unsigned irq_state,
uinttxtptr_t pc)
{ {
/* pc is only used when MODULE_CORE_MUTEX_DEBUG */
(void)pc;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
printf("[mutex] waiting for thread %" PRIkernel_pid " (pc = 0x%" PRIxTXTPTR
")\n",
mutex->owner, mutex->owner_calling_pc);
#endif
thread_t *me = thread_get_active(); thread_t *me = thread_get_active();
/* Fail visibly even if a blocking action is called from somewhere where /* Fail visibly even if a blocking action is called from somewhere where
@ -80,10 +89,17 @@ static inline __attribute__((always_inline)) void _block(mutex_t *mutex,
irq_restore(irq_state); irq_restore(irq_state);
thread_yield_higher(); thread_yield_higher();
/* We were woken up by scheduler. Waker removed us from queue. */ /* We were woken up by scheduler. Waker removed us from queue. */
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
mutex->owner_calling_pc = pc;
#endif
} }
bool mutex_lock_internal(mutex_t *mutex, bool block) bool mutex_lock_internal(mutex_t *mutex, bool block)
{ {
uinttxtptr_t pc = 0;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
pc = cpu_get_caller_pc();
#endif
unsigned irq_state = irq_disable(); unsigned irq_state = irq_disable();
DEBUG("PID[%" PRIkernel_pid "] mutex_lock_internal(block=%u).\n", DEBUG("PID[%" PRIkernel_pid "] mutex_lock_internal(block=%u).\n",
@ -92,9 +108,15 @@ bool mutex_lock_internal(mutex_t *mutex, bool block)
if (mutex->queue.next == NULL) { if (mutex->queue.next == NULL) {
/* mutex is unlocked. */ /* mutex is unlocked. */
mutex->queue.next = MUTEX_LOCKED; mutex->queue.next = MUTEX_LOCKED;
#ifdef MODULE_CORE_MUTEX_PRIORITY_INHERITANCE #if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) \
|| IS_USED(MODULE_CORE_MUTEX_DEBUG)
thread_t *me = thread_get_active(); thread_t *me = thread_get_active();
mutex->owner = me->pid; mutex->owner = me->pid;
#endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
mutex->owner_calling_pc = pc;
#endif
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
mutex->owner_original_priority = me->priority; mutex->owner_original_priority = me->priority;
#endif #endif
DEBUG("PID[%" PRIkernel_pid "] mutex_lock(): early out.\n", DEBUG("PID[%" PRIkernel_pid "] mutex_lock(): early out.\n",
@ -106,7 +128,7 @@ bool mutex_lock_internal(mutex_t *mutex, bool block)
irq_restore(irq_state); irq_restore(irq_state);
return false; return false;
} }
_block(mutex, irq_state); _block(mutex, irq_state, pc);
} }
return true; return true;
@ -114,6 +136,10 @@ bool mutex_lock_internal(mutex_t *mutex, bool block)
int mutex_lock_cancelable(mutex_cancel_t *mc) int mutex_lock_cancelable(mutex_cancel_t *mc)
{ {
uinttxtptr_t pc = 0;
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
pc = cpu_get_caller_pc();
#endif
unsigned irq_state = irq_disable(); unsigned irq_state = irq_disable();
DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable()\n", DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable()\n",
@ -131,9 +157,15 @@ int mutex_lock_cancelable(mutex_cancel_t *mc)
if (mutex->queue.next == NULL) { if (mutex->queue.next == NULL) {
/* mutex is unlocked. */ /* mutex is unlocked. */
mutex->queue.next = MUTEX_LOCKED; mutex->queue.next = MUTEX_LOCKED;
#ifdef MODULE_CORE_MUTEX_PRIORITY_INHERITANCE #if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE) \
|| IS_USED(MODULE_CORE_MUTEX_DEBUG)
thread_t *me = thread_get_active(); thread_t *me = thread_get_active();
mutex->owner = me->pid; mutex->owner = me->pid;
#endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
mutex->owner_calling_pc = pc;
#endif
#if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
mutex->owner_original_priority = me->priority; mutex->owner_original_priority = me->priority;
#endif #endif
DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() early out.\n", DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() early out.\n",
@ -142,7 +174,7 @@ int mutex_lock_cancelable(mutex_cancel_t *mc)
return 0; return 0;
} }
else { else {
_block(mutex, irq_state); _block(mutex, irq_state, pc);
if (mc->cancelled) { if (mc->cancelled) {
DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() " DEBUG("PID[%" PRIkernel_pid "] mutex_lock_cancelable() "
"cancelled.\n", thread_getpid()); "cancelled.\n", thread_getpid());
@ -185,7 +217,7 @@ void mutex_unlock(mutex_t *mutex)
uint16_t process_priority = process->priority; uint16_t process_priority = process->priority;
#ifdef MODULE_CORE_MUTEX_PRIORITY_INHERITANCE #if IS_USED(MODULE_CORE_MUTEX_PRIORITY_INHERITANCE)
thread_t *owner = thread_get(mutex->owner); thread_t *owner = thread_get(mutex->owner);
if ((owner) && (owner->priority != mutex->owner_original_priority)) { if ((owner) && (owner->priority != mutex->owner_original_priority)) {
DEBUG("PID[%" PRIkernel_pid "] prio %u --> %u\n", DEBUG("PID[%" PRIkernel_pid "] prio %u --> %u\n",
@ -194,6 +226,9 @@ void mutex_unlock(mutex_t *mutex)
sched_change_priority(owner, mutex->owner_original_priority); sched_change_priority(owner, mutex->owner_original_priority);
} }
#endif #endif
#if IS_USED(MODULE_CORE_MUTEX_DEBUG)
mutex->owner_calling_pc = 0;
#endif
irq_restore(irqstate); irq_restore(irqstate);
sched_switch(process_priority); sched_switch(process_priority);