Mirror of https://github.com/RIOT-OS/RIOT.git (synced 2025-12-15 09:33:50 +01:00)
core/rmutex: use atomic utils
Replace use of C11 atomics with atomic utils. This fixes the following
error when compiling on AVR with LLVM:

> error: address argument to atomic operation must be a pointer to a
> trivially-copyable type ('_Atomic(int) *' invalid)
parent 20793fd6c6
commit b6b7065ddc
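The change applies one pattern throughout: rmutex_t::owner becomes a plain
kernel_pid_t, and every access goes through the typed atomic_utils accessors
atomic_load_kernel_pid()/atomic_store_kernel_pid() instead of a C11 _Atomic
type with atomic_*_explicit(). Below is a minimal, host-compilable sketch of
that shape; the stub_/my_-prefixed helpers are illustrative stand-ins only,
not RIOT's actual per-platform atomic_utils implementation:

    #include <stdint.h>

    typedef int16_t kernel_pid_t;   /* mirrors RIOT's sched.h */
    #define KERNEL_PID_UNDEF 0

    /* stand-ins for RIOT's irq_disable()/irq_restore(), so this compiles on a host */
    static unsigned stub_irq_disable(void) { return 0; }
    static void stub_irq_restore(unsigned state) { (void)state; }

    /* illustrative typed accessors in the spirit of atomic_utils: a plain
     * kernel_pid_t is loaded/stored inside a brief IRQ-guarded section, so no
     * _Atomic-qualified type is needed */
    static kernel_pid_t my_atomic_load_kernel_pid(const volatile kernel_pid_t *var)
    {
        unsigned state = stub_irq_disable();
        kernel_pid_t val = *var;
        stub_irq_restore(state);
        return val;
    }

    static void my_atomic_store_kernel_pid(volatile kernel_pid_t *var, kernel_pid_t val)
    {
        unsigned state = stub_irq_disable();
        *var = val;
        stub_irq_restore(state);
    }

    int main(void)
    {
        /* before: atomic_int_least16_t owner; + atomic_load_explicit(...)
         * after:  kernel_pid_t owner;         + atomic_load_kernel_pid(...) */
        volatile kernel_pid_t owner = KERNEL_PID_UNDEF;

        my_atomic_store_kernel_pid(&owner, 3);
        return (my_atomic_load_kernel_pid(&owner) == 3) ? 0 : 1;
    }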
@@ -24,11 +24,6 @@
 #define RMUTEX_H
 
 #include <stdint.h>
-#ifdef __cplusplus
-#include "c11_atomics_compat.hpp"
-#else
-#include <stdatomic.h>
-#endif
 
 #include "mutex.h"
 #include "sched.h"
@@ -62,14 +57,14 @@ typedef struct rmutex_t {
      * atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
      * @internal
      */
-    atomic_int_least16_t owner;
+    kernel_pid_t owner;
 } rmutex_t;
 
 /**
  * @brief Static initializer for rmutex_t.
  * @details This initializer is preferable to rmutex_init().
  */
-#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
+#define RMUTEX_INIT { MUTEX_INIT, 0, KERNEL_PID_UNDEF }
 
 /**
  * @brief Initializes a recursive mutex object.
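For orientation, the header hunks above only change the owner field's type and
the static initializer; the recursive-locking API itself is unchanged. A short
usage sketch, assuming a RIOT application context (rmutex_lock(),
rmutex_unlock() and RMUTEX_INIT are the existing API from rmutex.h):

    #include "rmutex.h"

    /* statically initialized recursive mutex, using the updated RMUTEX_INIT */
    static rmutex_t lock = RMUTEX_INIT;

    static void nested(void)
    {
        rmutex_lock(&lock);     /* same thread may re-enter: refcount goes to 2 */
        /* ... */
        rmutex_unlock(&lock);
    }

    void example(void)
    {
        rmutex_lock(&lock);     /* owner set to the calling thread, refcount 1 */
        nested();
        rmutex_unlock(&lock);   /* refcount back to 0, owner reset, mutex released */
    }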
@@ -24,9 +24,10 @@
 #include <stdio.h>
 #include <inttypes.h>
 
+#include "assert.h"
+#include "atomic_utils.h"
 #include "rmutex.h"
 #include "thread.h"
-#include "assert.h"
 
 #define ENABLE_DEBUG 0
 #include "debug.h"
@@ -78,7 +79,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
      */
 
     /* ensure that owner is read atomically, since I need a consistent value */
-    owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
+    owner = atomic_load_kernel_pid(&rmutex->owner);
     DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n",
           thread_getpid(), owner);
 
@@ -104,8 +105,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
     DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());
 
     /* ensure that owner is written atomically, since others need a consistent value */
-    atomic_store_explicit(&rmutex->owner, thread_getpid(),
-                          memory_order_relaxed);
+    atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
 
     DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());
 
@@ -127,8 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)
 
 void rmutex_unlock(rmutex_t *rmutex)
 {
-    assert(atomic_load_explicit(&rmutex->owner,
-                                memory_order_relaxed) == thread_getpid());
+    /* ensure that owner is read atomically, since I need a consistent value */
+    assert(atomic_load_kernel_pid(&rmutex->owner) == thread_getpid());
     assert(rmutex->refcount > 0);
 
     DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
@@ -143,8 +143,7 @@ void rmutex_unlock(rmutex_t *rmutex)
         DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());
 
         /* ensure that owner is written only once */
-        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
-                              memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, KERNEL_PID_UNDEF);
 
         DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());
 
@@ -23,6 +23,7 @@
 #include <string.h>
 
 #include "xtimer.h"
+#include "atomic_utils.h"
 #include "msg.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -224,8 +225,7 @@ int xtimer_rmutex_lock_timeout(rmutex_t *rmutex, uint64_t timeout)
         return 0;
     }
     if (xtimer_mutex_lock_timeout(&rmutex->mutex, timeout) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
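The xtimer hunk above only swaps the owner store inside
xtimer_rmutex_lock_timeout(); its calling convention is unchanged. A short
usage sketch, assuming a RIOT application with the xtimer module enabled (the
1000 us timeout value here is arbitrary):

    #include "rmutex.h"
    #include "xtimer.h"

    static rmutex_t lock = RMUTEX_INIT;

    int try_with_timeout(void)
    {
        /* returns 0 if the recursive mutex was obtained within 1000 us,
         * non-zero if the timeout expired first */
        if (xtimer_rmutex_lock_timeout(&lock, 1000) == 0) {
            /* ... critical section ... */
            rmutex_unlock(&lock);
            return 0;
        }
        return -1;
    }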
@@ -23,6 +23,7 @@
 #include <assert.h>
 #include <errno.h>
 
+#include "atomic_utils.h"
 #include "irq.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -190,8 +191,7 @@ int ztimer_rmutex_lock_timeout(ztimer_clock_t *clock, rmutex_t *rmutex,
         return 0;
     }
     if (ztimer_mutex_lock_timeout(clock, &rmutex->mutex, timeout) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
@@ -23,6 +23,7 @@
 #include <assert.h>
 #include <errno.h>
 
+#include "atomic_utils.h"
 #include "irq.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -183,8 +184,7 @@ int ztimer64_rmutex_lock_until(ztimer64_clock_t *clock, rmutex_t *rmutex,
         return 0;
     }
     if (ztimer64_mutex_lock_until(clock, &rmutex->mutex, target) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
@@ -20,11 +20,13 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include "shell.h"
-#include "xtimer.h"
-#include "thread.h"
-#include "msg.h"
+
+#include "atomic_utils.h"
 #include "irq.h"
+#include "msg.h"
+#include "shell.h"
+#include "thread.h"
+#include "xtimer.h"
 
 /* XTIMER_SHIFT can be undefined when using xtimer_on_ztimer on boards
  * incompatible with xtimers tick conversion, e.g. the waspmote-pro
@@ -155,8 +157,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_unlocked(int argc,
 
     if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -209,8 +210,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -261,8 +261,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_low_prio_thread(
 
     if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -317,8 +316,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -355,8 +353,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_unlocked(int argc,
 
     if (xtimer_rmutex_lock_timeout(&test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -20,11 +20,13 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include "shell.h"
-#include "ztimer.h"
-#include "thread.h"
-#include "msg.h"
+
+#include "atomic_utils.h"
 #include "irq.h"
+#include "msg.h"
+#include "shell.h"
+#include "thread.h"
+#include "ztimer.h"
 
 /* timeout at one millisecond (1000 us) to make sure it does not spin. */
 #define LONG_RMUTEX_TIMEOUT 1000
@@ -148,8 +150,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_unlocked(int argc,
 
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -202,8 +203,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -254,8 +254,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_low_prio_thread(
 
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -310,8 +309,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -348,8 +346,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_unlocked(int argc,
 
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");