
cpu/esp32/syscalls: migration to ESP-IDF v5.4

Gunar Schorcht 2025-03-02 12:09:36 +01:00
parent d57fd6a559
commit e16bca41cd
2 changed files with 166 additions and 91 deletions

@@ -32,10 +32,11 @@
#include "sys/lock.h"
#include "timex.h"
#include "esp_cpu.h"
#include "esp_private/periph_ctrl.h"
#include "esp_rom_caps.h"
#include "hal/interrupt_controller_types.h"
#include "hal/interrupt_controller_ll.h"
#include "hal/timer_hal.h"
#include "hal/timer_ll.h"
#include "hal/wdt_hal.h"
#include "hal/wdt_types.h"
#include "rom/ets_sys.h"
@@ -92,6 +93,28 @@ void heap_stats(void)
_alloc + _free, _alloc, _free);
}
#else /* IS_USED(MODULE_ESP_IDF_HEAP) */
void *heap_caps_malloc_prefer(size_t size, size_t num, ...)
{
/* This function usually allocates a chunk of memory, trying the
 * capabilities given as variadic parameters in descending order of
 * preference. However, allocating memory according to given capabilities
 * is only relevant if multiple heaps use memories of different
 * capabilities, such as the alignment, the memory type and so on. Since
 * we only use embedded RAM with identical capabilities, we just map this
 * function to the standard malloc.
 */
return malloc(size);
}
void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
{
(void)alignment;
(void)caps;
return calloc(n, size);
}
#endif /* IS_USED(MODULE_ESP_IDF_HEAP) */
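To illustrate what the stub above discards: a caller written against ESP-IDF's heap API can list several capability sets in order of preference, and under this mapping every preference collapses to a plain malloc. A minimal sketch of such a caller follows; the MALLOC_CAP_* values match ESP-IDF's esp_heap_caps.h, and get_dma_buffer is a hypothetical example, not code from this commit.

#include <stdlib.h>
#include <stddef.h>

/* capability flags as defined in ESP-IDF's esp_heap_caps.h */
#define MALLOC_CAP_8BIT  (1 << 2)
#define MALLOC_CAP_DMA   (1 << 3)

void *heap_caps_malloc_prefer(size_t size, size_t num, ...);

static void *get_dma_buffer(size_t len)
{
    /* preferred order: DMA-capable RAM first, then any byte-addressable
     * RAM -- with the stub above, both cases are served by malloc() */
    return heap_caps_malloc_prefer(len, 2, MALLOC_CAP_DMA, MALLOC_CAP_8BIT);
}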
/**
@@ -193,10 +216,10 @@ static struct syscall_stub_table s_stub_table =
{
.__getreent = &__getreent,
._malloc_r = &_malloc_r,
._free_r = &_free_r,
._realloc_r = &_realloc_r,
._calloc_r = &_calloc_r,
._malloc_r = (void * (*)(struct _reent *, size_t))&_malloc_r,
._free_r = (void (*)(struct _reent *, void *))&_free_r,
._realloc_r = (void * (*)(struct _reent *, void *, size_t))&_realloc_r,
._calloc_r = (void * (*)(struct _reent *, size_t, size_t))&_calloc_r,
._sbrk_r = &_sbrk_r,
._system_r = (void*)&_no_sys_func,
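The casts above reconcile the function-pointer types that ESP-IDF v5.4's struct syscall_stub_table expects with the reentrant signatures newlib declares for _malloc_r and friends. A minimal sketch of the pattern; the table type here is illustrative, not the exact ESP-IDF definition.

#include <stddef.h>

struct _reent;  /* opaque newlib per-thread state */

/* the reentrant allocator as implemented by the C library */
void *_malloc_r(struct _reent *r, size_t size);
void _free_r(struct _reent *r, void *ptr);

/* illustrative stand-in for ESP-IDF's syscall_stub_table */
struct stub_table {
    void *(*_malloc_r)(struct _reent *, size_t);
    void (*_free_r)(struct _reent *, void *);
};

static struct stub_table table = {
    /* explicit casts keep the initializer well-formed even when the
     * header's pointer types and the implementation's prototypes differ
     * in detail between ESP-IDF and newlib versions */
    ._malloc_r = (void *(*)(struct _reent *, size_t))&_malloc_r,
    ._free_r = (void (*)(struct _reent *, void *))&_free_r,
};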
@@ -256,7 +279,7 @@ static struct syscall_stub_table s_stub_table =
timer_hal_context_t sys_timer = {
.dev = TIMER_LL_GET_HW(TIMER_SYSTEM_GROUP),
.idx = TIMER_SYSTEM_INDEX,
.timer_id = TIMER_SYSTEM_INDEX,
};
#if defined(_RETARGETABLE_LOCKING)
@@ -276,16 +299,123 @@ extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___tz_mutex;
extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___dd_hash_mutex;
extern struct __lock __attribute__((alias("s_shared_mutex"))) __lock___arc4random_mutex;
#endif
/* map newlib's `__retarget_*` functions to the existing `_lock_*` functions */
void __retarget_lock_init(_LOCK_T *lock)
{
_lock_init(lock);
}
void __retarget_lock_init_recursive(_LOCK_T *lock)
{
_lock_init_recursive(lock);
}
void __retarget_lock_close(_LOCK_T lock)
{
_lock_close(&lock);
}
void __retarget_lock_close_recursive(_LOCK_T lock)
{
_lock_close_recursive(&lock);
}
void __retarget_lock_acquire(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared mutex if lock is NULL */
lock = (_lock_t)&s_shared_mutex;
}
_lock_acquire(&lock);
}
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared rmutex if lock is NULL */
lock = (_lock_t)&s_shared_rmutex;
}
_lock_acquire_recursive(&lock);
}
int __retarget_lock_try_acquire(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared mutex if lock is NULL */
lock = (_lock_t)&s_shared_mutex;
}
return _lock_try_acquire(&lock);
}
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared rmutex if lock is NULL */
lock = (_lock_t)&s_shared_rmutex;
}
return _lock_try_acquire_recursive(&lock);
}
void __retarget_lock_release(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared mutex if lock is NULL */
lock = (_lock_t)&s_shared_mutex;
}
_lock_release(&lock);
}
void __retarget_lock_release_recursive(_LOCK_T lock)
{
if (lock == NULL) {
/* use the shared rmutex if lock is NULL */
lock = (_lock_t)&s_shared_rmutex;
}
_lock_release_recursive(&lock);
}
#endif /* _RETARGETABLE_LOCKING */
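The NULL checks in the wrappers above matter because newlib may invoke the retargeted hooks with a lock object that was never passed through __retarget_lock_init*, for example very early during startup. A hypothetical caller, just to illustrate the contract (assuming newlib's <sys/lock.h> with retargetable locking enabled):

#include <sys/lock.h>   /* _LOCK_T and the __retarget_lock_* prototypes */

static _LOCK_T my_lock;  /* never initialized, still NULL */

static void touch_shared_state(void)
{
    /* a NULL lock falls back to the shared recursive mutex, so this is
     * safe even before __retarget_lock_init_recursive() was ever run */
    __retarget_lock_acquire_recursive(my_lock);
    /* ... access state guarded by the lock ... */
    __retarget_lock_release_recursive(my_lock);
}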
void IRAM syscalls_init_arch(void)
{
/* initialize and enable the system timer in us (TMG0 is enabled by default) */
timer_hal_init(&sys_timer, TIMER_SYSTEM_GROUP, TIMER_SYSTEM_INDEX);
timer_hal_set_divider(&sys_timer, rtc_clk_apb_freq_get() / MHZ);
timer_hal_set_counter_increase(&sys_timer, true);
timer_hal_set_auto_reload(&sys_timer, false);
timer_hal_set_counter_enable(&sys_timer, true);
#if 0
/* In ESP-IDF, the newlib functions in ROM are used, which require some
 * variables to be set to the shared mutex/rmutex. Since we don't use
 * the newlib functions in ROM, we don't have to set these variables
 * here for the moment.
 */
#ifdef CONFIG_IDF_TARGET_ESP32
/* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
extern _lock_t __sfp_lock;
__sfp_lock = (_lock_t) &s_shared_rmutex;
extern _lock_t __sinit_lock;
__sinit_lock = (_lock_t) &s_shared_rmutex;
extern _lock_t __env_lock_object;
__env_lock_object = (_lock_t) &s_shared_rmutex;
extern _lock_t __tz_lock_object;
__tz_lock_object = (_lock_t) &s_shared_rmutex;
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
/* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
extern _lock_t __sinit_recursive_mutex;
__sinit_recursive_mutex = (_lock_t) &s_shared_rmutex;
extern _lock_t __sfp_recursive_mutex;
__sfp_recursive_mutex = (_lock_t) &s_shared_rmutex;
#endif
#endif
/* initialize and enable the system timer in us */
periph_module_enable(PERIPH_TIMG0_MODULE);
timer_ll_set_clock_source(sys_timer.dev, sys_timer.timer_id, GPTIMER_CLK_SRC_DEFAULT);
timer_ll_enable_clock(sys_timer.dev, sys_timer.timer_id, true);
timer_ll_set_clock_prescale(sys_timer.dev, sys_timer.timer_id, rtc_clk_apb_freq_get() / MHZ);
timer_ll_set_count_direction(sys_timer.dev, sys_timer.timer_id, GPTIMER_COUNT_UP);
timer_ll_enable_auto_reload(sys_timer.dev, sys_timer.timer_id, false);
timer_ll_enable_counter(sys_timer.dev, sys_timer.timer_id, true);
timer_ll_enable_alarm(sys_timer.dev, sys_timer.timer_id, false);
#if SOC_TIMER_SUPPORT_ETM
timer_ll_enable_etm(sys_timer.dev, true);
#endif
#if defined(CPU_FAM_ESP32)
syscall_table_ptr_pro = &s_stub_table;
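The prescaler written by timer_ll_set_clock_prescale() is what gives the counter its microsecond resolution: dividing the APB clock by rtc_clk_apb_freq_get() / MHZ yields a 1 MHz count rate. A worked sketch of the arithmetic, assuming the usual 80 MHz APB clock:

#include <stdint.h>

#define MHZ 1000000UL

/* assumed value; at runtime rtc_clk_apb_freq_get() reports the real one */
static const uint32_t apb_hz = 80U * MHZ;

static uint32_t sys_timer_prescale(void)
{
    /* 80 000 000 / 1 000 000 = 80, so the 80 MHz APB clock is divided
     * down to 1 MHz and one counter tick equals one microsecond */
    return apb_hz / MHZ;
}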
@@ -307,11 +437,10 @@ uint32_t system_get_time_ms(void)
return system_get_time_64() / US_PER_MS;
}
int64_t system_get_time_64(void)
uint64_t system_get_time_64(void)
{
uint64_t ret;
timer_hal_get_counter_value(&sys_timer, &ret);
return ret;
timer_ll_trigger_soft_capture(sys_timer.dev, sys_timer.timer_id);
return timer_ll_get_counter_value(sys_timer.dev, sys_timer.timer_id);
}
wdt_hal_context_t mwdt;
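In system_get_time_64() above, the soft-capture step is what makes the 64-bit read coherent: the counter runs in the timer's clock domain, and triggering a capture latches its current value into a register the CPU can then read without tearing. A small usage sketch:

#include <stdint.h>

uint64_t system_get_time_64(void);

/* measure the wall-clock duration of fn() in microseconds, using the
 * 1 MHz system timer configured in syscalls_init_arch() */
static uint64_t time_us(void (*fn)(void))
{
    uint64_t start = system_get_time_64();
    fn();
    return system_get_time_64() - start;
}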
@@ -357,32 +486,23 @@ void system_wdt_init(void)
wdt_hal_write_protect_enable(&mwdt);
wdt_hal_write_protect_enable(&rwdt);
#if defined(CPU_FAM_ESP32)
DEBUG("%s TIMERG0 wdtconfig0=%08"PRIx32" wdtconfig1=%08"PRIx32
" wdtconfig2=%08"PRIx32" wdtconfig3=%08"PRIx32
" wdtconfig4=%08"PRIx32" regclk=%08"PRIx32"\n", __func__,
TIMERG0.wdt_config0.val, TIMERG0.wdt_config1.val,
TIMERG0.wdt_config2, TIMERG0.wdt_config3,
TIMERG0.wdt_config4, TIMERG0.clk.val);
#else
DEBUG("%s TIMERG0 wdtconfig0=%08"PRIx32" wdtconfig1=%08"PRIx32
" wdtconfig2=%08"PRIx32" wdtconfig3=%08"PRIx32
" wdtconfig4=%08"PRIx32" regclk=%08"PRIx32"\n", __func__,
TIMERG0.wdtconfig0.val, TIMERG0.wdtconfig1.val,
TIMERG0.wdtconfig2.val, TIMERG0.wdtconfig3.val,
TIMERG0.wdtconfig4.val, TIMERG0.regclk.val);
#endif
/* route WDT peripheral interrupt source to CPU_INUM_WDT */
intr_matrix_set(PRO_CPU_NUM, ETS_TG0_WDT_LEVEL_INTR_SOURCE, CPU_INUM_WDT);
/* set the interrupt handler and activate the interrupt */
intr_cntrl_ll_set_int_handler(CPU_INUM_WDT, system_wdt_int_handler, NULL);
intr_cntrl_ll_enable_interrupts(BIT(CPU_INUM_WDT));
esp_cpu_intr_set_handler(CPU_INUM_WDT, system_wdt_int_handler, NULL);
esp_cpu_intr_enable(BIT(CPU_INUM_WDT));
}
void system_wdt_stop(void)
{
intr_cntrl_ll_disable_interrupts(BIT(CPU_INUM_WDT));
esp_cpu_intr_disable(BIT(CPU_INUM_WDT));
wdt_hal_write_protect_disable(&mwdt);
wdt_hal_disable(&mwdt);
wdt_hal_write_protect_enable(&mwdt);
@@ -393,5 +513,16 @@ void system_wdt_start(void)
wdt_hal_write_protect_disable(&mwdt);
wdt_hal_enable(&mwdt);
wdt_hal_write_protect_enable(&mwdt);
intr_cntrl_ll_enable_interrupts(BIT(CPU_INUM_WDT));
esp_cpu_intr_enable(BIT(CPU_INUM_WDT));
}
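Every register write in system_wdt_stop()/system_wdt_start() is bracketed by wdt_hal_write_protect_disable()/..._enable() because the MWDT registers are guarded by a write-protect key. The same bracket applies to feeding the watchdog; a sketch, assuming ESP-IDF's wdt_hal_feed():

#include "hal/wdt_hal.h"

extern wdt_hal_context_t mwdt;

static void system_wdt_feed(void)
{
    /* unlock, feed (reset the MWDT stage timeouts), lock again */
    wdt_hal_write_protect_disable(&mwdt);
    wdt_hal_feed(&mwdt);
    wdt_hal_write_protect_enable(&mwdt);
}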
#ifndef MODULE_POSIX_SLEEP
int usleep(useconds_t us)
{
extern void esp_rom_delay_us(uint32_t us);
esp_rom_delay_us((uint32_t) us);
return 0;
}
#endif
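Note that this fallback delays in the ROM's busy-wait loop: the calling thread spins instead of being descheduled, which is acceptable for short waits only. A usage sketch:

#include <unistd.h>

static void settle_sensor(void)
{
    /* busy-waits for ~500 us in esp_rom_delay_us; the calling thread
     * is not descheduled while the delay runs */
    usleep(500);
}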

@@ -90,6 +90,10 @@ static _lock_t *__malloc_static_object = NULL;
#define _lock_critical_enter() uint32_t __lock_state = irq_disable();
#define _lock_critical_exit() irq_restore(__lock_state);
/* check whether `struct __lock` is large enough to hold a recursive mutex */
static_assert(sizeof(struct __lock) >= sizeof(rmutex_t),
"struct __lock is too small to hold a recursive mutex of type rmutex_t");
#endif
void IRAM_ATTR _lock_init(_lock_t *lock)
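The static_assert above guarantees that an rmutex_t can be constructed in place inside the storage newlib reserves for a struct __lock. A sketch of the pattern this enables, with an illustrative layout for struct __lock (newlib itself treats the type as opaque):

#include "rmutex.h"   /* RIOT's recursive mutex */

/* illustrative layout only -- sized to satisfy the static_assert */
struct __lock { char storage[sizeof(rmutex_t)]; };

static void lock_storage_init(struct __lock *l)
{
    /* safe per the static_assert: the rmutex fits into the storage */
    rmutex_t *rm = (rmutex_t *)l;
    rmutex_init(rm);
}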
@@ -318,66 +322,6 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
_lock_critical_exit();
}
#if defined(_RETARGETABLE_LOCKING)
/* check whether `struct __lock` is large enough to hold a recursive mutex */
static_assert(sizeof(struct __lock) >= sizeof(rmutex_t),
"struct __lock is too small to hold a recursive mutex of type rmutex_t");
/* map newlib's `__retarget_*` functions to the existing `_lock_*` functions */
void __retarget_lock_init(_LOCK_T *lock)
{
_lock_init(lock);
}
extern void __retarget_lock_init_recursive(_LOCK_T *lock)
{
_lock_init_recursive(lock);
}
void __retarget_lock_close(_LOCK_T lock)
{
_lock_close(&lock);
}
void __retarget_lock_close_recursive(_LOCK_T lock)
{
_lock_close_recursive(&lock);
}
void __retarget_lock_acquire(_LOCK_T lock)
{
_lock_acquire(&lock);
}
void __retarget_lock_acquire_recursive(_LOCK_T lock)
{
_lock_acquire_recursive(&lock);
}
int __retarget_lock_try_acquire(_LOCK_T lock)
{
return _lock_try_acquire(&lock);
}
int __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
return _lock_try_acquire_recursive(&lock);
}
void __retarget_lock_release(_LOCK_T lock)
{
_lock_release(&lock);
}
void __retarget_lock_release_recursive(_LOCK_T lock)
{
_lock_release(&lock);
}
#endif /* _RETARGETABLE_LOCKING */
/**
* @name Memory allocation functions
*/