
cpu/stm32/periph_eth: fix error handling

An earlier version of periph_eth used to always pack the first chunk of the
outgoing frame into the first DMA descriptor by telling the DMA to jump back
to the first descriptor within the last descriptor. This worked fine unless
the frame was sent in one chunk (as e.g. lwIP does), which, due to a hardware
bug, resulted in the frame being sent out twice. For that reason, the
behavior was changed to cycle through the linked DMA descriptor list in
round-robin fashion. However, the error checking was not updated accordingly.
Hence, the error check might run over (parts of) unrelated frames and fail to
detect errors correctly.
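
As a minimal, self-contained sketch of the corrected check (descriptor layout
and flag names follow the diff below; the bit values and the helper name
check_tx_frame() are placeholders for illustration only): the walk starts at
tx_curr, the first descriptor handed to the DMA for this frame, and stops at
the descriptor carrying the last-segment bit, so descriptors belonging to
other frames are never inspected.

#include <errno.h>
#include <stdint.h>

typedef struct edma_desc {
    volatile uint32_t status;        /* written back by the DMA after TX */
    volatile uint32_t control;
    void *buffer_addr;
    struct edma_desc *desc_next;     /* chained (linked-list) descriptor mode */
} edma_desc_t;

/* placeholder bit positions, not the real hardware encoding */
#define TX_DESC_STAT_ES (1UL << 15)  /* error summary */
#define TX_DESC_STAT_LS (1UL << 29)  /* last segment of a frame */
#define TX_DESC_STAT_EC (1UL << 8)   /* excessive collisions (half duplex) */
#define TX_DESC_STAT_NC (1UL << 10)  /* no carrier during TX */

static edma_desc_t *tx_curr;         /* first descriptor used for this frame */

static int check_tx_frame(void)
{
    int error = 0;
    while (1) {
        uint32_t status = tx_curr->status;
        if (status & TX_DESC_STAT_ES) {
            /* map the descriptor error flags to negative errno codes;
             * the driver additionally resets the DMA at this point */
            error = (status & TX_DESC_STAT_EC) ? -EBUSY
                  : (status & TX_DESC_STAT_NC) ? -ENETDOWN
                  : -EIO;
        }
        tx_curr = tx_curr->desc_next;
        if (status & TX_DESC_STAT_LS) {
            /* end of this frame: stop before touching unrelated frames */
            break;
        }
    }
    return error;
}

The old code instead indexed tx_desc[i] starting from 0, which after the
switch to round-robin descriptor usage no longer corresponds to the frame
that was just sent.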

This commit fixes the issue and also provides proper return codes for errors.

Additionally, a DMA reset is performed when errors are detected during RX/TX.
I'm not sure if/when this is needed, as error conditions are nigh impossible
to produce. But better safe than sorry.
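
A rough sketch of that recovery path, mirroring the _reset_eth_dma() hunk in
the diff below with added commentary (ETH and the ETH_DMAOMR_* flags come
from the vendor device headers):

static void _reset_eth_dma(void)
{
    /* stop DMA transmission and reception before touching the rings */
    ETH->DMAOMR &= ~(ETH_DMAOMR_ST | ETH_DMAOMR_SR);
    /* rebuild the RX/TX descriptor rings and hand them back to the DMA,
     * discarding whatever state the failed transfer left behind */
    _init_dma_descriptors();
    /* restart DMA transmission and reception */
    ETH->DMAOMR |= ETH_DMAOMR_ST | ETH_DMAOMR_SR;
}
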
Marian Buschsieweke 2021-01-15 11:17:00 +01:00
parent b35e4f4a95
commit 788f997452


@@ -228,8 +228,7 @@ static void stm32_eth_set_addr(const uint8_t *addr)
ETH->MACA0LR = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
}
/** Initialization of the DMA descriptors to be used */
static void _init_buffer(void)
static void _init_dma_descriptors(void)
{
size_t i;
for (i = 0; i < ETH_RX_DESCRIPTOR_COUNT; i++) {
@@ -254,6 +253,17 @@ static void _init_buffer(void)
ETH->DMATDLAR = (uintptr_t)tx_curr;
}
static void _reset_eth_dma(void)
{
/* disable DMA TX and RX */
ETH->DMAOMR &= ~(ETH_DMAOMR_ST | ETH_DMAOMR_SR);
_init_dma_descriptors();
/* enable DMA TX and RX */
ETH->DMAOMR |= ETH_DMAOMR_ST | ETH_DMAOMR_SR;
}
static int stm32_eth_set(netdev_t *dev, netopt_t opt,
const void *value, size_t max_len)
{
@@ -445,8 +455,6 @@ static int stm32_eth_init(netdev_t *netdev)
netdev_eui48_get(netdev, &hwaddr);
stm32_eth_set_addr(hwaddr.uint8);
_init_buffer();
ETH->DMAIER |= ETH_DMAIER_NISE | ETH_DMAIER_TIE | ETH_DMAIER_RIE;
/* enable transmitter and receiver */
@@ -456,8 +464,7 @@ static int stm32_eth_init(netdev_t *netdev)
/* wait for FIFO flushing to complete */
while (ETH->DMAOMR & ETH_DMAOMR_FTF) { }
/* enable DMA TX and RX */
ETH->DMAOMR |= ETH_DMAOMR_ST | ETH_DMAOMR_SR;
_reset_eth_dma();
_setup_phy();
@@ -476,9 +483,10 @@ static int stm32_eth_send(netdev_t *netdev, const struct iolist *iolist)
/* We cannot send more chunks than allocated descriptors */
assert(iolist_count(iolist) <= ETH_TX_DESCRIPTOR_COUNT);
edma_desc_t *dma_iter = tx_curr;
for (unsigned i = 0; iolist; iolist = iolist->iol_next, i++) {
tx_curr->control = iolist->iol_len;
tx_curr->buffer_addr = iolist->iol_base;
dma_iter->control = iolist->iol_len;
dma_iter->buffer_addr = iolist->iol_base;
uint32_t status = TX_DESC_STAT_IC | TX_DESC_STAT_TCH | TX_DESC_STAT_CIC
| TX_DESC_STAT_OWN;
if (!i) {
@@ -489,8 +497,8 @@ static int stm32_eth_send(netdev_t *netdev, const struct iolist *iolist)
/* last chunk */
status |= TX_DESC_STAT_LS;
}
tx_curr->status = status;
tx_curr = tx_curr->desc_next;
dma_iter->status = status;
dma_iter = dma_iter->desc_next;
}
/* start TX */
@@ -501,22 +509,40 @@ static int stm32_eth_send(netdev_t *netdev, const struct iolist *iolist)
DEBUG("[stm32_eth] TX completed\n");
/* Error check */
unsigned i = 0;
_debug_tx_descriptor_info(__LINE__);
int error = 0;
while (1) {
uint32_t status = tx_desc[i].status;
uint32_t status = tx_curr->status;
/* The Error Summary (ES) bit is set, if any error during TX occurred */
if (status & TX_DESC_STAT_ES) {
/* TODO: Report better event to reflect error */
netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
return -1;
if (status & TX_DESC_STAT_EC) {
DEBUG("[stm32_eth] collision in half duplex mode\n");
error = -EBUSY;
}
else if (status & TX_DESC_STAT_NC) {
DEBUG("[stm32_eth] no carrier detected during TX\n");
error = -ENETDOWN;
}
else {
/* don't detect underflow error here, as we trigger TX only
* after all descriptors have been handed over to the DMA.
* Hence, the DMA should never run out of descriptors during
* TX. */
DEBUG("[stm32_eth] unhandled error during TX\n");
error = -EIO;
}
_reset_eth_dma();
}
tx_curr = tx_curr->desc_next;
if (status & TX_DESC_STAT_LS) {
break;
}
i++;
}
netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
if (error) {
return error;
}
return (int)bytes_to_send;
}
@@ -533,10 +559,12 @@ static int get_rx_frame_size(void)
}
if (status & RX_DESC_STAT_DE) {
DEBUG("[stm32_eth] Overflow during RX\n");
_reset_eth_dma();
return -EOVERFLOW;
}
if (status & RX_DESC_STAT_ES) {
DEBUG("[stm32_eth] Error during RX\n");
_reset_eth_dma();
return -EIO;
}
if (status & RX_DESC_STAT_LS) {