mirror of https://github.com/RIOT-OS/RIOT.git
Merge pull request #13962 from haukepetersen/fix_nimble_netifmcastlock
nimble/netif: fix deadlock when sending multicast under load
This commit is contained in: commit c2896cc798
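The gist of the change, as far as it can be read from the hunks below: the broadcast/multicast send path used to walk the connection table via nimble_netif_conn_foreach(), which holds the module's _lock for the whole walk (see the mutex_unlock() at its end in the last hunk) and calls the potentially blocking _send_pkt() from inside that walk. The fix introduces a cursor-style lookup, nimble_netif_conn_get_next(), so the blocking send happens with no table lock held. Below is a minimal, self-contained sketch of the two patterns under that assumption; table_lock, entry_used, blocking_send and the other names are hypothetical and are not RIOT/NimBLE identifiers.

/* Generic sketch only, not RIOT code. Build e.g. with: gcc -pthread sketch.c */
#include <stdbool.h>
#include <pthread.h>

#define ENTRIES 4

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool entry_used[ENTRIES];

/* stands in for _send_pkt(): may block for a long time under load */
static void blocking_send(int idx)
{
    (void)idx;
}

/* Pattern A (old): walk the table with the lock held and send from inside
 * the walk. If blocking_send() waits for an event whose handler also needs
 * table_lock, nothing ever makes progress again. */
static void send_all_callback_style(void)
{
    pthread_mutex_lock(&table_lock);
    for (int i = 0; i < ENTRIES; i++) {
        if (entry_used[i]) {
            blocking_send(i);           /* called with table_lock held */
        }
    }
    pthread_mutex_unlock(&table_lock);
}

/* Pattern B (new): only pick the next entry, then send outside of any lock */
static int next_used(int prev)
{
    for (int i = prev + 1; i < ENTRIES; i++) {
        if (entry_used[i]) {
            return i;
        }
    }
    return -1;
}

static void send_all_cursor_style(void)
{
    for (int i = next_used(-1); i >= 0; i = next_used(i)) {
        blocking_send(i);               /* no lock held here */
    }
}

int main(void)
{
    entry_used[0] = true;
    entry_used[2] = true;
    send_all_cursor_style();
    send_all_callback_style();  /* harmless here: blocking_send() never blocks */
    return 0;
}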
@@ -126,6 +126,20 @@ int nimble_netif_conn_get_by_gaphandle(uint16_t gaphandle);
 void nimble_netif_conn_foreach(uint16_t filter,
                                nimble_netif_conn_iter_t cb, void *arg);
 
+/**
+ * @brief   Find the next context that matches the filter condition
+ *
+ * This function allows for iterating connection contexts in a non-blocking way.
+ *
+ * @param[in] handle        last used handle, set to NIMBLE_NETIF_CONN_INVALID
+ *                          to get the first matching entry
+ * @param[in] filter        filter mask
+ *
+ * @return  handle of the next matching connection context
+ * @return  NIMBLE_NETIF_CONN_INVALID if no matching context was found
+ */
+int nimble_netif_conn_get_next(int handle, uint16_t filter);
+
 /**
  * @brief   Count the number of connections contexts for which the given filter
  *          applies
 
@@ -141,14 +141,6 @@ static int _send_pkt(nimble_netif_conn_t *conn, gnrc_pktsnip_t *pkt)
     return num_bytes;
 }
 
-static int _netif_send_iter(nimble_netif_conn_t *conn,
-                            int handle, void *arg)
-{
-    (void)handle;
-    _send_pkt(conn, (gnrc_pktsnip_t *)arg);
-    return 0;
-}
-
 static int _netif_send(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt)
 {
     assert(pkt->type == GNRC_NETTYPE_NETIF);
@@ -160,9 +152,12 @@ static int _netif_send(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt)
     /* if packet is bcast or mcast, we send it to every connected node */
     if (hdr->flags &
         (GNRC_NETIF_HDR_FLAGS_BROADCAST | GNRC_NETIF_HDR_FLAGS_MULTICAST)) {
-        nimble_netif_conn_foreach(NIMBLE_NETIF_L2CAP_CONNECTED,
-                                  _netif_send_iter, pkt->next);
-        res = (int)gnrc_pkt_len(pkt->next);
+        int handle = nimble_netif_conn_get_next(NIMBLE_NETIF_CONN_INVALID,
+                                                NIMBLE_NETIF_L2CAP_CONNECTED);
+        while (handle != NIMBLE_NETIF_CONN_INVALID) {
+            res = _send_pkt(nimble_netif_conn_get(handle), pkt->next);
+            handle = nimble_netif_conn_get_next(handle, NIMBLE_NETIF_L2CAP_CONNECTED);
+        }
     }
     /* send unicast */
     else {
@@ -188,6 +188,21 @@ void nimble_netif_conn_foreach(uint16_t filter,
     mutex_unlock(&_lock);
 }
 
+int nimble_netif_conn_get_next(int handle, uint16_t filter)
+{
+    /* if handle is undefined, start from the beginning */
+    if (handle < 0) {
+        handle = -1;
+    }
+    for (int i = (handle + 1); i < (int)CONN_CNT; i++) {
+        if (_conn[i].state & filter) {
+            return i;
+        }
+    }
+
+    return NIMBLE_NETIF_CONN_INVALID;
+}
+
 unsigned nimble_netif_conn_count(uint16_t filter)
 {
     unsigned cnt = 0;
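A note on the tradeoff visible in the hunk above: nimble_netif_conn_get_next() scans _conn[] without touching _lock, so callers iterating with it never block on the connection table; in exchange, a context's state may change between picking a handle and using it. A hedged usage sketch of the new cursor API follows; notify_peer() and the include path are assumptions for illustration, not part of the diff.

#include "nimble_netif_conn.h"    /* header from the first hunk; path assumed */

/* hypothetical application hook, free to block */
extern void notify_peer(nimble_netif_conn_t *conn);

/* visit every L2CAP-connected context without holding the table lock */
static void notify_all_connected(void)
{
    int handle = nimble_netif_conn_get_next(NIMBLE_NETIF_CONN_INVALID,
                                            NIMBLE_NETIF_L2CAP_CONNECTED);
    while (handle != NIMBLE_NETIF_CONN_INVALID) {
        notify_peer(nimble_netif_conn_get(handle));
        handle = nimble_netif_conn_get_next(handle, NIMBLE_NETIF_L2CAP_CONNECTED);
    }
}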