
Merge pull request #17210 from benpicco/drivers/dose-ringbuffer

drivers/dose: make use of ringbuffer for RX
benpicco 2022-01-19 14:45:47 +01:00 committed by GitHub
commit 038b41453e
9 changed files with 661 additions and 67 deletions
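In outline, the driver switches from a single linear receive buffer to the new chunked ringbuffer: the UART ISR opens a chunk when a frame starts, adds octets as they arrive, closes the chunk when the frame ends, and the netif thread later consumes whole frames. Below is a minimal, self-contained sketch of that producer/consumer pattern using only the crb_* API added in this PR; rx_byte, process_frames, the buffer size and the payload are illustrative names and values, not part of the driver.

#include <stdio.h>
#include "chunked_ringbuffer.h"

static uint8_t storage[64];
static chunk_ringbuf_t rb;

/* producer side: called for every received byte, e.g. from a UART ISR */
static void rx_byte(uint8_t b, bool first, bool last)
{
    if (first) {
        crb_start_chunk(&rb);          /* frame start detected */
    }
    crb_add_byte(&rb, b);              /* returns false if the buffer is full */
    if (last) {
        crb_end_chunk(&rb, true);      /* keep the finished chunk for the consumer */
    }
}

/* consumer side: runs in thread context, e.g. from the netdev _isr/_recv path */
static void process_frames(void)
{
    size_t len;
    while (crb_get_chunk_size(&rb, &len)) {
        uint8_t frame[64];
        crb_consume_chunk(&rb, frame, sizeof(frame));
        printf("got frame of %u bytes\n", (unsigned)len);
    }
}

int main(void)
{
    crb_init(&rb, storage, sizeof(storage));

    const char msg[] = "hello";
    for (size_t i = 0; i < sizeof(msg); ++i) {
        rx_byte((uint8_t)msg[i], i == 0, i == sizeof(msg) - 1);
    }
    process_frames();
    return 0;
}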

View File

@ -7,6 +7,7 @@ ifneq (,$(filter dose_watchdog,$(USEMODULE)))
FEATURES_REQUIRED += periph_timer_periodic
endif
USEMODULE += chunked_ringbuffer
USEMODULE += eui_provider
USEMODULE += iolist
USEMODULE += netdev_eth

View File

@ -66,6 +66,14 @@ static uint16_t crc16_update(uint16_t crc, uint8_t octet)
return crc;
}
static void _crc_cb(void *ctx, uint8_t *data, size_t len)
{
uint16_t *crc = ctx;
for (uint8_t *end = data + len; data != end; ++data) {
*crc = crc16_update(*crc, *data);
}
}
static void _init_standby(dose_t *ctx, const dose_params_t *params)
{
ctx->standby_pin = params->standby_pin;
@ -145,8 +153,8 @@ static void _dose_watchdog_cb(void *arg, int chan)
switch (ctx->state) {
case DOSE_STATE_RECV:
if (ctx->recv_buf_ptr_last != ctx->recv_buf_ptr) {
ctx->recv_buf_ptr_last = ctx->recv_buf_ptr;
if (ctx->recv_buf_ptr_last != ctx->rb.cur) {
ctx->recv_buf_ptr_last = ctx->rb.cur;
break;
}
@ -194,11 +202,17 @@ static dose_signal_t state_transit_idle(dose_t *ctx, dose_signal_t signal)
_watchdog_stop();
if (ctx->state == DOSE_STATE_RECV) {
bool dirty = ctx->flags & DOSE_FLAG_RECV_BUF_DIRTY;
bool done = ctx->flags & DOSE_FLAG_END_RECEIVED;
/* We got here from RECV state. The driver's thread has to look
* if this frame should be processed. By queuing NETDEV_EVENT_ISR,
* the netif thread will call _isr at some time. */
SETBIT(ctx->flags, DOSE_FLAG_RECV_BUF_DIRTY);
netdev_trigger_event_isr(&ctx->netdev);
if (crb_end_chunk(&ctx->rb, !dirty && done)) {
netdev_trigger_event_isr(&ctx->netdev);
}
clear_recv_buf(ctx);
}
/* Enable interrupt for start bit sensing */
@ -221,11 +235,13 @@ static dose_signal_t state_transit_recv(dose_t *ctx, dose_signal_t signal)
* anymore. Disable RX Start IRQs during the transmission. */
_disable_sense(ctx);
_watchdog_start();
crb_start_chunk(&ctx->rb);
}
if (signal == DOSE_SIGNAL_UART) {
/* We received a new octet */
int esc = (ctx->flags & DOSE_FLAG_ESC_RECEIVED);
bool esc = ctx->flags & DOSE_FLAG_ESC_RECEIVED;
bool dirty = ctx->flags & DOSE_FLAG_RECV_BUF_DIRTY;
if (!esc && ctx->uart_octet == DOSE_OCTET_ESC) {
SETBIT(ctx->flags, DOSE_FLAG_ESC_RECEIVED);
}
@ -237,12 +253,9 @@ static dose_signal_t state_transit_recv(dose_t *ctx, dose_signal_t signal)
if (esc) {
CLRBIT(ctx->flags, DOSE_FLAG_ESC_RECEIVED);
}
/* Since the dirty flag is set after the RECV state is left,
* it indicates that the receive buffer contains unprocessed data
* from a previously received frame. Thus, we just ignore new data. */
if (!(ctx->flags & DOSE_FLAG_RECV_BUF_DIRTY)
&& ctx->recv_buf_ptr < DOSE_FRAME_LEN) {
ctx->recv_buf[ctx->recv_buf_ptr++] = ctx->uart_octet;
if (!dirty && !crb_add_byte(&ctx->rb, ctx->uart_octet)) {
SETBIT(ctx->flags, DOSE_FLAG_RECV_BUF_DIRTY);
}
}
}
@ -362,9 +375,8 @@ static void clear_recv_buf(dose_t *ctx)
{
unsigned irq_state = irq_disable();
ctx->recv_buf_ptr = 0;
#ifdef MODULE_DOSE_WATCHDOG
ctx->recv_buf_ptr_last = -1;
ctx->recv_buf_ptr_last = NULL;
#endif
CLRBIT(ctx->flags, DOSE_FLAG_RECV_BUF_DIRTY);
CLRBIT(ctx->flags, DOSE_FLAG_END_RECEIVED);
@ -374,58 +386,39 @@ static void clear_recv_buf(dose_t *ctx)
static void _isr(netdev_t *netdev)
{
uint8_t dst[ETHERNET_ADDR_LEN];
dose_t *ctx = container_of(netdev, dose_t, netdev);
unsigned irq_state;
int dirty, end;
/* Get current flags atomically */
irq_state = irq_disable();
dirty = (ctx->flags & DOSE_FLAG_RECV_BUF_DIRTY);
end = (ctx->flags & DOSE_FLAG_END_RECEIVED);
irq_restore(irq_state);
/* If the receive buffer does not contain any data just abort ... */
if (!dirty) {
DEBUG("dose _isr(): no frame -> drop\n");
return;
}
/* If we haven't received a valid END octet just drop the incomplete frame. */
if (!end) {
DEBUG("dose _isr(): incomplete frame -> drop\n");
clear_recv_buf(ctx);
return;
}
/* The set dirty flag prevents recv_buf or recv_buf_ptr from being
* touched in ISR context. Thus, it is safe to work with them without
* IRQs being disabled or mutexes being locked. */
size_t len;
/* Check for minimum length of an Ethernet packet */
if (ctx->recv_buf_ptr < sizeof(ethernet_hdr_t) + DOSE_FRAME_CRC_LEN) {
if (!crb_get_chunk_size(&ctx->rb, &len) ||
len < sizeof(ethernet_hdr_t) + DOSE_FRAME_CRC_LEN) {
DEBUG("dose _isr(): frame too short -> drop\n");
clear_recv_buf(ctx);
crb_consume_chunk(&ctx->rb, NULL, 0);
return;
}
/* Check the dst mac addr if the iface is not in promiscuous mode */
if (!(ctx->opts & DOSE_OPT_PROMISCUOUS)) {
ethernet_hdr_t *hdr = (ethernet_hdr_t *) ctx->recv_buf;
if ((hdr->dst[0] & 0x1) == 0 && memcmp(hdr->dst, ctx->mac_addr.uint8, ETHERNET_ADDR_LEN) != 0) {
/* get destination address - length of RX frame has been checked before */
crb_peek_bytes(&ctx->rb, dst, offsetof(ethernet_hdr_t, dst), sizeof(dst));
/* destination has to be either broadcast or our address */
if ((dst[0] & 0x1) == 0 && memcmp(dst, ctx->mac_addr.uint8, ETHERNET_ADDR_LEN) != 0) {
DEBUG("dose _isr(): dst mac not matching -> drop\n");
clear_recv_buf(ctx);
crb_consume_chunk(&ctx->rb, NULL, 0);
return;
}
}
/* Check the CRC */
uint16_t crc = 0xffff;
for (size_t i = 0; i < ctx->recv_buf_ptr; i++) {
crc = crc16_update(crc, ctx->recv_buf[i]);
}
crb_chunk_foreach(&ctx->rb, _crc_cb, &crc);
if (crc != 0x0000) {
DEBUG("dose _isr(): wrong crc 0x%04x -> drop\n", crc);
clear_recv_buf(ctx);
crb_consume_chunk(&ctx->rb, NULL, 0);
return;
}
@ -440,28 +433,21 @@ static int _recv(netdev_t *dev, void *buf, size_t len, void *info)
(void)info;
size_t pktlen = ctx->recv_buf_ptr - DOSE_FRAME_CRC_LEN;
if (!buf && !len) {
size_t pktlen;
/* Return the amount of received bytes */
return pktlen;
if (crb_get_chunk_size(&ctx->rb, &pktlen)) {
return pktlen - DOSE_FRAME_CRC_LEN;
} else {
return 0;
}
}
else if (!buf && len) {
/* The user drops the packet */
clear_recv_buf(ctx);
return pktlen;
}
else if (len < pktlen) {
/* The provided buffer is too small! */
DEBUG("dose _recv(): receive buffer too small\n");
clear_recv_buf(ctx);
if (crb_consume_chunk(&ctx->rb, buf, len)) {
return len;
} else {
return -1;
}
else {
/* Copy the packet to the provided buffer. */
memcpy(buf, ctx->recv_buf, pktlen);
clear_recv_buf(ctx);
return pktlen;
}
}
static uint8_t wait_for_state(dose_t *ctx, uint8_t state)
@ -748,9 +734,9 @@ static int _init(netdev_t *dev)
/* Set state machine to defaults */
irq_state = irq_disable();
ctx->opts = 0;
ctx->recv_buf_ptr = 0;
ctx->flags = 0;
ctx->state = DOSE_STATE_INIT;
crb_init(&ctx->rb, ctx->recv_buf, sizeof(ctx->recv_buf));
irq_restore(irq_state);
state(ctx, DOSE_SIGNAL_INIT);

View File

@ -56,6 +56,7 @@
#ifndef DOSE_H
#define DOSE_H
#include "chunked_ringbuffer.h"
#include "periph/uart.h"
#include "periph/gpio.h"
#include "net/netdev.h"
@ -154,9 +155,13 @@ typedef struct {
dose_state_t state; /**< Current state of the driver's state machine */
mutex_t state_mtx; /**< Is unlocked every time a state is (re)entered */
uint8_t recv_buf[DOSE_FRAME_LEN]; /**< Receive buffer for incoming frames */
size_t recv_buf_ptr; /**< Index of the next empty octet of the receive buffer */
chunk_ringbuf_t rb; /**< Ringbuffer to store received frames. */
/* Written to from interrupts (with irq_disable */
/* to prevent any simultaneous writes), */
/* consumed exclusively in the network stack's */
/* loop at _isr. */
#if defined(MODULE_DOSE_WATCHDOG) || DOXYGEN
size_t recv_buf_ptr_last; /**< Last value of recv_buf_ptr when the watchdog visited */
void *recv_buf_ptr_last; /**< Last value of recv_buf_ptr when the watchdog visited */
#endif
#if !defined(MODULE_PERIPH_UART_RXSTART_IRQ) || DOXYGEN
gpio_t sense_pin; /**< GPIO to sense for start bits on the UART's rx line */

View File

@ -0,0 +1 @@
include $(RIOTBASE)/Makefile.base

View File

@ -0,0 +1,237 @@
/*
* Copyright (C) 2021 ML!PA Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @{
*
* @file
*
* @author Benjamin Valentin <benjamin.valentin@ml-pa.com>
*/
#include <string.h>
#include "atomic_utils.h"
#include "chunked_ringbuffer.h"
#include "irq.h"
static int _get_free_chunk(chunk_ringbuf_t *rb)
{
int idx = rb->chunk_cur;
for (int i = 0; i < CHUNK_NUM_MAX; ++i) {
uintptr_t _ptr = atomic_load_uintptr((uintptr_t *)&rb->chunk_start[idx]);
if (_ptr == 0) {
return idx;
}
if (++idx == CHUNK_NUM_MAX) {
idx = 0;
}
}
return -1;
}
static int _get_complete_chunk(chunk_ringbuf_t *rb)
{
int idx = rb->chunk_cur;
for (int i = 0; i < CHUNK_NUM_MAX; ++i) {
uintptr_t _ptr = atomic_load_uintptr((uintptr_t *)&rb->chunk_start[idx]);
if (_ptr) {
return idx;
}
if (++idx == CHUNK_NUM_MAX) {
idx = 0;
}
}
return -1;
}
bool crb_add_bytes(chunk_ringbuf_t *rb, const void *data, size_t len)
{
const uint8_t *in = data;
for (size_t i = 0; i < len; ++i) {
if (!crb_add_byte(rb, in[i])) {
return false;
}
}
return true;
}
bool crb_add_chunk(chunk_ringbuf_t *rb, const void *data, size_t len)
{
if (!crb_start_chunk(rb)) {
return false;
}
bool keep = crb_add_bytes(rb, data, len);
return crb_end_chunk(rb, keep);
}
static unsigned _get_cur_len(chunk_ringbuf_t *rb)
{
if (rb->cur > rb->cur_start) {
return rb->cur - rb->cur_start;
} else {
/* buffer_end points to the last element */
return (rb->cur - rb->buffer) + 1
+ (rb->buffer_end - rb->cur_start);
}
}
bool crb_end_chunk(chunk_ringbuf_t *rb, bool keep)
{
int idx;
/* no chunk was started */
if (rb->cur_start == NULL) {
return false;
}
if (keep) {
idx = _get_free_chunk(rb);
} else {
idx = -1;
}
/* discard chunk */
if (idx < 0) {
if (rb->protect == rb->cur_start) {
rb->protect = NULL;
}
rb->cur = rb->cur_start;
rb->cur_start = NULL;
return false;
}
/* store complete chunk */
rb->chunk_start[idx] = rb->cur_start;
rb->chunk_len[idx] = _get_cur_len(rb);
rb->cur_start = NULL;
return true;
}
bool crb_get_chunk_size(chunk_ringbuf_t *rb, size_t *len)
{
int idx = _get_complete_chunk(rb);
if (idx < 0) {
return false;
}
*len = rb->chunk_len[idx];
return true;
}
bool crb_peek_bytes(chunk_ringbuf_t *rb, void *dst, size_t offset, size_t len)
{
int idx = _get_complete_chunk(rb);
if (idx < 0) {
return false;
}
if (offset + len > rb->chunk_len[idx]) {
return false;
}
const uint8_t *start = rb->chunk_start[idx];
start += offset;
if (start > rb->buffer_end) {
/* the peek position lies past the end of the work area: wrap it around */
start = rb->buffer + (start - rb->buffer_end - 1);
memcpy(dst, start, len);
} else if (start + len <= rb->buffer_end) {
memcpy(dst, start, len);
} else {
size_t len_0 = 1 + rb->buffer_end - start;
memcpy(dst, start, len_0);
memcpy((uint8_t *)dst + len_0, rb->buffer, len - len_0);
}
return true;
}
bool crb_chunk_foreach(chunk_ringbuf_t *rb, crb_foreach_callback_t func, void *ctx)
{
size_t len;
int idx = _get_complete_chunk(rb);
if (idx < 0) {
return false;
}
len = rb->chunk_len[idx];
if (rb->chunk_start[idx] + len <= rb->buffer_end) {
/* chunk is continuous */
func(ctx, rb->chunk_start[idx], len);
} else {
/* chunk wraps around */
size_t len_0 = 1 + rb->buffer_end - rb->chunk_start[idx];
func(ctx, rb->chunk_start[idx], len_0);
func(ctx, rb->buffer, len - len_0);
}
return true;
}
bool crb_consume_chunk(chunk_ringbuf_t *rb, void *dst, size_t len)
{
int idx = _get_complete_chunk(rb);
if (idx < 0) {
return false;
}
if (len > rb->chunk_len[idx]) {
len = rb->chunk_len[idx];
}
if (dst) {
if (rb->chunk_start[idx] + len <= rb->buffer_end) {
/* chunk is continuous */
memcpy(dst, rb->chunk_start[idx], len);
} else {
/* chunk wraps around */
uint8_t *dst8 = dst;
size_t len_0 = 1 + rb->buffer_end - rb->chunk_start[idx];
memcpy(dst8, rb->chunk_start[idx], len_0);
memcpy(dst8 + len_0, rb->buffer, len - len_0);
}
}
unsigned state = irq_disable();
rb->chunk_start[idx] = NULL;
/* advance protect marker */
idx = _get_complete_chunk(rb);
if (idx < 0) {
rb->protect = rb->cur_start;
} else {
rb->protect = rb->chunk_start[idx];
}
/* advance first used slot nr */
rb->chunk_cur = (rb->chunk_cur + 1) % CHUNK_NUM_MAX;
irq_restore(state);
return true;
}
void crb_init(chunk_ringbuf_t *rb, void *buffer, size_t len)
{
memset(rb, 0, sizeof(*rb));
rb->buffer = buffer;
rb->buffer_end = &rb->buffer[len - 1];
rb->cur = rb->buffer;
}
/** @} */

View File

@ -0,0 +1,243 @@
/*
* Copyright (C) 2021 ML!PA Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @defgroup sys_chunk_buffer Chunked Ringbuffer
* @ingroup sys
* @brief Implementation of a Ringbuffer to store chunks of data
* @{
*
* @file
* @brief Chunked Ringbuffer
*
* A chunked ringbuffer is a ring buffer that stores variable-sized chunks of
* data, e.g. received frames, which can be written byte by byte and later read
* back as complete chunks.
*
* @author Benjamin Valentin <benjamin.valentin@ml-pa.com>
*/
#ifndef CHUNKED_RINGBUFFER_H
#define CHUNKED_RINGBUFFER_H
#include <stdbool.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief The maximum number of chunks that can be stored in a Chunked Ringbuffer
*
*/
#ifndef CHUNK_NUM_MAX
#define CHUNK_NUM_MAX (4)
#endif
/**
* @brief A chunked ringbuffer
*
*/
typedef struct {
uint8_t *buffer; /**< pointer to the memory to hold the data */
uint8_t *buffer_end; /**< last data element */
uint8_t *cur; /**< current write pointer */
uint8_t *cur_start; /**< start of the currently written chunk */
uint8_t *protect; /**< start of the first valid chunk */
uint8_t *chunk_start[CHUNK_NUM_MAX]; /**< Array to hold start of done chunks */
uint16_t chunk_len[CHUNK_NUM_MAX]; /**< Length of valid chunks */
uint8_t chunk_cur; /**< Index of the first valid chunk */
} chunk_ringbuf_t;
/**
* @brief Callback function for @ref crb_chunk_foreach
*
* @param[in] ctx Callback context
* @param[in] bytes Chunk data
* @param[in] len Length of data
*/
typedef void (*crb_foreach_callback_t)(void *ctx, uint8_t *bytes, size_t len);
/**
* @brief Initialize a Chunked Ringbuffer
*
* @param[in] rb The Ringbuffer to work on
* @param[in] buffer The Ringbuffer work area
* @param[in] len Size of the Ringbuffer work area
*/
void crb_init(chunk_ringbuf_t *rb, void *buffer, size_t len);
/**
* @brief Start a new chunk on the ringbuffer
*
* @note This function is expected to be called in ISR context / with
* interrupts disabled.
*
* @param[in] rb The Ringbuffer to work on
*
* @return true If a new chunk could be started
* @return false If the ringbuffer is full
*/
static inline bool crb_start_chunk(chunk_ringbuf_t *rb)
{
/* the write pointer has caught up with the start of the first valid chunk,
 * so the buffer is full */
if (rb->cur == rb->protect) {
return false;
}
rb->cur_start = rb->cur;
if (rb->protect == NULL) {
rb->protect = rb->cur_start;
}
return true;
}
/**
* @brief Insert a byte into the current chunk
*
* @note This function is expected to be called in ISR context / with
* interrupts disabled.
*
* @pre A new chunk has been started with @ref crb_start_chunk
*
* @param[in] rb The Ringbuffer to work on
* @param[in] b The byte to write
*
* @return true If the byte could be written
* @return false If the ringbuffer is full
*/
static inline bool crb_add_byte(chunk_ringbuf_t *rb, uint8_t b)
{
/* if this is the first chunk, protect will be at start */
if (rb->cur == rb->protect &&
rb->cur != rb->cur_start) {
return false;
}
*rb->cur = b;
/* handle wrap around */
if (rb->cur == rb->buffer_end) {
rb->cur = rb->buffer;
} else {
++rb->cur;
}
return true;
}
/**
* @brief Insert a number of bytes into the current chunk
*
* @note This function is expected to be called in ISR context / with
* interrupts disabled.
*
* @pre A new chunk has been started with @ref crb_start_chunk
*
* @param[in] rb The Ringbuffer to work on
* @param[in] data The data to write
* @param[in] len Size of data
*
* @return true If the bytes could be written
* @return false If the ringbuffer is full
*/
bool crb_add_bytes(chunk_ringbuf_t *rb, const void *data, size_t len);
/**
* @brief Close the current chunk
*
* @note This function is expected to be called in ISR context / with
* interrupts disabled.
*
* @param[in] rb The Ringbuffer to work on
* @param[in] valid True if the chunk is valid and should be stored
* False if the current chunk should be discarded
*
* @return true If the chunk could be stored in the valid chunk array
* @return false If there is no more space in the valid chunk array
*/
bool crb_end_chunk(chunk_ringbuf_t *rb, bool valid);
/**
* @brief Add a complete chunk to the Ringbuffer
*
* @note This function is expected to be called in ISR context / with
* interrupts disabled.
*
* This is a convenience function that combines @ref crb_start_chunk,
* @ref crb_add_bytes and @ref crb_end_chunk
*
* @param[in] rb The Ringbuffer to work on
* @param[in] data The data to write
* @param[in] len Size of data
*
* @return true If the chunk could be added to the valid chunk array
* @return false There was not enough space and the chunk was discarded
*/
bool crb_add_chunk(chunk_ringbuf_t *rb, const void *data, size_t len);
/**
* @brief Get the size of the first valid chunk
*
* @param[in] rb The Ringbuffer to work on
* @param[out] len Pointer to store the size of the first valid chunk
*
* @return true If a valid chunk exists and @p len was written
* @return false If no valid chunk exists
*/
bool crb_get_chunk_size(chunk_ringbuf_t *rb, size_t *len);
/**
* @brief Get a number of bytes from the first valid chunk without consuming it.
*
* @param[in] rb The Ringbuffer to work on
* @param[out] dst Destination buffer
* @param[in] offset Offset to the start of the chunk
* @param[in] len Number of bytes to read
*
* @return true If the data could be read
* @return false If no valid chunk exists or the bytes could not be read
*/
bool crb_peek_bytes(chunk_ringbuf_t *rb, void *dst, size_t offset, size_t len);
/**
* @brief Remove a chunk from the valid chunk array
*
* @param[in] rb The Ringbuffer to work on
* @param[out] dst Destination where the chunk contents should be copied to.
* May be NULL, then the chunk is just discarded.
* @param[in] len Max number of bytes to read. If there are bytes left in the
* chunk beyond that, they will be discarded
*
* @return true If a chunk was consumed
* @return false If no valid chunk exists
*/
bool crb_consume_chunk(chunk_ringbuf_t *rb, void *dst, size_t len);
/**
* @brief Execute a callback for each byte in the first valid chunk
* The callback function may be called twice if the chunk is non-continuous.
*
* This function will not consume the chunk.
*
* @param[in] rb The Ringbuffer to work on
* @param[in] func The function to call for each byte
* @param[in] ctx Optional function argument
*
* @return true If a valid chunk exists on which the function was executed
* @return false If no valid chunk exists
*/
bool crb_chunk_foreach(chunk_ringbuf_t *rb, crb_foreach_callback_t func, void *ctx);
#ifdef __cplusplus
}
#endif
#endif /* CHUNKED_RINGBUFFER_H */
/** @} */
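Beyond the unit test below, a short sketch of the peek-then-consume pattern this API is designed for (the dose driver uses it the same way to check the destination address before accepting a frame); the frame contents, sizes and the header check here are invented for illustration.

#include <stdio.h>
#include "chunked_ringbuffer.h"

int main(void)
{
    uint8_t storage[32];
    chunk_ringbuf_t rb;
    crb_init(&rb, storage, sizeof(storage));

    /* store one complete "frame", including its terminating NUL byte */
    crb_add_chunk(&rb, "Hi, payload", 12);

    /* inspect the first byte in place, without consuming the chunk */
    uint8_t first;
    size_t len;
    if (crb_peek_bytes(&rb, &first, 0, 1) && first == 'H' &&
        crb_get_chunk_size(&rb, &len)) {
        char out[16];
        crb_consume_chunk(&rb, out, sizeof(out));   /* accept: copy the frame out */
        printf("frame of %u bytes: %s\n", (unsigned)len, out);
    }
    else {
        crb_consume_chunk(&rb, NULL, 0);            /* reject: discard without copying */
    }
    return 0;
}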

View File

@ -0,0 +1 @@
include $(RIOTBASE)/Makefile.base

View File

@ -0,0 +1 @@
USEMODULE += chunked_ringbuffer

View File

@ -0,0 +1,119 @@
/*
* Copyright (C) 2021 ML!PA Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @{
*
* @file
*/
#include <string.h>
#include "embUnit.h"
#include "chunked_ringbuffer.h"
static void test_crb_add_and_consume(void)
{
size_t len;
uint8_t buffer[16];
char buf_out[6];
chunk_ringbuf_t cb;
crb_init(&cb, buffer, sizeof(buffer));
TEST_ASSERT(crb_add_chunk(&cb, "one", 4));
TEST_ASSERT(crb_add_chunk(&cb, "two", 4));
TEST_ASSERT(crb_add_chunk(&cb, "three", 6));
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(4, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("one", buf_out);
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(4, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("two", buf_out);
TEST_ASSERT(crb_add_chunk(&cb, "four", 5));
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(6, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("three", buf_out);
TEST_ASSERT(crb_add_chunk(&cb, "five", 5));
TEST_ASSERT(crb_add_chunk(&cb, "six", 4));
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(5, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("four", buf_out);
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(5, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("five", buf_out);
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(4, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("six", buf_out);
}
static void test_crb_add_while_consume(void)
{
size_t len;
uint8_t buffer[16];
char buf_out[12];
chunk_ringbuf_t cb;
crb_init(&cb, buffer, sizeof(buffer));
TEST_ASSERT(crb_add_chunk(&cb, "one", 4));
TEST_ASSERT(crb_start_chunk(&cb));
TEST_ASSERT(crb_add_bytes(&cb, "Hello", 5));
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(4, len);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("one", buf_out);
TEST_ASSERT(!crb_get_chunk_size(&cb, &len));
TEST_ASSERT(!crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT(crb_add_bytes(&cb, "World", 6));
TEST_ASSERT(crb_end_chunk(&cb, true));
TEST_ASSERT(crb_get_chunk_size(&cb, &len));
TEST_ASSERT_EQUAL_INT(11, len);
TEST_ASSERT(crb_peek_bytes(&cb, buf_out, 5, 6));
TEST_ASSERT_EQUAL_STRING("World", buf_out);
TEST_ASSERT(crb_consume_chunk(&cb, buf_out, sizeof(buf_out)));
TEST_ASSERT_EQUAL_STRING("HelloWorld", buf_out);
}
static Test *chunked_ringbuffer_tests(void)
{
EMB_UNIT_TESTFIXTURES(fixtures) {
new_TestFixture(test_crb_add_and_consume),
new_TestFixture(test_crb_add_while_consume),
};
EMB_UNIT_TESTCALLER(crb_tests, NULL, NULL, fixtures);
return (Test *)&crb_tests;
}
void tests_chunked_ringbuffer(void)
{
TESTS_RUN(chunked_ringbuffer_tests());
}
/** @} */