path: root/test
author    Maxim Uvarov <maxim.uvarov@linaro.org>  2016-02-08 20:47:58 +0300
committer Maxim Uvarov <maxim.uvarov@linaro.org>  2016-02-08 20:47:58 +0300
commit    bbd5ac81d9e606415f2ad2ecb999c2ff8c3716c9 (patch)
tree      d6da6a9a491b05721cd562b1c22ea49c6076b107 /test
parent    ee90f447a6f5c76ab7751e6b747d1da3ac407fb1 (diff)
parent    fd93da30b42937f62afecccd43809815044f5a45 (diff)
Merge branch 'next' (v1.7.0.0)
Diffstat (limited to 'test')
-rw-r--r--  test/api_test/odp_common.c | 4
-rw-r--r--  test/performance/odp_atomic.c | 4
-rw-r--r--  test/performance/odp_l2fwd.c | 1126
-rw-r--r--  test/performance/odp_pktio_perf.c | 34
-rw-r--r--  test/performance/odp_scheduling.c | 28
-rw-r--r--  test/validation/Makefile.am | 8
-rw-r--r--  test/validation/atomic/.gitignore | 1
-rw-r--r--  test/validation/atomic/Makefile.am | 10
-rw-r--r--  test/validation/atomic/atomic.c | 881
-rw-r--r--  test/validation/atomic/atomic.h | 38
-rw-r--r--  test/validation/atomic/atomic_main.c (renamed from test/validation/synchronizers/synchronizers_main.c) | 4
-rw-r--r--  test/validation/barrier/.gitignore | 1
-rw-r--r--  test/validation/barrier/Makefile.am | 10
-rw-r--r--  test/validation/barrier/barrier.c | 393
-rw-r--r--  test/validation/barrier/barrier.h | 29
-rw-r--r--  test/validation/barrier/barrier_main.c | 12
-rw-r--r--  test/validation/classification/classification.h | 10
-rw-r--r--  test/validation/classification/odp_classification_basic.c | 4
-rw-r--r--  test/validation/classification/odp_classification_common.c | 99
-rw-r--r--  test/validation/classification/odp_classification_test_pmr.c | 409
-rw-r--r--  test/validation/classification/odp_classification_tests.c | 76
-rw-r--r--  test/validation/classification/odp_classification_testsuites.h | 5
-rw-r--r--  test/validation/common/odp_cunit_common.h | 7
-rw-r--r--  test/validation/crypto/crypto.c | 3
-rw-r--r--  test/validation/init/init.c | 4
-rw-r--r--  test/validation/lock/.gitignore | 1
-rw-r--r--  test/validation/lock/Makefile.am | 10
-rw-r--r--  test/validation/lock/lock.c (renamed from test/validation/synchronizers/synchronizers.c) | 608
-rw-r--r--  test/validation/lock/lock.h | 45
-rw-r--r--  test/validation/lock/lock_main.c | 12
-rw-r--r--  test/validation/pktio/pktio.c | 594
-rw-r--r--  test/validation/pktio/pktio.h | 11
-rw-r--r--  test/validation/queue/queue.c | 50
-rw-r--r--  test/validation/scheduler/scheduler.c | 89
-rw-r--r--  test/validation/std_clib/std_clib.c | 38
-rw-r--r--  test/validation/synchronizers/.gitignore | 1
-rw-r--r--  test/validation/synchronizers/Makefile.am | 10
-rw-r--r--  test/validation/synchronizers/synchronizers.h | 54
-rw-r--r--  test/validation/system/system.c | 115
-rw-r--r--  test/validation/system/system.h | 13
-rw-r--r--  test/validation/timer/timer.c | 44
41 files changed, 3653 insertions, 1242 deletions
diff --git a/test/api_test/odp_common.c b/test/api_test/odp_common.c
index cebaa1271..70aee96f8 100644
--- a/test/api_test/odp_common.c
+++ b/test/api_test/odp_common.c
@@ -41,8 +41,8 @@ void odp_print_system_info(void)
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %"PRIu64"\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("CPU count: %i\n", odp_cpu_count());
printf("CPU mask: %s\n", str);
diff --git a/test/performance/odp_atomic.c b/test/performance/odp_atomic.c
index 054f653b7..067329bdc 100644
--- a/test/performance/odp_atomic.c
+++ b/test/performance/odp_atomic.c
@@ -337,8 +337,8 @@ void odp_print_system_info(void)
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %"PRIu64"\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("CPU count: %i\n", odp_cpu_count());
printf("CPU mask: %s\n", str);
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 31f6a7088..4de93fdd1 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -47,12 +47,19 @@
*/
#define MAX_PKT_BURST 32
+/** Maximum number of pktio queues per interface */
+#define MAX_QUEUES 32
+
+/** Maximum number of pktio interfaces */
+#define MAX_PKTIOS 8
+
/**
* Packet input mode
*/
typedef enum pkt_in_mode_t {
DIRECT_RECV,
- SCHED_NONE,
+ PLAIN_QUEUE,
+ SCHED_PARALLEL,
SCHED_ATOMIC,
SCHED_ORDERED,
} pkt_in_mode_t;
@@ -66,6 +73,7 @@ typedef enum pkt_in_mode_t {
typedef struct {
int cpu_count;
int if_count; /**< Number of interfaces to be used */
+ int num_workers; /**< Number of worker threads */
char **if_names; /**< Array of pointers to interface names */
pkt_in_mode_t mode; /**< Packet input mode */
int time; /**< Time in seconds to run. */
@@ -97,8 +105,22 @@ typedef union {
/**
* Thread specific arguments
*/
-typedef struct {
- int src_idx; /**< Source interface identifier */
+typedef struct thread_args_t {
+ int thr_idx;
+ int num_pktio;
+
+ struct {
+ odp_pktio_t rx_pktio;
+ odp_pktio_t tx_pktio;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ odp_queue_t rx_queue;
+ int rx_idx;
+ int tx_idx;
+ int rx_queue_idx;
+ int tx_queue_idx;
+ } pktio[MAX_PKTIOS];
+
stats_t *stats; /**< Pointer to per thread stats */
} thread_args_t;
@@ -112,14 +134,25 @@ typedef struct {
appl_args_t appl;
/** Thread specific arguments */
thread_args_t thread[MAX_WORKERS];
- /** Table of pktio handles */
- odp_pktio_t pktios[ODP_CONFIG_PKTIO_ENTRIES];
/** Table of port ethernet addresses */
- odph_ethaddr_t port_eth_addr[ODP_CONFIG_PKTIO_ENTRIES];
+ odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
/** Table of dst ethernet addresses */
- odph_ethaddr_t dst_eth_addr[ODP_CONFIG_PKTIO_ENTRIES];
+ odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
/** Table of dst ports */
- int dst_port[ODP_CONFIG_PKTIO_ENTRIES];
+ int dst_port[MAX_PKTIOS];
+ /** Table of pktio handles */
+ struct {
+ odp_pktio_t pktio;
+ odp_pktin_queue_t pktin[MAX_QUEUES];
+ odp_pktout_queue_t pktout[MAX_QUEUES];
+ odp_queue_t rx_q[MAX_QUEUES];
+ int num_rx_thr;
+ int num_tx_thr;
+ int num_rx_queue;
+ int num_tx_queue;
+ int next_rx_queue;
+ int next_tx_queue;
+ } pktios[MAX_PKTIOS];
} args_t;
/** Global pointer to args */
@@ -127,22 +160,97 @@ static args_t *gbl_args;
/** Global barrier to synchronize main and workers */
static odp_barrier_t barrier;
-/* helper funcs */
-static inline int lookup_dest_port(odp_packet_t pkt);
-static inline int find_dest_port(int port);
-static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num);
-static void fill_eth_addrs(odp_packet_t pkt_tbl[], unsigned num,
- int dst_port);
-static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
-static void print_info(char *progname, appl_args_t *appl_args);
-static void usage(char *progname);
+/**
+ * Lookup the destination port for a given packet
+ *
+ * @param pkt ODP packet handle
+ */
+static inline int lookup_dest_port(odp_packet_t pkt)
+{
+ int i, src_idx;
+ odp_pktio_t pktio_src;
+
+ pktio_src = odp_packet_input(pkt);
+
+ for (src_idx = -1, i = 0; gbl_args->pktios[i].pktio
+ != ODP_PKTIO_INVALID; ++i)
+ if (gbl_args->pktios[i].pktio == pktio_src)
+ src_idx = i;
+
+ if (src_idx == -1)
+ LOG_ABORT("Failed to determine pktio input\n");
+
+ return gbl_args->dst_port[src_idx];
+}
+
+/**
+ * Drop packets which input parsing marked as containing errors.
+ *
+ * Frees packets with error and modifies pkt_tbl[] to only contain packets with
+ * no detected errors.
+ *
+ * @param pkt_tbl Array of packets
+ * @param num Number of packets in pkt_tbl[]
+ *
+ * @return Number of packets dropped
+ */
+static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
+{
+ odp_packet_t pkt;
+ unsigned dropped = 0;
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ odp_packet_free(pkt); /* Drop */
+ dropped++;
+ } else if (odp_unlikely(i != j++)) {
+ pkt_tbl[j - 1] = pkt;
+ }
+ }
+
+ return dropped;
+}
+
+/**
+ * Fill packets' eth addresses according to the destination port
+ *
+ * @param pkt_tbl Array of packets
+ * @param num Number of packets in the array
+ * @param dst_port Destination port
+ */
+static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
+ unsigned num, int dst_port)
+{
+ odp_packet_t pkt;
+ odph_ethhdr_t *eth;
+ unsigned i;
+
+ if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
+ return;
+
+ for (i = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+ if (odp_packet_has_eth(pkt)) {
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+
+ if (gbl_args->appl.src_change)
+ eth->src = gbl_args->port_eth_addr[dst_port];
+
+ if (gbl_args->appl.dst_change)
+ eth->dst = gbl_args->dst_eth_addr[dst_port];
+ }
+ }
+}
/**
- * Packet IO worker thread using ODP queues
+ * Packet IO worker thread using scheduled queues
*
* @param arg thread arguments of type 'thread_args_t *'
*/
-static void *pktio_queue_thread(void *arg)
+static void *run_worker_sched_mode(void *arg)
{
odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
@@ -150,20 +258,34 @@ static void *pktio_queue_thread(void *arg)
int thr;
uint64_t wait;
int dst_idx;
- odp_pktio_t pktio_dst;
+ int thr_idx;
+ int i;
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
thread_args_t *thr_args = arg;
stats_t *stats = thr_args->stats;
thr = odp_thread_id();
+ thr_idx = thr_args->thr_idx;
+
+ memset(pktout, 0, sizeof(pktout));
+ for (i = 0; i < gbl_args->appl.if_count; i++) {
+ if (gbl_args->pktios[i].num_tx_queue ==
+ gbl_args->appl.num_workers)
+ pktout[i] = gbl_args->pktios[i].pktout[thr_idx];
+ else if (gbl_args->pktios[i].num_tx_queue == 1)
+ pktout[i] = gbl_args->pktios[i].pktout[0];
+ else
+ LOG_ABORT("Bad number of output queues %i\n", i);
+ }
- printf("[%02i] QUEUE mode\n", thr);
+ printf("[%02i] SCHEDULED QUEUE mode\n", thr);
odp_barrier_wait(&barrier);
wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * 100);
/* Loop packets */
while (!exit_threads) {
- int sent, i;
+ int sent;
unsigned tx_drops;
pkts = odp_schedule_multi(NULL, wait, ev_tbl, MAX_PKT_BURST);
@@ -192,9 +314,7 @@ static void *pktio_queue_thread(void *arg)
/* packets from the same queue are from the same interface */
dst_idx = lookup_dest_port(pkt_tbl[0]);
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- pktio_dst = gbl_args->pktios[dst_idx];
-
- sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);
+ sent = odp_pktio_send_queue(pktout[dst_idx], pkt_tbl, pkts);
sent = odp_unlikely(sent < 0) ? 0 : sent;
tx_drops = pkts - sent;
@@ -217,43 +337,94 @@ static void *pktio_queue_thread(void *arg)
}
/**
- * Lookup the destination port for a given packet
+ * Packet IO worker thread using plain queues
*
- * @param pkt ODP packet handle
+ * @param arg thread arguments of type 'thread_args_t *'
*/
-static inline int lookup_dest_port(odp_packet_t pkt)
+static void *run_worker_plain_queue_mode(void *arg)
{
- int i, src_idx;
- odp_pktio_t pktio_src;
+ int thr;
+ int pkts;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int dst_idx, num_pktio;
+ odp_queue_t queue;
+ odp_pktout_queue_t pktout;
+ int pktio = 0;
+ thread_args_t *thr_args = arg;
+ stats_t *stats = thr_args->stats;
- pktio_src = odp_packet_input(pkt);
+ thr = odp_thread_id();
- for (src_idx = -1, i = 0; gbl_args->pktios[i] != ODP_PKTIO_INVALID; ++i)
- if (gbl_args->pktios[i] == pktio_src)
- src_idx = i;
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
- if (src_idx == -1)
- LOG_ABORT("Failed to determine pktio input\n");
+ printf("[%02i] num pktios %i, PLAIN QUEUE mode\n", thr, num_pktio);
+ odp_barrier_wait(&barrier);
- return gbl_args->dst_port[src_idx];
-}
+ /* Loop packets */
+ while (!exit_threads) {
+ int sent;
+ unsigned tx_drops;
+ odp_event_t event[MAX_PKT_BURST];
+ int i;
-/**
- * Find the destination port for a given input port
- *
- * @param port Input port index
- */
-static inline int find_dest_port(int port)
-{
- /* Even number of ports */
- if (gbl_args->appl.if_count % 2 == 0)
- return (port % 2 == 0) ? port + 1 : port - 1;
+ pkts = odp_queue_deq_multi(queue, event, MAX_PKT_BURST);
+ if (odp_unlikely(pkts <= 0))
+ continue;
- /* Odd number of ports */
- if (port == gbl_args->appl.if_count - 1)
- return 0;
- else
- return port + 1;
+ for (i = 0; i < pkts; i++)
+ pkt_tbl[i] = odp_packet_from_event(event[i]);
+
+ if (gbl_args->appl.error_check) {
+ int rx_drops;
+
+ /* Drop packets with errors */
+ rx_drops = drop_err_pkts(pkt_tbl, pkts);
+
+ if (odp_unlikely(rx_drops)) {
+ stats->s.rx_drops += rx_drops;
+ if (pkts == rx_drops)
+ continue;
+
+ pkts -= rx_drops;
+ }
+ }
+
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ sent = odp_pktio_send_queue(pktout, pkt_tbl, pkts);
+
+ sent = odp_unlikely(sent < 0) ? 0 : sent;
+ tx_drops = pkts - sent;
+
+ if (odp_unlikely(tx_drops)) {
+ int i;
+
+ stats->s.tx_drops += tx_drops;
+
+ /* Drop rejected packets */
+ for (i = sent; i < pkts; i++)
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ stats->s.packets += pkts;
+
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ return NULL;
}
/**
@@ -261,37 +432,34 @@ static inline int find_dest_port(int port)
*
* @param arg thread arguments of type 'thread_args_t *'
*/
-static void *pktio_direct_recv_thread(void *arg)
+static void *run_worker_direct_mode(void *arg)
{
int thr;
int pkts;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int src_idx, dst_idx;
- odp_pktio_t pktio_src, pktio_dst;
+ int dst_idx, num_pktio;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ int pktio = 0;
thread_args_t *thr_args = arg;
stats_t *stats = thr_args->stats;
thr = odp_thread_id();
- src_idx = thr_args->src_idx;
- dst_idx = gbl_args->dst_port[src_idx];
- pktio_src = gbl_args->pktios[src_idx];
- pktio_dst = gbl_args->pktios[dst_idx];
-
- printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
- " dpktio:%02" PRIu64 " DIRECT RECV mode\n",
- thr,
- gbl_args->appl.if_names[src_idx],
- gbl_args->appl.if_names[dst_idx],
- odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+
+ printf("[%02i] num pktios %i, DIRECT RECV mode\n", thr, num_pktio);
odp_barrier_wait(&barrier);
/* Loop packets */
while (!exit_threads) {
- int sent, i;
+ int sent;
unsigned tx_drops;
- pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
+ pkts = odp_pktio_recv_queue(pktin, pkt_tbl, MAX_PKT_BURST);
if (odp_unlikely(pkts <= 0))
continue;
@@ -312,12 +480,14 @@ static void *pktio_direct_recv_thread(void *arg)
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);
+ sent = odp_pktio_send_queue(pktout, pkt_tbl, pkts);
sent = odp_unlikely(sent < 0) ? 0 : sent;
tx_drops = pkts - sent;
if (odp_unlikely(tx_drops)) {
+ int i;
+
stats->s.tx_drops += tx_drops;
/* Drop rejected packets */
@@ -326,6 +496,16 @@ static void *pktio_direct_recv_thread(void *arg)
}
stats->s.packets += pkts;
+
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+
}
/* Make sure that latest stat writes are visible to other threads */
@@ -337,70 +517,171 @@ static void *pktio_direct_recv_thread(void *arg)
/**
* Create a pktio handle, optionally associating a default input queue.
*
- * @param dev Name of device to open
- * @param pool Pool to associate with device for packet RX/TX
+ * @param dev Name of device to open
+ * @param index Pktio index
+ * @param pool Pool to associate with device for packet RX/TX
*
- * @return The handle of the created pktio object.
- * @retval ODP_PKTIO_INVALID if the create fails.
+ * @retval 0 on success
+ * @retval -1 on failure
*/
-static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
+static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
+ odp_pool_t pool)
{
- char inq_name[ODP_QUEUE_NAME_LEN];
- odp_queue_param_t qparam;
- odp_queue_t inq_def;
odp_pktio_t pktio;
- int ret;
odp_pktio_param_t pktio_param;
odp_schedule_sync_t sync_mode;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktio_op_mode_t mode_rx = ODP_PKTIO_OP_MT_UNSAFE;
+ odp_pktio_op_mode_t mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
odp_pktio_param_init(&pktio_param);
- if (gbl_args->appl.mode == DIRECT_RECV)
- pktio_param.in_mode = ODP_PKTIN_MODE_RECV;
- else
+ if (gbl_args->appl.mode == DIRECT_RECV) {
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ } else if (gbl_args->appl.mode == PLAIN_QUEUE) {
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ } else {
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ }
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
LOG_ERR("Error: failed to open %s\n", dev);
- return ODP_PKTIO_INVALID;
+ return -1;
}
printf("created pktio %" PRIu64 " (%s)\n",
odp_pktio_to_u64(pktio), dev);
- /* no further setup needed for direct receive mode */
- if (gbl_args->appl.mode == DIRECT_RECV)
- return pktio;
+ if (odp_pktio_capability(pktio, &capa)) {
+ LOG_ERR("Error: capability query failed %s\n", dev);
+ return -1;
+ }
+
+ if (num_rx > (int)capa.max_input_queues) {
+ printf("Sharing %i input queues between %i workers\n",
+ capa.max_input_queues, num_rx);
+ num_rx = capa.max_input_queues;
+ mode_rx = ODP_PKTIO_OP_MT;
+ }
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ odp_pktout_queue_param_init(&out_queue_param);
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE) {
+
+ if (num_tx > (int)capa.max_output_queues) {
+ printf("Sharing %i output queues between %i workers\n",
+ capa.max_output_queues, num_tx);
+ num_tx = capa.max_output_queues;
+ mode_tx = ODP_PKTIO_OP_MT;
+ }
+
+ in_queue_param.op_mode = mode_rx;
+ in_queue_param.hash_enable = 1;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+ in_queue_param.num_queues = num_rx;
+
+ if (odp_pktin_queue_config(pktio, &in_queue_param)) {
+ LOG_ERR("Error: input queue config failed %s\n", dev);
+ return -1;
+ }
+
+ out_queue_param.op_mode = mode_tx;
+ out_queue_param.num_queues = num_tx;
+
+ if (odp_pktout_queue_config(pktio, &out_queue_param)) {
+ LOG_ERR("Error: output queue config failed %s\n", dev);
+ return -1;
+ }
+
+ if (gbl_args->appl.mode == DIRECT_RECV) {
+ if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin,
+ num_rx) != num_rx) {
+ LOG_ERR("Error: pktin queue query failed %s\n",
+ dev);
+ return -1;
+ }
+ } else { /* PLAIN QUEUE */
+ if (odp_pktin_event_queue(pktio,
+ gbl_args->pktios[idx].rx_q,
+ num_rx) != num_rx) {
+ LOG_ERR("Error: input queue query failed %s\n",
+ dev);
+ return -1;
+ }
+ }
+
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout,
+ num_tx) != num_tx) {
+ LOG_ERR("Error: pktout queue query failed %s\n", dev);
+ return -1;
+ }
+
+ printf("created %i input and %i output queues on (%s)\n",
+ num_rx, num_tx, dev);
+
+ gbl_args->pktios[idx].num_rx_queue = num_rx;
+ gbl_args->pktios[idx].num_tx_queue = num_tx;
+ gbl_args->pktios[idx].pktio = pktio;
+
+ return 0;
+ }
+
+ if (num_tx > (int)capa.max_output_queues) {
+ printf("Sharing 1 output queue between %i workers\n",
+ num_tx);
+ num_tx = 1;
+ mode_tx = ODP_PKTIO_OP_MT;
+ }
if (gbl_args->appl.mode == SCHED_ATOMIC)
sync_mode = ODP_SCHED_SYNC_ATOMIC;
else if (gbl_args->appl.mode == SCHED_ORDERED)
sync_mode = ODP_SCHED_SYNC_ORDERED;
else
- sync_mode = ODP_SCHED_SYNC_NONE;
-
- odp_queue_param_init(&qparam);
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = sync_mode;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
- snprintf(inq_name, sizeof(inq_name), "%" PRIu64 "-pktio_inq_def",
- odp_pktio_to_u64(pktio));
- inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
-
- inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
- if (inq_def == ODP_QUEUE_INVALID) {
- LOG_ERR("Error: pktio queue creation failed\n");
- return ODP_PKTIO_INVALID;
+ sync_mode = ODP_SCHED_SYNC_PARALLEL;
+
+ in_queue_param.hash_enable = 1;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+ in_queue_param.num_queues = num_rx;
+ in_queue_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ in_queue_param.queue_param.sched.sync = sync_mode;
+ in_queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ if (odp_pktin_queue_config(pktio, &in_queue_param)) {
+ LOG_ERR("Error: input queue config failed %s\n", dev);
+ return -1;
}
- ret = odp_pktio_inq_setdef(pktio, inq_def);
- if (ret != 0) {
- LOG_ERR("Error: default input-Q setup\n");
- return ODP_PKTIO_INVALID;
+ out_queue_param.op_mode = mode_tx;
+ out_queue_param.num_queues = num_tx;
+
+ if (odp_pktout_queue_config(pktio, &out_queue_param)) {
+ LOG_ERR("Error: output queue config failed %s\n", dev);
+ return -1;
}
- return pktio;
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx)
+ != num_tx) {
+ LOG_ERR("Error: pktout queue query failed %s\n", dev);
+ return -1;
+ }
+
+ printf("created %i input and %i output queues on (%s)\n",
+ num_rx, num_tx, dev);
+
+ gbl_args->pktios[idx].num_rx_queue = num_rx;
+ gbl_args->pktios[idx].num_tx_queue = num_tx;
+ gbl_args->pktios[idx].pktio = pktio;
+
+ return 0;
}
/**
@@ -466,232 +747,231 @@ static int print_speed_stats(int num_workers, stats_t *thr_stats,
return pkts > 100 ? 0 : -1;
}
-/**
- * ODP L2 forwarding main function
- */
-int main(int argc, char *argv[])
+static void print_port_mapping(void)
{
- odph_linux_pthread_t thread_tbl[MAX_WORKERS];
- odp_pool_t pool;
- int i;
- int cpu;
- int num_workers;
- odp_shm_t shm;
- odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- odph_ethaddr_t new_addr;
- odp_pktio_t pktio;
- odp_pool_param_t params;
- int ret;
- stats_t *stats;
-
- /* Init ODP before calling anything else */
- if (odp_init_global(NULL, NULL)) {
- LOG_ERR("Error: ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- if (odp_init_local(ODP_THREAD_CONTROL)) {
- LOG_ERR("Error: ODP local init failed.\n");
- exit(EXIT_FAILURE);
- }
+ int if_count, num_workers;
+ int thr, pktio;
- /* Reserve memory for args from shared mem */
- shm = odp_shm_reserve("shm_args", sizeof(args_t),
- ODP_CACHE_LINE_SIZE, 0);
- gbl_args = odp_shm_addr(shm);
+ if_count = gbl_args->appl.if_count;
+ num_workers = gbl_args->appl.num_workers;
- if (gbl_args == NULL) {
- LOG_ERR("Error: shared mem alloc failed.\n");
- exit(EXIT_FAILURE);
- }
- memset(gbl_args, 0, sizeof(*gbl_args));
+ printf("\nWorker mapping table (port[queue])\n--------------------\n");
- /* Parse and store the application arguments */
- parse_args(argc, argv, &gbl_args->appl);
+ for (thr = 0; thr < num_workers; thr++) {
+ int rx_idx, tx_idx;
+ int rx_queue_idx, tx_queue_idx;
+ thread_args_t *thr_args = &gbl_args->thread[thr];
+ int num = thr_args->num_pktio;
- /* Print both system and application information */
- print_info(NO_PATH(argv[0]), &gbl_args->appl);
+ printf("Worker %i\n", thr);
- /* Default to system CPU count unless user specified */
- num_workers = MAX_WORKERS;
- if (gbl_args->appl.cpu_count)
- num_workers = gbl_args->appl.cpu_count;
-
- /* Get default worker cpumask */
- num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
-
- printf("num worker threads: %i\n", num_workers);
- printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n", cpumaskstr);
-
- if (num_workers < gbl_args->appl.if_count) {
- LOG_ERR("Error: CPU count %d less than interface count\n",
- num_workers);
- exit(EXIT_FAILURE);
- }
-
- /* Create packet pool */
- odp_pool_param_init(&params);
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create("packet pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
- odp_pool_print(pool);
-
- for (i = 0; i < gbl_args->appl.if_count; ++i) {
- pktio = create_pktio(gbl_args->appl.if_names[i], pool);
- if (pktio == ODP_PKTIO_INVALID)
- exit(EXIT_FAILURE);
- gbl_args->pktios[i] = pktio;
-
- /* Save interface ethernet address */
- if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[i].addr,
- ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- LOG_ERR("Error: interface ethernet address unknown\n");
- exit(EXIT_FAILURE);
- }
-
- /* Save destination eth address */
- if (gbl_args->appl.dst_change) {
- /* 02:00:00:00:00:XX */
- memset(&new_addr, 0, sizeof(odph_ethaddr_t));
- new_addr.addr[0] = 0x02;
- new_addr.addr[5] = i;
- gbl_args->dst_eth_addr[i] = new_addr;
+ for (pktio = 0; pktio < num; pktio++) {
+ rx_idx = thr_args->pktio[pktio].rx_idx;
+ tx_idx = thr_args->pktio[pktio].tx_idx;
+ rx_queue_idx = thr_args->pktio[pktio].rx_queue_idx;
+ tx_queue_idx = thr_args->pktio[pktio].tx_queue_idx;
+ printf(" %i[%i] -> %i[%i]\n",
+ rx_idx, rx_queue_idx, tx_idx, tx_queue_idx);
}
-
- /* Save interface destination port */
- gbl_args->dst_port[i] = find_dest_port(i);
}
- gbl_args->pktios[i] = ODP_PKTIO_INVALID;
-
- memset(thread_tbl, 0, sizeof(thread_tbl));
-
- stats = gbl_args->stats;
-
- odp_barrier_init(&barrier, num_workers + 1);
-
- /* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
- void *(*thr_run_func) (void *);
-
- if (gbl_args->appl.mode == DIRECT_RECV)
- thr_run_func = pktio_direct_recv_thread;
- else /* SCHED_NONE / SCHED_ATOMIC / SCHED_ORDERED */
- thr_run_func = pktio_queue_thread;
-
- gbl_args->thread[i].src_idx = i % gbl_args->appl.if_count;
- gbl_args->thread[i].stats = &stats[i];
+ printf("\nPort config\n--------------------\n");
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_linux_pthread_create(&thread_tbl[i], &thd_mask,
- thr_run_func,
- &gbl_args->thread[i],
- ODP_THREAD_WORKER);
- cpu = odp_cpumask_next(&cpumask, cpu);
- }
+ for (pktio = 0; pktio < if_count; pktio++) {
+ const char *dev = gbl_args->appl.if_names[pktio];
- /* Start packet receive and transmit */
- for (i = 0; i < gbl_args->appl.if_count; ++i) {
- pktio = gbl_args->pktios[i];
- ret = odp_pktio_start(pktio);
- if (ret) {
- LOG_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
- exit(EXIT_FAILURE);
- }
+ printf("Port %i (%s)\n", pktio, dev);
+ printf(" rx workers %i\n",
+ gbl_args->pktios[pktio].num_rx_thr);
+ printf(" tx workers %i\n",
+ gbl_args->pktios[pktio].num_tx_thr);
+ printf(" rx queues %i\n",
+ gbl_args->pktios[pktio].num_rx_queue);
+ printf(" tx queues %i\n",
+ gbl_args->pktios[pktio].num_tx_queue);
}
- ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
- gbl_args->appl.accuracy);
- exit_threads = 1;
-
- /* Master thread waits for other threads to exit */
- odph_linux_pthread_join(thread_tbl, num_workers);
-
- free(gbl_args->appl.if_names);
- free(gbl_args->appl.if_str);
- printf("Exit\n\n");
-
- return ret;
+ printf("\n");
}
/**
- * Drop packets which input parsing marked as containing errors.
- *
- * Frees packets with error and modifies pkt_tbl[] to only contain packets with
- * no detected errors.
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in pkt_tbl[]
+ * Find the destination port for a given input port
*
- * @return Number of packets dropped
+ * @param port Input port index
*/
-static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
+static int find_dest_port(int port)
{
- odp_packet_t pkt;
- unsigned dropped = 0;
- unsigned i, j;
+ /* Even number of ports */
+ if (gbl_args->appl.if_count % 2 == 0)
+ return (port % 2 == 0) ? port + 1 : port - 1;
- for (i = 0, j = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
+ /* Odd number of ports */
+ if (port == gbl_args->appl.if_count - 1)
+ return 0;
+ else
+ return port + 1;
+}
- if (odp_unlikely(odp_packet_has_error(pkt))) {
- odp_packet_free(pkt); /* Drop */
- dropped++;
- } else if (odp_unlikely(i != j++)) {
- pkt_tbl[j-1] = pkt;
+/*
+ * Bind worker threads to interfaces and calculate number of queues needed
+ *
+ * less workers (N) than interfaces (M)
+ * - assign each worker to process every Nth interface
+ * - workers process inequal number of interfaces, when M is not divisible by N
+ * - needs only single queue per interface
+ * otherwise
+ * - assign an interface to every Mth worker
+ * - interfaces are processed by inequal number of workers, when N is not
+ * divisible by M
+ * - tries to configure a queue per worker per interface
+ * - shares queues, if interface capability does not allows a queue per worker
+ */
+static void bind_workers(void)
+{
+ int if_count, num_workers;
+ int rx_idx, tx_idx, thr, pktio;
+ thread_args_t *thr_args;
+
+ if_count = gbl_args->appl.if_count;
+ num_workers = gbl_args->appl.num_workers;
+
+ /* initialize port forwarding table */
+ for (rx_idx = 0; rx_idx < if_count; rx_idx++)
+ gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx);
+
+ if (if_count > num_workers) {
+ thr = 0;
+
+ for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
+ thr_args = &gbl_args->thread[thr];
+ pktio = thr_args->num_pktio;
+ tx_idx = gbl_args->dst_port[rx_idx];
+ thr_args->pktio[pktio].rx_idx = rx_idx;
+ thr_args->pktio[pktio].tx_idx = tx_idx;
+ thr_args->num_pktio++;
+
+ gbl_args->pktios[rx_idx].num_rx_thr++;
+ gbl_args->pktios[tx_idx].num_tx_thr++;
+
+ thr++;
+ if (thr >= num_workers)
+ thr = 0;
+ }
+ } else {
+ rx_idx = 0;
+
+ for (thr = 0; thr < num_workers; thr++) {
+ thr_args = &gbl_args->thread[thr];
+ pktio = thr_args->num_pktio;
+ tx_idx = gbl_args->dst_port[rx_idx];
+ thr_args->pktio[pktio].rx_idx = rx_idx;
+ thr_args->pktio[pktio].tx_idx = tx_idx;
+ thr_args->num_pktio++;
+
+ gbl_args->pktios[rx_idx].num_rx_thr++;
+ gbl_args->pktios[tx_idx].num_tx_thr++;
+
+ rx_idx++;
+ if (rx_idx >= if_count)
+ rx_idx = 0;
}
}
-
- return dropped;
}
-/**
- * Fill packets' eth addresses according to the destination port
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in the array
- * @param dst_port Destination port
+/*
+ * Bind queues to threads and fill in missing thread arguments (handles)
*/
-static void fill_eth_addrs(odp_packet_t pkt_tbl[], unsigned num, int dst_port)
+static void bind_queues(void)
{
- odp_packet_t pkt;
- odph_ethhdr_t *eth;
- unsigned i;
-
- if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
- return;
-
- for (i = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
- if (odp_packet_has_eth(pkt)) {
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
-
- if (gbl_args->appl.src_change)
- eth->src = gbl_args->port_eth_addr[dst_port];
-
- if (gbl_args->appl.dst_change)
- eth->dst = gbl_args->dst_eth_addr[dst_port];
+ int num_workers;
+ int thr, pktio;
+
+ num_workers = gbl_args->appl.num_workers;
+
+ for (thr = 0; thr < num_workers; thr++) {
+ int rx_idx, tx_idx;
+ thread_args_t *thr_args = &gbl_args->thread[thr];
+ int num = thr_args->num_pktio;
+
+ for (pktio = 0; pktio < num; pktio++) {
+ int rx_queue, tx_queue;
+
+ rx_idx = thr_args->pktio[pktio].rx_idx;
+ tx_idx = thr_args->pktio[pktio].tx_idx;
+ rx_queue = gbl_args->pktios[rx_idx].next_rx_queue;
+ tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;
+
+ thr_args->pktio[pktio].rx_queue_idx = rx_queue;
+ thr_args->pktio[pktio].tx_queue_idx = tx_queue;
+ thr_args->pktio[pktio].pktin =
+ gbl_args->pktios[rx_idx].pktin[rx_queue];
+ thr_args->pktio[pktio].pktout =
+ gbl_args->pktios[tx_idx].pktout[tx_queue];
+ thr_args->pktio[pktio].rx_queue =
+ gbl_args->pktios[rx_idx].rx_q[rx_queue];
+ thr_args->pktio[pktio].rx_pktio =
+ gbl_args->pktios[rx_idx].pktio;
+ thr_args->pktio[pktio].tx_pktio =
+ gbl_args->pktios[tx_idx].pktio;
+
+ rx_queue++;
+ tx_queue++;
+
+ if (rx_queue >= gbl_args->pktios[rx_idx].num_rx_queue)
+ rx_queue = 0;
+ if (tx_queue >= gbl_args->pktios[tx_idx].num_tx_queue)
+ tx_queue = 0;
+
+ gbl_args->pktios[rx_idx].next_rx_queue = rx_queue;
+ gbl_args->pktios[tx_idx].next_tx_queue = tx_queue;
}
}
}
/**
+ * Prinf usage information
+ */
+static void usage(char *progname)
+{
+ printf("\n"
+ "OpenDataPlane L2 forwarding application.\n"
+ "\n"
+ "Usage: %s OPTIONS\n"
+ " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
+ " In the above example,\n"
+ " eth0 will send pkts to eth1 and vice versa\n"
+ " eth2 will send pkts to eth3 and vice versa\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ " -i, --interface Eth interfaces (comma-separated, no spaces)\n"
+ " Interface count min 1, max %i\n"
+ "\n"
+ "Optional OPTIONS\n"
+ " -m, --mode 0: Receive packets directly from pktio interface (default)\n"
+ " 1: Receive packets through scheduler sync parallel queues\n"
+ " 2: Receive packets through scheduler sync atomic queues\n"
+ " 3: Receive packets through scheduler sync ordered queues\n"
+ " 4: Receive packets through plain queues\n"
+ " -c, --count <number> CPU count.\n"
+ " -t, --time <number> Time in seconds to run.\n"
+ " -a, --accuracy <number> Time in seconds get print statistics\n"
+ " (default is 1 second).\n"
+ " -d, --dst_change 0: Don't change packets' dst eth addresses (default)\n"
+ " 1: Change packets' dst eth addresses\n"
+ " -s, --src_change 0: Don't change packets' src eth addresses\n"
+ " 1: Change packets' src eth addresses (default)\n"
+ " -e, --error_check 0: Don't check packet errors (default)\n"
+ " 1: Check packet errors\n"
+ " -h, --help Display help and exit.\n\n"
+ " environment variables: ODP_PKTIO_DISABLE_NETMAP\n"
+ " ODP_PKTIO_DISABLE_SOCKET_MMAP\n"
+ " ODP_PKTIO_DISABLE_SOCKET_MMSG\n"
+ " can be used to advanced pkt I/O selection for linux-generic\n"
+ "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
+ );
+}
+
+/**
* Parse and store the command line arguments
*
* @param argc argument count
@@ -764,7 +1044,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->if_count = i;
- if (appl_args->if_count == 0) {
+ if (appl_args->if_count < 1 ||
+ appl_args->if_count > MAX_PKTIOS) {
usage(argv[0]);
exit(EXIT_FAILURE);
}
@@ -783,11 +1064,13 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'm':
i = atoi(optarg);
if (i == 1)
- appl_args->mode = SCHED_NONE;
+ appl_args->mode = SCHED_PARALLEL;
else if (i == 2)
appl_args->mode = SCHED_ATOMIC;
else if (i == 3)
appl_args->mode = SCHED_ORDERED;
+ else if (i == 4)
+ appl_args->mode = PLAIN_QUEUE;
else
appl_args->mode = DIRECT_RECV;
break;
@@ -829,11 +1112,11 @@ static void print_info(char *progname, appl_args_t *appl_args)
"---------------\n"
"ODP API version: %s\n"
"CPU model: %s\n"
- "CPU freq (hz): %"PRIu64"\n"
+ "CPU freq (hz): %" PRIu64 "\n"
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_version_api_str(), odp_cpu_model_str(), odp_cpu_hz_max(),
odp_sys_cache_line_size(), odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
@@ -847,8 +1130,10 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Mode: ");
if (appl_args->mode == DIRECT_RECV)
printf("DIRECT_RECV");
- else if (appl_args->mode == SCHED_NONE)
- printf("SCHED_NONE");
+ else if (appl_args->mode == PLAIN_QUEUE)
+ printf("PLAIN_QUEUE");
+ else if (appl_args->mode == SCHED_PARALLEL)
+ printf("SCHED_PARALLEL");
else if (appl_args->mode == SCHED_ATOMIC)
printf("SCHED_ATOMIC");
else if (appl_args->mode == SCHED_ORDERED)
@@ -857,43 +1142,202 @@ static void print_info(char *progname, appl_args_t *appl_args)
fflush(NULL);
}
+static void gbl_args_init(args_t *args)
+{
+ int pktio, queue;
+
+ memset(args, 0, sizeof(args_t));
+
+ for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
+ args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
+
+ for (queue = 0; queue < MAX_QUEUES; queue++)
+ args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
+ }
+}
+
/**
- * Prinf usage information
+ * ODP L2 forwarding main function
*/
-static void usage(char *progname)
+int main(int argc, char *argv[])
{
- printf("\n"
- "OpenDataPlane L2 forwarding application.\n"
- "\n"
- "Usage: %s OPTIONS\n"
- " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
- " In the above example,\n"
- " eth0 will send pkts to eth1 and vice versa\n"
- " eth2 will send pkts to eth3 and vice versa\n"
- "\n"
- "Mandatory OPTIONS:\n"
- " -i, --interface Eth interfaces (comma-separated, no spaces)\n"
- "\n"
- "Optional OPTIONS\n"
- " -m, --mode 0: Send&receive packets directly from NIC (default)\n"
- " 1: Send&receive packets through scheduler sync none queues\n"
- " 2: Send&receive packets through scheduler sync atomic queues\n"
- " 3: Send&receive packets through scheduler sync ordered queues\n"
- " -c, --count <number> CPU count.\n"
- " -t, --time <number> Time in seconds to run.\n"
- " -a, --accuracy <number> Time in seconds get print statistics\n"
- " (default is 1 second).\n"
- " -d, --dst_change 0: Don't change packets' dst eth addresses (default)\n"
- " 1: Change packets' dst eth addresses\n"
- " -s, --src_change 0: Don't change packets' src eth addresses\n"
- " 1: Change packets' src eth addresses (default)\n"
- " -e, --error_check 0: Don't check packet errors (default)\n"
- " 1: Check packet errors\n"
- " -h, --help Display help and exit.\n\n"
- " environment variables: ODP_PKTIO_DISABLE_NETMAP\n"
- " ODP_PKTIO_DISABLE_SOCKET_MMAP\n"
- " ODP_PKTIO_DISABLE_SOCKET_MMSG\n"
- " can be used to advanced pkt I/O selection for linux-generic\n"
- "\n", NO_PATH(progname), NO_PATH(progname)
- );
+ odph_linux_pthread_t thread_tbl[MAX_WORKERS];
+ odp_pool_t pool;
+ int i;
+ int cpu;
+ int num_workers;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ odph_ethaddr_t new_addr;
+ odp_pool_param_t params;
+ int ret;
+ stats_t *stats;
+ int if_count;
+ void *(*thr_run_func)(void *);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(NULL, NULL)) {
+ LOG_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(ODP_THREAD_CONTROL)) {
+ LOG_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ gbl_args = odp_shm_addr(shm);
+
+ if (gbl_args == NULL) {
+ LOG_ERR("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ gbl_args_init(gbl_args);
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &gbl_args->appl);
+
+ /* Print both system and application information */
+ print_info(NO_PATH(argv[0]), &gbl_args->appl);
+
+ /* Default to system CPU count unless user specified */
+ num_workers = MAX_WORKERS;
+ if (gbl_args->appl.cpu_count)
+ num_workers = gbl_args->appl.cpu_count;
+
+ /* Get default worker cpumask */
+ num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+ (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+
+ gbl_args->appl.num_workers = num_workers;
+
+ for (i = 0; i < num_workers; i++)
+ gbl_args->thread[i].thr_idx = i;
+
+ if_count = gbl_args->appl.if_count;
+
+ printf("num worker threads: %i\n", num_workers);
+ printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n", cpumaskstr);
+
+ /* Create packet pool */
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.num = SHM_PKT_POOL_SIZE;
+ params.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create("packet pool", &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ LOG_ERR("Error: packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ odp_pool_print(pool);
+
+ bind_workers();
+
+ for (i = 0; i < if_count; ++i) {
+ const char *dev = gbl_args->appl.if_names[i];
+ int num_rx, num_tx;
+
+ /* A queue per worker in scheduled mode */
+ num_rx = num_workers;
+ num_tx = num_workers;
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE) {
+ /* A queue per assigned worker */
+ num_rx = gbl_args->pktios[i].num_rx_thr;
+ num_tx = gbl_args->pktios[i].num_tx_thr;
+ }
+
+ if (create_pktio(dev, i, num_rx, num_tx, pool))
+ exit(EXIT_FAILURE);
+
+ /* Save interface ethernet address */
+ if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
+ gbl_args->port_eth_addr[i].addr,
+ ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
+ LOG_ERR("Error: interface ethernet address unknown\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Save destination eth address */
+ if (gbl_args->appl.dst_change) {
+ /* 02:00:00:00:00:XX */
+ memset(&new_addr, 0, sizeof(odph_ethaddr_t));
+ new_addr.addr[0] = 0x02;
+ new_addr.addr[5] = i;
+ gbl_args->dst_eth_addr[i] = new_addr;
+ }
+ }
+
+ gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
+
+ bind_queues();
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE)
+ print_port_mapping();
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
+ stats = gbl_args->stats;
+
+ odp_barrier_init(&barrier, num_workers + 1);
+
+ if (gbl_args->appl.mode == DIRECT_RECV)
+ thr_run_func = run_worker_direct_mode;
+ else if (gbl_args->appl.mode == PLAIN_QUEUE)
+ thr_run_func = run_worker_plain_queue_mode;
+ else /* SCHED_PARALLEL / SCHED_ATOMIC / SCHED_ORDERED */
+ thr_run_func = run_worker_sched_mode;
+
+ /* Create worker threads */
+ cpu = odp_cpumask_first(&cpumask);
+ for (i = 0; i < num_workers; ++i) {
+ odp_cpumask_t thd_mask;
+
+ gbl_args->thread[i].stats = &stats[i];
+
+ odp_cpumask_zero(&thd_mask);
+ odp_cpumask_set(&thd_mask, cpu);
+ odph_linux_pthread_create(&thread_tbl[i], &thd_mask,
+ thr_run_func,
+ &gbl_args->thread[i],
+ ODP_THREAD_WORKER);
+ cpu = odp_cpumask_next(&cpumask, cpu);
+ }
+
+ /* Start packet receive and transmit */
+ for (i = 0; i < if_count; ++i) {
+ odp_pktio_t pktio;
+
+ pktio = gbl_args->pktios[i].pktio;
+ ret = odp_pktio_start(pktio);
+ if (ret) {
+ LOG_ERR("Error: unable to start %s\n",
+ gbl_args->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
+ gbl_args->appl.accuracy);
+ exit_threads = 1;
+
+ /* Master thread waits for other threads to exit */
+ odph_linux_pthread_join(thread_tbl, num_workers);
+
+ free(gbl_args->appl.if_names);
+ free(gbl_args->appl.if_str);
+ printf("Exit\n\n");
+
+ return ret;
}
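
The reworked create_pktio() above replaces the old default-input-queue setup with explicit pktin/pktout queue configuration. For reference, a minimal sketch of the same DIRECT_RECV setup sequence, reduced to one RX and one TX queue; this is illustrative only, not taken from the patch, and the helper name open_direct_pktio is invented for the example. It uses only calls that appear in the diff above.

#include <odp.h>

/* Sketch: open and start a pktio in direct mode with a single RX/TX queue. */
static odp_pktio_t open_direct_pktio(const char *dev, odp_pool_t pool,
				     odp_pktin_queue_t *pktin,
				     odp_pktout_queue_t *pktout)
{
	odp_pktio_param_t pktio_param;
	odp_pktin_queue_param_t in_param;
	odp_pktout_queue_param_t out_param;
	odp_pktio_t pktio;

	odp_pktio_param_init(&pktio_param);
	pktio_param.in_mode  = ODP_PKTIN_MODE_DIRECT;
	pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;

	pktio = odp_pktio_open(dev, pool, &pktio_param);
	if (pktio == ODP_PKTIO_INVALID)
		return ODP_PKTIO_INVALID;

	odp_pktin_queue_param_init(&in_param);
	in_param.num_queues = 1;
	odp_pktout_queue_param_init(&out_param);
	out_param.num_queues = 1;

	if (odp_pktin_queue_config(pktio, &in_param) ||
	    odp_pktout_queue_config(pktio, &out_param))
		return ODP_PKTIO_INVALID;

	/* Query the queue handles later passed to odp_pktio_recv_queue()
	 * and odp_pktio_send_queue() in the worker loops. */
	if (odp_pktin_queue(pktio, pktin, 1) != 1 ||
	    odp_pktout_queue(pktio, pktout, 1) != 1)
		return ODP_PKTIO_INVALID;

	if (odp_pktio_start(pktio))
		return ODP_PKTIO_INVALID;

	return pktio;
}
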
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 9fdc4cb81..9a4735579 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -145,7 +145,7 @@ typedef struct {
} thread_args_t;
typedef struct {
- uint32be_t magic; /* Packet header magic number */
+ odp_u32be_t magic; /* Packet header magic number */
} pkt_head_t;
/* Pool from which transmitted packets are allocated */
@@ -382,7 +382,7 @@ static void *run_thread_tx(void *arg)
return NULL;
}
-static int receive_packets(odp_queue_t pollq,
+static int receive_packets(odp_queue_t plainq,
odp_event_t *event_tbl, unsigned num_pkts)
{
int n_ev = 0;
@@ -390,12 +390,12 @@ static int receive_packets(odp_queue_t pollq,
if (num_pkts == 0)
return 0;
- if (pollq != ODP_QUEUE_INVALID) {
+ if (plainq != ODP_QUEUE_INVALID) {
if (num_pkts == 1) {
- event_tbl[0] = odp_queue_deq(pollq);
+ event_tbl[0] = odp_queue_deq(plainq);
n_ev = event_tbl[0] != ODP_EVENT_INVALID;
} else {
- n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
+ n_ev = odp_queue_deq_multi(plainq, event_tbl, num_pkts);
}
} else {
if (num_pkts == 1) {
@@ -413,7 +413,7 @@ static void *run_thread_rx(void *arg)
{
test_globals_t *globals;
int thr_id, batch_len;
- odp_queue_t pollq = ODP_QUEUE_INVALID;
+ odp_queue_t plainq = ODP_QUEUE_INVALID;
thread_args_t *targs = arg;
@@ -429,8 +429,8 @@ static void *run_thread_rx(void *arg)
pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];
if (gbl_args->args.schedule == 0) {
- pollq = odp_pktio_inq_getdef(globals->pktio_rx);
- if (pollq == ODP_QUEUE_INVALID)
+ plainq = odp_pktio_inq_getdef(globals->pktio_rx);
+ if (plainq == ODP_QUEUE_INVALID)
LOG_ABORT("Invalid input queue.\n");
}
@@ -439,7 +439,7 @@ static void *run_thread_rx(void *arg)
odp_event_t ev[BATCH_LEN_MAX];
int i, n_ev;
- n_ev = receive_packets(pollq, ev, batch_len);
+ n_ev = receive_packets(plainq, ev, batch_len);
for (i = 0; i < n_ev; ++i) {
if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
@@ -672,7 +672,7 @@ static int run_test(void)
printf("\tReceive batch length: \t%" PRIu32 "\n",
gbl_args->args.rx_batch_len);
printf("\tPacket receive method:\t%s\n",
- gbl_args->args.schedule ? "schedule" : "poll");
+ gbl_args->args.schedule ? "schedule" : "plain");
printf("\tInterface(s): \t");
for (i = 0; i < gbl_args->args.num_ifaces; ++i)
printf("%s ", gbl_args->args.ifaces[i]);
@@ -712,7 +712,7 @@ static odp_pktio_t create_pktio(const char *iface, int schedule)
if (schedule)
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
else
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
pktio = odp_pktio_open(iface, pool, &pktio_param);
@@ -766,16 +766,16 @@ static int test_init(void)
/* create and associate an input queue for the RX side */
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
snprintf(inq_name, sizeof(inq_name), "inq-pktio-%" PRIu64,
odp_pktio_to_u64(gbl_args->pktio_rx));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(inq_name,
- ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
if (inq_def == ODP_QUEUE_INVALID)
return -1;
@@ -809,7 +809,7 @@ static int destroy_inq(odp_pktio_t pktio)
/* flush any pending events */
while (1) {
- if (q_type == ODP_QUEUE_TYPE_POLL)
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
ev = odp_queue_deq(inq);
else
ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -881,7 +881,7 @@ static void usage(void)
printf(" default: cpu_count+1/2\n");
printf(" -b, --txbatch <length> Number of packets per TX batch\n");
printf(" default: %d\n", BATCH_LEN_MAX);
- printf(" -p, --poll Poll input queue for packet RX\n");
+ printf(" -p, --plain Plain input queue for packet RX\n");
printf(" default: disabled (use scheduler)\n");
printf(" -R, --rxbatch <length> Number of packets per RX batch\n");
printf(" default: %d\n", BATCH_LEN_MAX);
@@ -904,7 +904,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
{"count", required_argument, NULL, 'c'},
{"txcount", required_argument, NULL, 't'},
{"txbatch", required_argument, NULL, 'b'},
- {"poll", no_argument, NULL, 'p'},
+ {"plain", no_argument, NULL, 'p'},
{"rxbatch", required_argument, NULL, 'R'},
{"length", required_argument, NULL, 'l'},
{"rate", required_argument, NULL, 'r'},
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index 8ec89bfe8..d785878cf 100644
--- a/test/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -246,7 +246,7 @@ static int test_alloc_multi(int thr, odp_pool_t pool)
}
/**
- * @internal Test queue polling
+ * @internal Test plain queues
*
* Enqueue to and dequeue to/from a single shared queue.
*
@@ -255,7 +255,7 @@ static int test_alloc_multi(int thr, odp_pool_t pool)
*
* @return 0 if successful
*/
-static int test_poll_queue(int thr, odp_pool_t msg_pool)
+static int test_plain_queue(int thr, odp_pool_t msg_pool)
{
odp_event_t ev;
odp_buffer_t buf;
@@ -278,7 +278,7 @@ static int test_poll_queue(int thr, odp_pool_t msg_pool)
t_msg->msg_id = MSG_HELLO;
t_msg->seq = 0;
- queue = odp_queue_lookup("poll_queue");
+ queue = odp_queue_lookup("plain_queue");
if (queue == ODP_QUEUE_INVALID) {
printf(" [%i] Queue lookup failed.\n", thr);
@@ -310,7 +310,7 @@ static int test_poll_queue(int thr, odp_pool_t msg_pool)
cycles = odp_cpu_cycles_diff(c2, c1);
cycles = cycles / QUEUE_ROUNDS;
- printf(" [%i] poll_queue enq+deq %6" PRIu64 " CPU cycles\n",
+ printf(" [%i] plain_queue enq+deq %6" PRIu64 " CPU cycles\n",
thr, cycles);
odp_buffer_free(buf);
@@ -645,7 +645,7 @@ static void *run_thread(void *arg)
odp_barrier_wait(barrier);
- if (test_poll_queue(thr, msg_pool))
+ if (test_plain_queue(thr, msg_pool))
return NULL;
/* Low prio */
@@ -724,14 +724,14 @@ static void test_cpu_freq(void)
nsec = odp_time_to_ns(test_time);
cycles = odp_cpu_cycles_diff(c2, c1);
- max_cycles = (nsec * odp_sys_cpu_hz()) / 1000000000.0;
+ max_cycles = (nsec * odp_cpu_hz_max()) / 1000000000.0;
/* Compare measured CPU cycles to maximum theoretical CPU cycle count */
diff_max_hz = ((double)(cycles) - max_cycles) / max_cycles;
printf("odp_time %" PRIu64 " ns\n", nsec);
printf("odp_cpu_cycles %" PRIu64 " CPU cycles\n", cycles);
- printf("odp_sys_cpu_hz %" PRIu64 " hz\n", odp_sys_cpu_hz());
+ printf("odp_sys_cpu_hz %" PRIu64 " hz\n", odp_cpu_hz_max());
printf("Diff from max CPU freq %f%%\n", diff_max_hz * 100.0);
printf("\n");
@@ -845,8 +845,8 @@ int main(int argc, char *argv[])
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %" PRIu64 "\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %" PRIu64 "\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("Max CPU count: %i\n", odp_cpu_count());
@@ -900,12 +900,12 @@ int main(int argc, char *argv[])
/* odp_pool_print(pool); */
/*
- * Create a queue for direct poll test
+ * Create a queue for plain queue test
*/
- queue = odp_queue_create("poll_queue", ODP_QUEUE_TYPE_POLL, NULL);
+ queue = odp_queue_create("plain_queue", NULL);
if (queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Poll queue create failed.\n");
+ LOG_ERR("Plain queue create failed.\n");
return -1;
}
@@ -926,6 +926,7 @@ int main(int argc, char *argv[])
name[7] = '0' + i - 10*(i/10);
odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = i;
param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
param.sched.group = ODP_SCHED_GROUP_ALL;
@@ -934,8 +935,7 @@ int main(int argc, char *argv[])
name[9] = '0' + j/10;
name[10] = '0' + j - 10*(j/10);
- queue = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
- &param);
+ queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
LOG_ERR("Schedule queue create failed.\n");
diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index 197ff8960..cdd5a842a 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -1,4 +1,6 @@
-ODP_MODULES = buffer \
+ODP_MODULES = atomic \
+ barrier \
+ buffer \
classification \
config \
cpumask \
@@ -6,6 +8,7 @@ ODP_MODULES = buffer \
errno \
hash \
init \
+ lock \
queue \
packet \
pktio \
@@ -13,7 +16,6 @@ ODP_MODULES = buffer \
random \
scheduler \
std_clib \
- synchronizers \
thread \
time \
timer \
@@ -23,4 +25,4 @@ ODP_MODULES = buffer \
SUBDIRS = common $(ODP_MODULES)
#The tests will need to retain the deprecated test implementation
-AM_CFLAGS += -Wno-deprecated-declarations
\ No newline at end of file
+AM_CFLAGS += -Wno-deprecated-declarations
diff --git a/test/validation/atomic/.gitignore b/test/validation/atomic/.gitignore
new file mode 100644
index 000000000..610ffeab0
--- /dev/null
+++ b/test/validation/atomic/.gitignore
@@ -0,0 +1 @@
+atomic_main
diff --git a/test/validation/atomic/Makefile.am b/test/validation/atomic/Makefile.am
new file mode 100644
index 000000000..9b6bd6315
--- /dev/null
+++ b/test/validation/atomic/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestatomic.la
+libtestatomic_la_SOURCES = atomic.c
+
+test_PROGRAMS = atomic_main$(EXEEXT)
+dist_atomic_main_SOURCES = atomic_main.c
+atomic_main_LDADD = libtestatomic.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = atomic.h
diff --git a/test/validation/atomic/atomic.c b/test/validation/atomic/atomic.c
new file mode 100644
index 000000000..24c0de731
--- /dev/null
+++ b/test/validation/atomic/atomic.c
@@ -0,0 +1,881 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "atomic.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+
+#define ADD_SUB_CNT 5
+
+#define CNT 10
+#define U32_INIT_VAL (1UL << 10)
+#define U64_INIT_VAL (1ULL << 33)
+#define U32_MAGIC 0xa23f65b2
+#define U64_MAGIC 0xf2e1c5430cb6a52e
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+#define CHECK_MAX_MIN (1 << 0)
+#define CHECK_XCHG (1 << 2)
+
+static odp_atomic_u32_t a32u;
+static odp_atomic_u64_t a64u;
+static odp_atomic_u32_t a32u_min;
+static odp_atomic_u32_t a32u_max;
+static odp_atomic_u64_t a64u_min;
+static odp_atomic_u64_t a64u_max;
+static odp_atomic_u32_t a32u_xchg;
+static odp_atomic_u64_t a64u_xchg;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ volatile_u32_t global_lock_owner;
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
+static void test_atomic_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u32(&a32u);
+}
+
+static void test_atomic_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u64(&a64u);
+}
+
+static void test_atomic_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u32(&a32u);
+}
+
+static void test_atomic_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u64(&a64u);
+}
+
+static void test_atomic_fetch_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u32(&a32u);
+}
+
+static void test_atomic_fetch_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u64(&a64u);
+}
+
+static void test_atomic_fetch_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u32(&a32u);
+}
+
+static void test_atomic_fetch_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u64(&a64u);
+}
+
+static void test_atomic_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_min_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u32(&a32u);
+ odp_atomic_min_u32(&a32u_min, tmp);
+ }
+}
+
+static void test_atomic_min_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u64(&a64u);
+ odp_atomic_min_u64(&a64u_min, tmp);
+ }
+}
+
+static void test_atomic_max_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u32(&a32u);
+ odp_atomic_max_u32(&a32u_max, tmp);
+ }
+}
+
+static void test_atomic_max_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u64(&a64u);
+ odp_atomic_max_u64(&a64u_max, tmp);
+ }
+}
+
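+/* Increment via a compare-and-swap retry loop: keep retrying until the
+ * CAS succeeds (the dec and 64-bit variants below follow the same
+ * pattern). */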
+static void test_atomic_cas_inc_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_inc_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old - 1) == 0)
+ ;
+ }
+}
+
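+/* Swap a freshly incremented counter value into the xchg variable each
+ * round; the counter is restored afterwards and the xchg variable is
+ * left at the magic value for validation. */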
+static void test_atomic_xchg_32(void)
+{
+ uint32_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u32(&a32u);
+ old = odp_atomic_xchg_u32(&a32u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u32(&a32u_xchg, 0);
+ else
+ odp_atomic_xchg_u32(&a32u_xchg, 1);
+ }
+
+ odp_atomic_sub_u32(&a32u, CNT);
+ odp_atomic_xchg_u32(&a32u_xchg, U32_MAGIC);
+}
+
+static void test_atomic_xchg_64(void)
+{
+ uint64_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u64(&a64u);
+ old = odp_atomic_xchg_u64(&a64u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u64(&a64u_xchg, 0);
+ else
+ odp_atomic_xchg_u64(&a64u_xchg, 1);
+ }
+
+ odp_atomic_sub_u64(&a64u, CNT);
+ odp_atomic_xchg_u64(&a64u_xchg, U64_MAGIC);
+}
+
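+/* Exercise the non-relaxed (acquire/release) variants of load, store,
+ * add, sub and CAS; the final CAS leaves the xchg variable at the magic
+ * value for validation. */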
+static void test_atomic_non_relaxed_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u32(&a32u);
+ odp_atomic_store_rel_u32(&a32u, tmp);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_max);
+ odp_atomic_add_rel_u32(&a32u_max, 1);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_min);
+ odp_atomic_sub_rel_u32(&a32u_min, 1);
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_acq_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_rel_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u32(&a32u_xchg, &tmp, U32_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_non_relaxed_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u64(&a64u);
+ odp_atomic_store_rel_u64(&a64u, tmp);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_max);
+ odp_atomic_add_rel_u64(&a64u_max, 1);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_min);
+ odp_atomic_sub_rel_u64(&a64u_min, 1);
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_acq_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_rel_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u64(&a64u_xchg, &tmp, U64_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_inc_dec_32(void)
+{
+ test_atomic_inc_32();
+ test_atomic_dec_32();
+}
+
+static void test_atomic_inc_dec_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_dec_64();
+}
+
+static void test_atomic_fetch_inc_dec_32(void)
+{
+ test_atomic_fetch_inc_32();
+ test_atomic_fetch_dec_32();
+}
+
+static void test_atomic_fetch_inc_dec_64(void)
+{
+ test_atomic_fetch_inc_64();
+ test_atomic_fetch_dec_64();
+}
+
+static void test_atomic_add_sub_32(void)
+{
+ test_atomic_add_32();
+ test_atomic_sub_32();
+}
+
+static void test_atomic_add_sub_64(void)
+{
+ test_atomic_add_64();
+ test_atomic_sub_64();
+}
+
+static void test_atomic_fetch_add_sub_32(void)
+{
+ test_atomic_fetch_add_32();
+ test_atomic_fetch_sub_32();
+}
+
+static void test_atomic_fetch_add_sub_64(void)
+{
+ test_atomic_fetch_add_64();
+ test_atomic_fetch_sub_64();
+}
+
+static void test_atomic_max_min_32(void)
+{
+ test_atomic_max_32();
+ test_atomic_min_32();
+}
+
+static void test_atomic_max_min_64(void)
+{
+ test_atomic_max_64();
+ test_atomic_min_64();
+}
+
+static void test_atomic_cas_inc_dec_32(void)
+{
+ test_atomic_cas_inc_32();
+ test_atomic_cas_dec_32();
+}
+
+static void test_atomic_cas_inc_dec_64(void)
+{
+ test_atomic_cas_inc_64();
+ test_atomic_cas_dec_64();
+}
+
+static void test_atomic_init(void)
+{
+ odp_atomic_init_u32(&a32u, 0);
+ odp_atomic_init_u64(&a64u, 0);
+ odp_atomic_init_u32(&a32u_min, 0);
+ odp_atomic_init_u32(&a32u_max, 0);
+ odp_atomic_init_u64(&a64u_min, 0);
+ odp_atomic_init_u64(&a64u_max, 0);
+ odp_atomic_init_u32(&a32u_xchg, 0);
+ odp_atomic_init_u64(&a64u_xchg, 0);
+}
+
+static void test_atomic_store(void)
+{
+ odp_atomic_store_u32(&a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_min, U32_INIT_VAL);
+ odp_atomic_store_u32(&a32u_max, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_min, U64_INIT_VAL);
+ odp_atomic_store_u64(&a64u_max, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_xchg, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_xchg, U64_INIT_VAL);
+}
+
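+/* Verify that the counters are back at their initial values. The max/min
+ * and xchg variables are checked only when the corresponding CHECK_*
+ * flag is set, since not every test updates them. */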
+static void test_atomic_validate(int check)
+{
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
+
+ if (check & CHECK_MAX_MIN) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_max) >
+ odp_atomic_load_u32(&a32u_min));
+
+ CU_ASSERT(odp_atomic_load_u64(&a64u_max) >
+ odp_atomic_load_u64(&a64u_min));
+ }
+
+ if (check & CHECK_XCHG) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_xchg) == U32_MAGIC);
+ CU_ASSERT(odp_atomic_load_u64(&a64u_xchg) == U64_MAGIC);
+ }
+}
+
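+/* Executable init: bring up ODP, reserve the shared memory block and
+ * clamp the number of worker threads to the number of available cores. */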
+int atomic_init(void)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_cpumask_t mask;
+
+ if (0 != odp_init_global(NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (ODP_SHM_INVALID == global_shm) {
+ fprintf(stderr, "Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ workers_count = odp_cpumask_default_worker(&mask, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ return ret;
+}
+
+/* Atomic tests */
+static void *test_atomic_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_inc_dec_32();
+ test_atomic_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_add_sub_32();
+ test_atomic_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_inc_dec_32();
+ test_atomic_fetch_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_fetch_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_add_sub_32();
+ test_atomic_fetch_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_max_min_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_max_min_32();
+ test_atomic_max_min_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_cas_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_cas_inc_dec_32();
+ test_atomic_cas_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_xchg_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_xchg_32();
+ test_atomic_xchg_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_non_relaxed_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_non_relaxed_32();
+ test_atomic_non_relaxed_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
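+/* Common driver: initialise and store the atomic variables, run the given
+ * thread function on all configured worker threads and then validate the
+ * results according to the check flags. */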
+static void test_atomic_functional(void *func_ptr(void *), int check)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ test_atomic_init();
+ test_atomic_store();
+ odp_cunit_thread_create(func_ptr, &arg);
+ odp_cunit_thread_exit(&arg);
+ test_atomic_validate(check);
+}
+
+void atomic_test_atomic_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_add_sub(void)
+{
+ test_atomic_functional(test_atomic_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_fetch_add_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_max_min(void)
+{
+ test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
+}
+
+void atomic_test_atomic_cas_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_cas_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_xchg(void)
+{
+ test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
+}
+
+void atomic_test_atomic_non_relaxed(void)
+{
+ test_atomic_functional(test_atomic_non_relaxed_thread,
+ CHECK_MAX_MIN | CHECK_XCHG);
+}
+
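+/* Check the odp_atomic_op_t bit field layout and query which (if any)
+ * 64-bit atomic operations are lock-free on this platform. */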
+void atomic_test_atomic_op_lock_free(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ atomic_op.all_bits = 0;
+
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+
+ /* Test setting the first bit, the last bit and a couple of others */
+ atomic_op.op.init = 1;
+ CU_ASSERT(atomic_op.op.init == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.init = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.xchg = 1;
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.xchg = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.add = 1;
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.add = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.dec = 1;
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.dec = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u64(&atomic_op);
+ ret_null = odp_atomic_lock_free_u64(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+ /* The init operation is not atomic per the spec, so a call to
+ * odp_atomic_lock_free_u64() always clears the init bit and never
+ * sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.fetch_add == 1);
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.op.fetch_sub == 1);
+ CU_ASSERT(atomic_op.op.sub == 1);
+ CU_ASSERT(atomic_op.op.fetch_inc == 1);
+ CU_ASSERT(atomic_op.op.inc == 1);
+ CU_ASSERT(atomic_op.op.fetch_dec == 1);
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.op.min == 1);
+ CU_ASSERT(atomic_op.op.max == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ }
+}
+
+odp_testinfo_t atomic_suite_atomic[] = {
+ ODP_TEST_INFO(atomic_test_atomic_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_max_min),
+ ODP_TEST_INFO(atomic_test_atomic_cas_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_xchg),
+ ODP_TEST_INFO(atomic_test_atomic_non_relaxed),
+ ODP_TEST_INFO(atomic_test_atomic_op_lock_free),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t atomic_suites[] = {
+ {"atomic", NULL, NULL,
+ atomic_suite_atomic},
+ ODP_SUITE_INFO_NULL
+};
+
+int atomic_main(void)
+{
+ int ret;
+
+ odp_cunit_register_global_init(atomic_init);
+
+ ret = odp_cunit_register(atomic_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/atomic/atomic.h b/test/validation/atomic/atomic.h
new file mode 100644
index 000000000..526767086
--- /dev/null
+++ b/test/validation/atomic/atomic.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_ATOMIC_H_
+#define _ODP_TEST_ATOMIC_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void atomic_test_atomic_inc_dec(void);
+void atomic_test_atomic_add_sub(void);
+void atomic_test_atomic_fetch_inc_dec(void);
+void atomic_test_atomic_fetch_add_sub(void);
+void atomic_test_atomic_max_min(void);
+void atomic_test_atomic_cas_inc_dec(void);
+void atomic_test_atomic_xchg(void);
+void atomic_test_atomic_non_relaxed(void);
+void atomic_test_atomic_op_lock_free(void);
+
+/* test arrays: */
+extern odp_testinfo_t atomic_suite_atomic[];
+
+/* test array init/term functions: */
+int atomic_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t atomic_suites[];
+
+/* executable init/term functions: */
+int atomic_init(void);
+
+/* main test program: */
+int atomic_main(void);
+
+#endif
diff --git a/test/validation/synchronizers/synchronizers_main.c b/test/validation/atomic/atomic_main.c
index 659d3152f..377bdd5b9 100644
--- a/test/validation/synchronizers/synchronizers_main.c
+++ b/test/validation/atomic/atomic_main.c
@@ -4,9 +4,9 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include "synchronizers.h"
+#include "atomic.h"
int main(void)
{
- return synchronizers_main();
+ return atomic_main();
}
diff --git a/test/validation/barrier/.gitignore b/test/validation/barrier/.gitignore
new file mode 100644
index 000000000..2e0ee7ade
--- /dev/null
+++ b/test/validation/barrier/.gitignore
@@ -0,0 +1 @@
+barrier_main
diff --git a/test/validation/barrier/Makefile.am b/test/validation/barrier/Makefile.am
new file mode 100644
index 000000000..8fc632c27
--- /dev/null
+++ b/test/validation/barrier/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestbarrier.la
+libtestbarrier_la_SOURCES = barrier.c
+
+test_PROGRAMS = barrier_main$(EXEEXT)
+dist_barrier_main_SOURCES = barrier_main.c
+barrier_main_LDADD = libtestbarrier.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = barrier.h
diff --git a/test/validation/barrier/barrier.c b/test/validation/barrier/barrier.c
new file mode 100644
index 000000000..8f15cdf0d
--- /dev/null
+++ b/test/validation/barrier/barrier.c
@@ -0,0 +1,393 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "barrier.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+#define BARRIER_ITERATIONS 64
+
+#define SLOW_BARRIER_DELAY 400
+#define BASE_DELAY 6
+
+#define NUM_TEST_BARRIERS BARRIER_ITERATIONS
+#define NUM_RESYNC_BARRIERS 100
+
+#define BARRIER_DELAY 10
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+static volatile int temp_result;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ odp_atomic_u32_t wait_cnt;
+} custom_barrier_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ odp_barrier_t test_barriers[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier1[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier2[NUM_TEST_BARRIERS];
+ volatile_u32_t slow_thread_num;
+ volatile_u32_t barrier_cnt1;
+ volatile_u32_t barrier_cnt2;
+ odp_barrier_t global_barrier;
+
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/*
+* Delay a consistent amount of time. Ideally the amount of CPU time taken
+* is linearly proportional to "iterations". The goal is to try to do some
+* work that the compiler optimizer won't optimize away, and also to
+* minimize loads and stores (at least to different memory addresses)
+* so as to not affect or be affected by caching issues. This does NOT have to
+* correlate to a specific number of cpu cycles or be consistent across
+* CPU architectures.
+*/
+static void thread_delay(per_thread_mem_t *per_thread_mem, uint32_t iterations)
+{
+ volatile_u64_t *counter_ptr;
+ uint32_t cnt;
+
+ counter_ptr = &per_thread_mem->delay_counter;
+
+ for (cnt = 1; cnt <= iterations; cnt++)
+ (*counter_ptr)++;
+}
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
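+/* Minimal custom barrier built on an atomic counter: each thread
+ * decrements the counter and then spins until it reaches zero. */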
+static void custom_barrier_init(custom_barrier_t *custom_barrier,
+ uint32_t num_threads)
+{
+ odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
+}
+
+static void custom_barrier_wait(custom_barrier_t *custom_barrier)
+{
+ volatile_u64_t counter = 1;
+ uint32_t delay_cnt, wait_cnt;
+
+ odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
+
+ wait_cnt = 1;
+ while (wait_cnt != 0) {
+ for (delay_cnt = 1; delay_cnt <= BARRIER_DELAY; delay_cnt++)
+ counter++;
+
+ wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
+ }
+}
+
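+/* Run the barrier rounds with one designated "slow" thread per round.
+ * With no_barrier_test set to 0 the threads synchronise on
+ * odp_barrier_wait(); the return value is the number of consistency
+ * errors observed. */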
+static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
+ odp_bool_t no_barrier_test)
+{
+ global_shared_mem_t *global_mem;
+ uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
+ uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
+ uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
+
+ thread_num = odp_thread_id();
+ global_mem = per_thread_mem->global_mem;
+ num_threads = global_mem->g_num_threads;
+ iterations = BARRIER_ITERATIONS;
+
+ barrier_errs = 0;
+ lock_owner_delay = SLOW_BARRIER_DELAY;
+
+ for (cnt = 1; cnt < iterations; cnt++) {
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
+
+ barrier_cnt1 = global_mem->barrier_cnt1;
+ barrier_cnt2 = global_mem->barrier_cnt2;
+
+ if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
+ printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
+ " %" PRIu32 " cnt=%" PRIu32 "\n",
+ thread_num, barrier_cnt1, barrier_cnt2, cnt);
+ barrier_errs++;
+ }
+
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
+
+ slow_thread_num = global_mem->slow_thread_num;
+ i_am_slow_thread = thread_num == slow_thread_num;
+ next_slow_thread = slow_thread_num + 1;
+ if (num_threads < next_slow_thread)
+ next_slow_thread = 1;
+
+ /*
+ * Now run the test: all but one thread immediately call
+ * odp_barrier_wait(), while the one "slow" thread waits a moderate
+ * amount of time before calling odp_barrier_wait(). The test fails
+ * if any thread in the first group has not waited for the "slow"
+ * thread. The "slow" thread is responsible for advancing the shared
+ * counters for the next round.
+ */
+ if (i_am_slow_thread) {
+ thread_delay(per_thread_mem, lock_owner_delay);
+ lock_owner_delay += BASE_DELAY;
+ if ((global_mem->barrier_cnt1 != cnt) ||
+ (global_mem->barrier_cnt2 != cnt) ||
+ (global_mem->slow_thread_num
+ != slow_thread_num))
+ barrier_errs++;
+ }
+
+ if (no_barrier_test == 0)
+ odp_barrier_wait(&global_mem->test_barriers[cnt]);
+
+ global_mem->barrier_cnt1 = cnt + 1;
+ odp_mb_full();
+
+ if (i_am_slow_thread) {
+ global_mem->slow_thread_num = next_slow_thread;
+ global_mem->barrier_cnt2 = cnt + 1;
+ odp_mb_full();
+ } else {
+ while (global_mem->barrier_cnt2 != (cnt + 1))
+ thread_delay(per_thread_mem, BASE_DELAY);
+ }
+ }
+
+ if ((global_mem->g_verbose) && (barrier_errs != 0))
+ printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " barrier_errs in %" PRIu32 " iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core, barrier_errs, iterations);
+
+ return barrier_errs;
+}
+
+static void *no_barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 1);
+
+ /*
+ * Note that the following CU_ASSERT may look inverted: without a
+ * barrier, this test is expected to observe barrier_errs, otherwise
+ * something is wrong with the test methodology or the ODP thread
+ * implementation. The test therefore PASSES only if barrier_errs were
+ * seen or only a single worker thread was used.
+ */
+ CU_ASSERT(barrier_errs != 0 || global_mem->g_num_threads == 1);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 0);
+
+ CU_ASSERT(barrier_errs == 0);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
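+/* (Re)initialise the ODP barriers and custom barriers for the configured
+ * number of threads and reset the shared round counters. */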
+static void barrier_test_init(void)
+{
+ uint32_t num_threads, idx;
+
+ num_threads = global_mem->g_num_threads;
+
+ for (idx = 0; idx < NUM_TEST_BARRIERS; idx++) {
+ odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
+ custom_barrier_init(&global_mem->custom_barrier1[idx],
+ num_threads);
+ custom_barrier_init(&global_mem->custom_barrier2[idx],
+ num_threads);
+ }
+
+ global_mem->slow_thread_num = 1;
+ global_mem->barrier_cnt1 = 1;
+ global_mem->barrier_cnt2 = 1;
+}
+
+/* Barrier tests */
+void barrier_test_memory_barrier(void)
+{
+ volatile int a = 0;
+ volatile int b = 0;
+ volatile int c = 0;
+ volatile int d = 0;
+
+ /* Call all memory barriers to verify that those are implemented */
+ a = 1;
+ odp_mb_release();
+ b = 1;
+ odp_mb_acquire();
+ c = 1;
+ odp_mb_full();
+ d = 1;
+
+ /* Avoid "variable set but not used" warning */
+ temp_result = a + b + c + d;
+}
+
+void barrier_test_no_barrier_functional(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ barrier_test_init();
+ odp_cunit_thread_create(no_barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+void barrier_test_barrier_functional(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ barrier_test_init();
+ odp_cunit_thread_create(barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+odp_testinfo_t barrier_suite_barrier[] = {
+ ODP_TEST_INFO(barrier_test_memory_barrier),
+ ODP_TEST_INFO(barrier_test_no_barrier_functional),
+ ODP_TEST_INFO(barrier_test_barrier_functional),
+ ODP_TEST_INFO_NULL
+};
+
+int barrier_init(void)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_cpumask_t mask;
+
+ if (0 != odp_init_global(NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (ODP_SHM_INVALID == global_shm) {
+ fprintf(stderr, "Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ workers_count = odp_cpumask_default_worker(&mask, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ return ret;
+}
+
+odp_suiteinfo_t barrier_suites[] = {
+ {"barrier", NULL, NULL,
+ barrier_suite_barrier},
+ ODP_SUITE_INFO_NULL
+};
+
+int barrier_main(void)
+{
+ int ret;
+
+ odp_cunit_register_global_init(barrier_init);
+
+ ret = odp_cunit_register(barrier_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/barrier/barrier.h b/test/validation/barrier/barrier.h
new file mode 100644
index 000000000..3cddfc428
--- /dev/null
+++ b/test/validation/barrier/barrier.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_BARRIER_H_
+#define _ODP_TEST_BARRIER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void barrier_test_memory_barrier(void);
+void barrier_test_no_barrier_functional(void);
+void barrier_test_barrier_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t barrier_suite_barrier[];
+
+/* test registry: */
+extern odp_suiteinfo_t barrier_suites[];
+
+/* executable init/term functions: */
+int barrier_init(void);
+
+/* main test program: */
+int barrier_main(void);
+
+#endif
diff --git a/test/validation/barrier/barrier_main.c b/test/validation/barrier/barrier_main.c
new file mode 100644
index 000000000..88c9b3e52
--- /dev/null
+++ b/test/validation/barrier/barrier_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "barrier.h"
+
+int main(void)
+{
+ return barrier_main();
+}
diff --git a/test/validation/classification/classification.h b/test/validation/classification/classification.h
index a186339ff..5508af7ca 100644
--- a/test/validation/classification/classification.h
+++ b/test/validation/classification/classification.h
@@ -19,6 +19,8 @@
#define CLS_DEFAULT_DADDR "10.0.0.100/32"
#define CLS_DEFAULT_SPORT 1024
#define CLS_DEFAULT_DPORT 2048
+#define CLS_DEFAULT_DMAC 0x010203040506
+#define CLS_DEFAULT_SMAC 0x060504030201
/* Config values for Error CoS */
#define TEST_ERROR 1
@@ -29,18 +31,18 @@
#define CLS_PMR_CHAIN_SRC 2
#define CLS_PMR_CHAIN_DST 3
#define CLS_PMR_CHAIN_SADDR "10.0.0.5/32"
-#define CLS_PMR_CHAIN_SPORT 3000
+#define CLS_PMR_CHAIN_PORT 3000
/* Config values for PMR */
#define TEST_PMR 1
#define CLS_PMR 4
-#define CLS_PMR_SPORT 4000
+#define CLS_PMR_PORT 4000
/* Config values for PMR SET */
#define TEST_PMR_SET 1
#define CLS_PMR_SET 5
#define CLS_PMR_SET_SADDR "10.0.0.6/32"
-#define CLS_PMR_SET_SPORT 5000
+#define CLS_PMR_SET_PORT 5000
/* Config values for CoS L2 Priority */
#define TEST_L2_QOS 1
@@ -76,6 +78,8 @@ void classification_test_pmr_term_tcp_sport(void);
void classification_test_pmr_term_udp_dport(void);
void classification_test_pmr_term_udp_sport(void);
void classification_test_pmr_term_ipproto(void);
+void classification_test_pmr_term_dmac(void);
+void classification_test_pmr_term_packet_len(void);
/* test arrays: */
extern odp_testinfo_t classification_suite_basic[];
diff --git a/test/validation/classification/odp_classification_basic.c b/test/validation/classification/odp_classification_basic.c
index f0b7a4243..81077b609 100644
--- a/test/validation/classification/odp_classification_basic.c
+++ b/test/validation/classification/odp_classification_basic.c
@@ -78,7 +78,7 @@ void classification_test_create_pmr_match(void)
val = 1024;
mask = 0xffff;
- match.term = ODP_PMR_TCP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
@@ -99,7 +99,7 @@ void classification_test_destroy_pmr(void)
val = 1024;
mask = 0xffff;
- match.term = ODP_PMR_TCP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
diff --git a/test/validation/classification/odp_classification_common.c b/test/validation/classification/odp_classification_common.c
index afcea4546..54ce5949d 100644
--- a/test/validation/classification/odp_classification_common.c
+++ b/test/validation/classification/odp_classification_common.c
@@ -13,8 +13,8 @@
#include <odp/helper/tcp.h>
typedef struct cls_test_packet {
- uint32be_t magic;
- uint32be_t seq;
+ odp_u32be_t magic;
+ odp_u32be_t seq;
} cls_test_packet_t;
int destroy_inq(odp_pktio_t pktio)
@@ -161,17 +161,14 @@ odp_queue_t queue_create(const char *queuename, bool sched)
if (sched) {
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue = odp_queue_create(queuename, &qparam);
} else {
- queue = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_POLL,
- NULL);
+ queue = odp_queue_create(queuename, NULL);
}
return queue;
@@ -193,21 +190,35 @@ odp_pool_t pool_create(const char *poolname)
odp_packet_t create_packet(odp_pool_t pool, bool vlan,
odp_atomic_u32_t *seq, bool flag_udp)
{
+ return create_packet_len(pool, vlan, seq, flag_udp, 0);
+}
+
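+/* Build a test packet with 'len' extra payload bytes on top of the
+ * sequence header; used by the packet length PMR test. */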
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len)
+{
uint32_t seqno;
odph_ethhdr_t *ethhdr;
odph_udphdr_t *udp;
odph_tcphdr_t *tcp;
odph_ipv4hdr_t *ip;
- uint8_t payload_len;
- char src_mac[ODPH_ETHADDR_LEN] = {0};
- char dst_mac[ODPH_ETHADDR_LEN] = {0};
+ uint16_t payload_len;
+ uint64_t src_mac = CLS_DEFAULT_SMAC;
+ uint64_t dst_mac = CLS_DEFAULT_DMAC;
+ uint64_t dst_mac_be;
uint32_t addr = 0;
uint32_t mask;
int offset;
odp_packet_t pkt;
int packet_len = 0;
- payload_len = sizeof(cls_test_packet_t);
+ /* After conversion to big endian, the 48-bit Ethernet address has to
+ * be shifted so that the address bytes start at the beginning of the
+ * 64-bit value in memory */
+ dst_mac_be = odp_cpu_to_be_64(dst_mac);
+ if (dst_mac != dst_mac_be)
+ dst_mac_be = dst_mac_be >> (64 - 8 * ODPH_ETHADDR_LEN);
+
+ payload_len = sizeof(cls_test_packet_t) + len;
packet_len += ODPH_ETHHDR_LEN;
packet_len += ODPH_IPV4HDR_LEN;
if (flag_udp)
@@ -226,8 +237,8 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
offset = 0;
odp_packet_l2_offset_set(pkt, offset);
ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- memcpy(ethhdr->src.addr, src_mac, ODPH_ETHADDR_LEN);
- memcpy(ethhdr->dst.addr, dst_mac, ODPH_ETHADDR_LEN);
+ memcpy(ethhdr->src.addr, &src_mac, ODPH_ETHADDR_LEN);
+ memcpy(ethhdr->dst.addr, &dst_mac_be, ODPH_ETHADDR_LEN);
offset += sizeof(odph_ethhdr_t);
if (vlan) {
/* Default vlan header */
@@ -240,7 +251,7 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
vlan->tpid = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
offset += sizeof(odph_vlanhdr_t);
parseptr += sizeof(odph_vlanhdr_t);
- uint16be_t *type = (uint16be_t *)(void *)parseptr;
+ odp_u16be_t *type = (odp_u16be_t *)(void *)parseptr;
*type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
} else {
ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
@@ -251,10 +262,10 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
/* ipv4 */
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
+ parse_ipv4_string(CLS_DEFAULT_DADDR, &addr, &mask);
ip->dst_addr = odp_cpu_to_be_32(addr);
- parse_ipv4_string(CLS_DEFAULT_DADDR, &addr, &mask);
+ parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
ip->src_addr = odp_cpu_to_be_32(addr);
ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
if (flag_udp)
@@ -299,3 +310,55 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
return pkt;
}
+
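+/* Return the first TCP/UDP port match term that odp_pmr_terms_cap()
+ * reports as supported, so the tests do not depend on one specific term. */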
+odp_pmr_term_t find_first_supported_l3_pmr(void)
+{
+ unsigned long long cap;
+ odp_pmr_term_t term = ODP_PMR_TCP_DPORT;
+
+ /* choose supported PMR */
+ cap = odp_pmr_terms_cap();
+ if (cap & (1 << ODP_PMR_UDP_SPORT))
+ term = ODP_PMR_UDP_SPORT;
+ else if (cap & (1 << ODP_PMR_UDP_DPORT))
+ term = ODP_PMR_UDP_DPORT;
+ else if (cap & (1 << ODP_PMR_TCP_SPORT))
+ term = ODP_PMR_TCP_SPORT;
+ else if (cap & (1 << ODP_PMR_TCP_DPORT))
+ term = ODP_PMR_TCP_DPORT;
+ else
+ CU_FAIL("Implementations doesn't support any TCP/UDP PMR");
+
+ return term;
+}
+
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port)
+{
+ odph_udphdr_t *udp;
+ odph_tcphdr_t *tcp;
+ odp_pmr_term_t term;
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ port = odp_cpu_to_be_16(port);
+ term = find_first_supported_l3_pmr();
+ switch (term) {
+ case ODP_PMR_UDP_SPORT:
+ udp->src_port = port;
+ break;
+ case ODP_PMR_UDP_DPORT:
+ udp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_DPORT:
+ tcp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_SPORT:
+ tcp->src_port = port;
+ break;
+ default:
+ CU_FAIL("Unsupported L3 term");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/validation/classification/odp_classification_test_pmr.c b/test/validation/classification/odp_classification_test_pmr.c
index 53b96204e..5f516a7fc 100644
--- a/test/validation/classification/odp_classification_test_pmr.c
+++ b/test/validation/classification/odp_classification_test_pmr.c
@@ -39,8 +39,9 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type)
return ODP_PKTIO_INVALID;
odp_pktio_param_init(&pktio_param);
- if (q_type == ODP_QUEUE_TYPE_POLL)
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
else
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
@@ -55,13 +56,14 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type)
return pktio;
}
-int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
+int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype ODP_UNUSED)
{
odp_queue_param_t qparam;
odp_queue_t inq_def;
char inq_name[ODP_QUEUE_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -70,10 +72,7 @@ int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
odp_pktio_to_u64(pktio));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(
- inq_name,
- ODP_QUEUE_TYPE_PKTIN,
- qtype == ODP_QUEUE_TYPE_POLL ? NULL : &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
CU_ASSERT_FATAL(inq_def != ODP_QUEUE_INVALID);
@@ -156,6 +155,7 @@ void classification_test_pmr_term_tcp_dport(void)
odp_pool_t pool;
odp_pool_t pool_recv;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_DPORT;
mask = 0xffff;
@@ -166,6 +166,9 @@ void classification_test_pmr_term_tcp_dport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_TCP_DPORT;
match.val = &val;
match.mask = &mask;
@@ -193,12 +196,13 @@ void classification_test_pmr_term_tcp_dport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
@@ -219,6 +223,9 @@ void classification_test_pmr_term_tcp_dport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
@@ -265,6 +272,7 @@ void classification_test_pmr_term_tcp_sport(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_SPORT;
mask = 0xffff;
@@ -275,6 +283,9 @@ void classification_test_pmr_term_tcp_sport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_TCP_SPORT;
match.val = &val;
match.mask = &mask;
@@ -301,12 +312,13 @@ void classification_test_pmr_term_tcp_sport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
@@ -325,6 +337,9 @@ void classification_test_pmr_term_tcp_sport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
@@ -371,6 +386,7 @@ void classification_test_pmr_term_udp_dport(void)
char cosname[ODP_COS_NAME_LEN];
odp_pmr_match_t match;
odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_DPORT;
mask = 0xffff;
@@ -381,6 +397,9 @@ void classification_test_pmr_term_udp_dport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_UDP_DPORT;
match.val = &val;
match.mask = &mask;
@@ -407,12 +426,13 @@ void classification_test_pmr_term_udp_dport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
@@ -432,6 +452,9 @@ void classification_test_pmr_term_udp_dport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
@@ -478,6 +501,7 @@ void classification_test_pmr_term_udp_sport(void)
char cosname[ODP_COS_NAME_LEN];
odp_pmr_match_t match;
odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_SPORT;
mask = 0xffff;
@@ -488,6 +512,9 @@ void classification_test_pmr_term_udp_sport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_UDP_SPORT;
match.val = &val;
match.mask = &mask;
@@ -514,12 +541,13 @@ void classification_test_pmr_term_udp_sport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
@@ -538,6 +566,9 @@ void classification_test_pmr_term_udp_sport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
@@ -583,6 +614,7 @@ void classification_test_pmr_term_ipproto(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -593,6 +625,9 @@ void classification_test_pmr_term_ipproto(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -619,8 +654,114 @@ void classification_test_pmr_term_ipproto(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
+ pkt = create_packet(pkt_pool, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
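+/* Match on the destination MAC address: test packets carry CLS_DEFAULT_DMAC
+ * and land in the dmac CoS, while a packet with a zeroed destination MAC
+ * falls back to the default CoS. */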
+void classification_test_pmr_term_dmac(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint64_t val;
+ uint64_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
+
+ val = CLS_DEFAULT_DMAC; /* 48 bit Ethernet Mac address */
+ mask = 0xffffffffffff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
configure_default_cos(pktio, &default_cos,
&default_queue, &default_pool);
+
+ match.term = ODP_PMR_DMAC;
+ match.val = &val;
+ match.mask = &mask;
+ match.val_sz = ODPH_ETHADDR_LEN;
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("dmac", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("dmac");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "dmac");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -638,6 +779,8 @@ void classification_test_pmr_term_ipproto(void)
/* Other packets delivered to default queue */
pkt = create_packet(pkt_pool, false, &seq, false);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ memset(eth->dst.addr, 0, ODPH_ETHADDR_LEN);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
@@ -663,6 +806,116 @@ void classification_test_pmr_term_ipproto(void)
odp_pktio_close(pktio);
}
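+/* Match on packet length with ODP_PMR_LEN: a packet within the masked
+ * length range goes to the packet_len CoS, a short packet goes to the
+ * default CoS. */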
+void classification_test_pmr_term_packet_len(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
+
+ val = 1024;
+ /* Mask 0xff00 with val 1024 matches any packet of length 1024 - 1279 */
+ mask = 0xff00;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ match.term = ODP_PMR_LEN;
+ match.val = &val;
+ match.mask = &mask;
+ match.val_sz = sizeof(val);
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("packet_len", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("packet_len");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "packet_len");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
+ /* create packet of payload length 1024 */
+ pkt = create_packet_len(pkt_pool, false, &seq, true, 1024);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
static void classification_test_pmr_pool_set(void)
{
odp_packet_t pkt;
@@ -684,6 +937,7 @@ static void classification_test_pmr_pool_set(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -694,6 +948,9 @@ static void classification_test_pmr_pool_set(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -727,12 +984,13 @@ static void classification_test_pmr_pool_set(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
enqueue_pktio_interface(pkt, pktio);
@@ -777,6 +1035,7 @@ static void classification_test_pmr_queue_set(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -787,6 +1046,9 @@ static void classification_test_pmr_queue_set(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -820,12 +1082,13 @@ static void classification_test_pmr_queue_set(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
enqueue_pktio_interface(pkt, pktio);
@@ -849,13 +1112,121 @@ static void classification_test_pmr_queue_set(void)
odp_pktio_close(pktio);
}
+static void classification_test_pmr_term_daddr(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_pool_t pool;
+ odp_pool_t default_pool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ odp_cos_t default_cos;
+ uint32_t addr;
+ uint32_t mask;
+ char cosname[ODP_QUEUE_NAME_LEN];
+ odp_pmr_match_t match;
+ odp_cls_cos_param_t cls_param;
+ odph_ipv4hdr_t *ip;
+ const char *dst_addr = "10.0.0.99/32";
+ odph_ethhdr_t *eth;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ parse_ipv4_string(dst_addr, &addr, &mask);
+ match.term = ODP_PMR_DIP_ADDR;
+ match.val = &addr;
+ match.mask = &mask;
+ match.val_sz = sizeof(addr);
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("daddr", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("daddr");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "daddr");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
+ /* A packet whose destination IP address matches the PMR rule should
+ * be received on the CoS queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ ip->dst_addr = odp_cpu_to_be_32(addr);
+ ip->chksum = odph_ipv4_csum_update(pkt);
+
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
odp_testinfo_t classification_suite_pmr[] = {
ODP_TEST_INFO(classification_test_pmr_term_tcp_dport),
ODP_TEST_INFO(classification_test_pmr_term_tcp_sport),
ODP_TEST_INFO(classification_test_pmr_term_udp_dport),
ODP_TEST_INFO(classification_test_pmr_term_udp_sport),
ODP_TEST_INFO(classification_test_pmr_term_ipproto),
+ ODP_TEST_INFO(classification_test_pmr_term_dmac),
ODP_TEST_INFO(classification_test_pmr_pool_set),
ODP_TEST_INFO(classification_test_pmr_queue_set),
+ ODP_TEST_INFO(classification_test_pmr_term_daddr),
+ ODP_TEST_INFO(classification_test_pmr_term_packet_len),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/classification/odp_classification_tests.c b/test/validation/classification/odp_classification_tests.c
index e11c3d8b6..a19425987 100644
--- a/test/validation/classification/odp_classification_tests.c
+++ b/test/validation/classification/odp_classification_tests.c
@@ -10,6 +10,7 @@
#include <odp/helper/eth.h>
#include <odp/helper/ip.h>
#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
static odp_cos_t cos_list[CLS_ENTRIES];
static odp_pmr_t pmr_list[CLS_ENTRIES];
@@ -50,13 +51,13 @@ int classification_suite_init(void)
}
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "inq_loop");
- inq_def = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(queuename, &qparam);
odp_pktio_inq_setdef(pktio_loop, inq_def);
for (i = 0; i < CLS_ENTRIES; i++)
@@ -139,15 +140,14 @@ void configure_cls_pmr_chain(void)
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
qparam.sched.lock_count = ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE;
sprintf(queuename, "%s", "SrcQueue");
- queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_SRC] != ODP_QUEUE_INVALID);
@@ -165,14 +165,13 @@ void configure_cls_pmr_chain(void)
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DstQueue");
- queue_list[CLS_PMR_CHAIN_DST] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_CHAIN_DST] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_DST] != ODP_QUEUE_INVALID);
sprintf(poolname, "%s", "DstPool");
@@ -195,9 +194,9 @@ void configure_cls_pmr_chain(void)
pmr_list[CLS_PMR_CHAIN_SRC] = odp_pmr_create(&match);
CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_SRC] != ODP_PMR_INVAL);
- val = CLS_PMR_CHAIN_SPORT;
+ val = CLS_PMR_CHAIN_PORT;
maskport = 0xffff;
- match.term = ODP_PMR_UDP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &maskport;
match.val_sz = sizeof(val);
@@ -218,7 +217,6 @@ void test_cls_pmr_chain(void)
{
odp_packet_t pkt;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
odp_queue_t queue;
odp_pool_t pool;
uint32_t addr = 0;
@@ -236,8 +234,7 @@ void test_cls_pmr_chain(void)
ip->chksum = 0;
ip->chksum = odph_ipv4_csum_update(pkt);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_CHAIN_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_CHAIN_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
@@ -280,12 +277,12 @@ void configure_pktio_default_cos(void)
char poolname[ODP_POOL_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DefaultQueue");
- queue_list[CLS_DEFAULT] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED, &qparam);
+ queue_list[CLS_DEFAULT] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_DEFAULT] != ODP_QUEUE_INVALID);
sprintf(poolname, "DefaultPool");
@@ -339,14 +336,13 @@ void configure_pktio_error_cos(void)
char poolname[ODP_POOL_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "ErrorCos");
- queue_list[CLS_ERROR] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_ERROR] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_ERROR] != ODP_QUEUE_INVALID);
sprintf(poolname, "ErrorPool");
@@ -439,13 +435,13 @@ void configure_cos_with_l2_priority(void)
qos_tbl[i] = 0;
odp_queue_param_init(&qparam);
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < num_qos; i++) {
qparam.sched.prio = ODP_SCHED_PRIO_LOWEST - i;
sprintf(queuename, "%s_%d", "L2_Queue", i);
- queue_tbl[i] = odp_queue_create(queuename, ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_tbl[i] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
queue_list[CLS_L2_QOS_0 + i] = queue_tbl[i];
@@ -513,9 +509,9 @@ void configure_pmr_cos(void)
char queuename[ODP_QUEUE_NAME_LEN];
char poolname[ODP_POOL_NAME_LEN];
- val = CLS_PMR_SPORT;
+ val = CLS_PMR_PORT;
mask = 0xffff;
- match.term = ODP_PMR_UDP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
@@ -524,14 +520,13 @@ void configure_pmr_cos(void)
CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVAL);
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "PMR_CoS");
- queue_list[CLS_PMR] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR] != ODP_QUEUE_INVALID);
sprintf(poolname, "PMR_Pool");
@@ -554,7 +549,6 @@ void configure_pmr_cos(void)
void test_pmr_cos(void)
{
odp_packet_t pkt;
- odph_udphdr_t *udp;
odp_queue_t queue;
odp_pool_t pool;
uint32_t seqno = 0;
@@ -563,8 +557,7 @@ void test_pmr_cos(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
@@ -597,9 +590,9 @@ void configure_pktio_pmr_match_set_cos(void)
pmr_terms[0].val_sz = sizeof(addr);
- val = CLS_PMR_SET_SPORT;
+ val = CLS_PMR_SET_PORT;
maskport = 0xffff;
- pmr_terms[1].term = ODP_PMR_UDP_SPORT;
+ pmr_terms[1].term = find_first_supported_l3_pmr();
pmr_terms[1].val = &val;
pmr_terms[1].mask = &maskport;
pmr_terms[1].val_sz = sizeof(val);
@@ -608,14 +601,13 @@ void configure_pktio_pmr_match_set_cos(void)
CU_ASSERT(retval > 0);
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "cos_pmr_set_queue");
- queue_list[CLS_PMR_SET] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_SET] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_SET] != ODP_QUEUE_INVALID);
sprintf(poolname, "cos_pmr_set_pool");
@@ -640,7 +632,6 @@ void test_pktio_pmr_match_set_cos(void)
uint32_t addr = 0;
uint32_t mask;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
odp_packet_t pkt;
odp_pool_t pool;
odp_queue_t queue;
@@ -657,8 +648,7 @@ void test_pktio_pmr_match_set_cos(void)
ip->chksum = 0;
ip->chksum = odph_ipv4_csum_update(pkt);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_SET_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_SET_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
diff --git a/test/validation/classification/odp_classification_testsuites.h b/test/validation/classification/odp_classification_testsuites.h
index 02828e125..19d5ae2f3 100644
--- a/test/validation/classification/odp_classification_testsuites.h
+++ b/test/validation/classification/odp_classification_testsuites.h
@@ -22,6 +22,9 @@ int classification_suite_pmr_init(void);
odp_packet_t create_packet(odp_pool_t pool, bool vlan,
odp_atomic_u32_t *seq, bool udp);
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len);
int cls_pkt_set_seq(odp_packet_t pkt);
uint32_t cls_pkt_get_seq(odp_packet_t pkt);
odp_pktio_t create_pktio(odp_queue_type_t q_type);
@@ -46,5 +49,7 @@ void test_pmr_cos(void);
void configure_pktio_pmr_match_set_cos(void);
void test_pktio_pmr_match_set_cos(void);
int destroy_inq(odp_pktio_t pktio);
+odp_pmr_term_t find_first_supported_l3_pmr(void);
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
#endif /* ODP_BUFFER_TESTSUITES_H_ */
diff --git a/test/validation/common/odp_cunit_common.h b/test/validation/common/odp_cunit_common.h
index 37e8e8c7e..bf7a7f3a9 100644
--- a/test/validation/common/odp_cunit_common.h
+++ b/test/validation/common/odp_cunit_common.h
@@ -47,9 +47,12 @@ static inline void odp_cunit_test_missing(void) { }
#define ODP_TEST_INFO_INACTIVE(test_func, args...) \
{#test_func, odp_cunit_test_missing, odp_cunit_test_inactive}
+#define ODP_TEST_INACTIVE 0
+#define ODP_TEST_ACTIVE 1
+
/* A test case that may be marked as inactive at runtime based on the
- * return value of the cond_func function. A return value of 0 means
- * inactive, anything else is active. */
+ * return value of the cond_func function. A return value of ODP_TEST_INACTIVE
+ * means inactive, ODP_TEST_ACTIVE means active. */
#define ODP_TEST_INFO_CONDITIONAL(test_func, cond_func) \
{#test_func, test_func, cond_func}
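
For reference, a minimal sketch of how this macro is typically used: a check function returns ODP_TEST_ACTIVE or ODP_TEST_INACTIVE and is paired with the test in the suite table. The names example_check_feature, example_test_feature and feature_is_supported below are illustrative and not part of this patch; the pktio suite applies the same pattern with pktio_check_statistics_counters further down in this diff.

/* Illustrative check function: report whether the feature under test
 * is available on this platform. feature_is_supported() is a
 * hypothetical helper standing in for a real capability probe. */
static int example_check_feature(void)
{
	if (feature_is_supported())
		return ODP_TEST_ACTIVE;

	return ODP_TEST_INACTIVE;
}

static void example_test_feature(void)
{
	CU_ASSERT(1); /* real test body goes here */
}

odp_testinfo_t example_suite[] = {
	ODP_TEST_INFO_CONDITIONAL(example_test_feature,
				  example_check_feature),
	ODP_TEST_INFO_NULL
};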
diff --git a/test/validation/crypto/crypto.c b/test/validation/crypto/crypto.c
index 1234f783f..b2d8f459d 100644
--- a/test/validation/crypto/crypto.c
+++ b/test/validation/crypto/crypto.c
@@ -51,8 +51,7 @@ int crypto_init(void)
fprintf(stderr, "Packet pool creation failed.\n");
return -1;
}
- out_queue = odp_queue_create("crypto-out",
- ODP_QUEUE_TYPE_POLL, NULL);
+ out_queue = odp_queue_create("crypto-out", NULL);
if (ODP_QUEUE_INVALID == out_queue) {
fprintf(stderr, "Crypto outq creation failed.\n");
return -1;
diff --git a/test/validation/init/init.c b/test/validation/init/init.c
index a8a564063..62bd75cc6 100644
--- a/test/validation/init/init.c
+++ b/test/validation/init/init.c
@@ -18,7 +18,7 @@ static void odp_init_abort(void) ODP_NORETURN;
/* replacement log function: */
ODP_PRINTF_FORMAT(2, 3)
-static int odp_init_log(odp_log_level_e level, const char *fmt, ...);
+static int odp_init_log(odp_log_level_t level, const char *fmt, ...);
/* test ODP global init, with alternate abort function */
void init_test_odp_init_global_replace_abort(void)
@@ -98,7 +98,7 @@ odp_suiteinfo_t init_suites_log[] = {
ODP_SUITE_INFO_NULL,
};
-static int odp_init_log(odp_log_level_e level __attribute__((unused)),
+static int odp_init_log(odp_log_level_t level __attribute__((unused)),
const char *fmt, ...)
{
va_list args;
diff --git a/test/validation/lock/.gitignore b/test/validation/lock/.gitignore
new file mode 100644
index 000000000..ff16646f4
--- /dev/null
+++ b/test/validation/lock/.gitignore
@@ -0,0 +1 @@
+lock_main
diff --git a/test/validation/lock/Makefile.am b/test/validation/lock/Makefile.am
new file mode 100644
index 000000000..29993df44
--- /dev/null
+++ b/test/validation/lock/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestlock.la
+libtestlock_la_SOURCES = lock.c
+
+test_PROGRAMS = lock_main$(EXEEXT)
+dist_lock_main_SOURCES = lock_main.c
+lock_main_LDADD = libtestlock.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = lock.h
diff --git a/test/validation/synchronizers/synchronizers.c b/test/validation/lock/lock.c
index 0302069b8..0f4415dba 100644
--- a/test/validation/synchronizers/synchronizers.c
+++ b/test/validation/lock/lock.c
@@ -9,35 +9,21 @@
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
-#include "synchronizers.h"
+#include "lock.h"
#define VERBOSE 0
#define MAX_ITERATIONS 1000
-#define BARRIER_ITERATIONS 64
#define SLOW_BARRIER_DELAY 400
#define BASE_DELAY 6
#define MIN_DELAY 1
-#define NUM_TEST_BARRIERS BARRIER_ITERATIONS
#define NUM_RESYNC_BARRIERS 100
-#define ADD_SUB_CNT 5
-
-#define CNT 10
-#define BARRIER_DELAY 10
-#define U32_INIT_VAL (1UL << 10)
-#define U64_INIT_VAL (1ULL << 33)
-
#define GLOBAL_SHM_NAME "GlobalLockTest"
#define UNUSED __attribute__((__unused__))
-static odp_atomic_u32_t a32u;
-static odp_atomic_u64_t a64u;
-
-static volatile int temp_result;
-
typedef __volatile uint32_t volatile_u32_t;
typedef __volatile uint64_t volatile_u64_t;
@@ -52,9 +38,6 @@ typedef struct {
uint32_t g_verbose;
uint32_t g_max_num_cores;
- odp_barrier_t test_barriers[NUM_TEST_BARRIERS];
- custom_barrier_t custom_barrier1[NUM_TEST_BARRIERS];
- custom_barrier_t custom_barrier2[NUM_TEST_BARRIERS];
volatile_u32_t slow_thread_num;
volatile_u32_t barrier_cnt1;
volatile_u32_t barrier_cnt2;
@@ -143,145 +126,6 @@ static void thread_finalize(per_thread_mem_t *per_thread_mem)
free(per_thread_mem);
}
-static void custom_barrier_init(custom_barrier_t *custom_barrier,
- uint32_t num_threads)
-{
- odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
-}
-
-static void custom_barrier_wait(custom_barrier_t *custom_barrier)
-{
- volatile_u64_t counter = 1;
- uint32_t delay_cnt, wait_cnt;
-
- odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
-
- wait_cnt = 1;
- while (wait_cnt != 0) {
- for (delay_cnt = 1; delay_cnt <= BARRIER_DELAY; delay_cnt++)
- counter++;
-
- wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
- }
-}
-
-static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
- odp_bool_t no_barrier_test)
-{
- global_shared_mem_t *global_mem;
- uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
- uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
- uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
-
- thread_num = odp_thread_id();
- global_mem = per_thread_mem->global_mem;
- num_threads = global_mem->g_num_threads;
- iterations = BARRIER_ITERATIONS;
-
- barrier_errs = 0;
- lock_owner_delay = SLOW_BARRIER_DELAY;
-
- for (cnt = 1; cnt < iterations; cnt++) {
- /* Wait here until all of the threads reach this point */
- custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
-
- barrier_cnt1 = global_mem->barrier_cnt1;
- barrier_cnt2 = global_mem->barrier_cnt2;
-
- if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
- printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
- " %" PRIu32 " cnt=%" PRIu32 "\n",
- thread_num, barrier_cnt1, barrier_cnt2, cnt);
- barrier_errs++;
- }
-
- /* Wait here until all of the threads reach this point */
- custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
-
- slow_thread_num = global_mem->slow_thread_num;
- i_am_slow_thread = thread_num == slow_thread_num;
- next_slow_thread = slow_thread_num + 1;
- if (num_threads < next_slow_thread)
- next_slow_thread = 1;
-
- /*
- * Now run the test, which involves having all but one thread
- * immediately calling odp_barrier_wait(), and one thread wait a
- * moderate amount of time and then calling odp_barrier_wait().
- * The test fails if any of the first group of threads
- * has not waited for the "slow" thread. The "slow" thread is
- * responsible for re-initializing the barrier for next trial.
- */
- if (i_am_slow_thread) {
- thread_delay(per_thread_mem, lock_owner_delay);
- lock_owner_delay += BASE_DELAY;
- if ((global_mem->barrier_cnt1 != cnt) ||
- (global_mem->barrier_cnt2 != cnt) ||
- (global_mem->slow_thread_num
- != slow_thread_num))
- barrier_errs++;
- }
-
- if (no_barrier_test == 0)
- odp_barrier_wait(&global_mem->test_barriers[cnt]);
-
- global_mem->barrier_cnt1 = cnt + 1;
- odp_mb_full();
-
- if (i_am_slow_thread) {
- global_mem->slow_thread_num = next_slow_thread;
- global_mem->barrier_cnt2 = cnt + 1;
- odp_mb_full();
- } else {
- while (global_mem->barrier_cnt2 != (cnt + 1))
- thread_delay(per_thread_mem, BASE_DELAY);
- }
- }
-
- if ((global_mem->g_verbose) && (barrier_errs != 0))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " barrier_errs in %" PRIu32 " iterations\n", thread_num,
- per_thread_mem->thread_id,
- per_thread_mem->thread_core, barrier_errs, iterations);
-
- return barrier_errs;
-}
-
-static void *no_barrier_functional_test(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
- uint32_t barrier_errs;
-
- per_thread_mem = thread_init();
- barrier_errs = barrier_test(per_thread_mem, 1);
-
- /*
- * Note that the following CU_ASSERT MAY appear incorrect, but for the
- * no_barrier test it should see barrier_errs or else there is something
- * wrong with the test methodology or the ODP thread implementation.
- * So this test PASSES only if it sees barrier_errs or a single
- * worker was used.
- */
- CU_ASSERT(barrier_errs != 0 || global_mem->g_num_threads == 1);
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *barrier_functional_test(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
- uint32_t barrier_errs;
-
- per_thread_mem = thread_init();
- barrier_errs = barrier_test(per_thread_mem, 0);
-
- CU_ASSERT(barrier_errs == 0);
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
static void spinlock_api_test(odp_spinlock_t *spinlock)
{
odp_spinlock_init(spinlock);
@@ -1055,269 +899,8 @@ static void *rwlock_recursive_functional_test(void *arg UNUSED)
return NULL;
}
-static void barrier_test_init(void)
-{
- uint32_t num_threads, idx;
-
- num_threads = global_mem->g_num_threads;
-
- for (idx = 0; idx < NUM_TEST_BARRIERS; idx++) {
- odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
- custom_barrier_init(&global_mem->custom_barrier1[idx],
- num_threads);
- custom_barrier_init(&global_mem->custom_barrier2[idx],
- num_threads);
- }
-
- global_mem->slow_thread_num = 1;
- global_mem->barrier_cnt1 = 1;
- global_mem->barrier_cnt2 = 1;
-}
-
-static void test_atomic_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u32(&a32u);
-}
-
-static void test_atomic_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u64(&a64u);
-}
-
-static void test_atomic_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u32(&a32u);
-}
-
-static void test_atomic_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u64(&a64u);
-}
-
-static void test_atomic_fetch_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u32(&a32u);
-}
-
-static void test_atomic_fetch_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u64(&a64u);
-}
-
-static void test_atomic_fetch_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u32(&a32u);
-}
-
-static void test_atomic_fetch_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u64(&a64u);
-}
-
-static void test_atomic_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_inc_dec_32(void)
-{
- test_atomic_inc_32();
- test_atomic_dec_32();
-}
-
-static void test_atomic_inc_dec_64(void)
-{
- test_atomic_inc_64();
- test_atomic_dec_64();
-}
-
-static void test_atomic_fetch_inc_dec_32(void)
-{
- test_atomic_fetch_inc_32();
- test_atomic_fetch_dec_32();
-}
-
-static void test_atomic_fetch_inc_dec_64(void)
-{
- test_atomic_fetch_inc_64();
- test_atomic_fetch_dec_64();
-}
-
-static void test_atomic_add_sub_32(void)
-{
- test_atomic_add_32();
- test_atomic_sub_32();
-}
-
-static void test_atomic_add_sub_64(void)
-{
- test_atomic_add_64();
- test_atomic_sub_64();
-}
-
-static void test_atomic_fetch_add_sub_32(void)
-{
- test_atomic_fetch_add_32();
- test_atomic_fetch_sub_32();
-}
-
-static void test_atomic_fetch_add_sub_64(void)
-{
- test_atomic_fetch_add_64();
- test_atomic_fetch_sub_64();
-}
-
-static void test_atomic_init(void)
-{
- odp_atomic_init_u32(&a32u, 0);
- odp_atomic_init_u64(&a64u, 0);
-}
-
-static void test_atomic_store(void)
-{
- odp_atomic_store_u32(&a32u, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u, U64_INIT_VAL);
-}
-
-static void test_atomic_validate(void)
-{
- CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
- CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
-}
-
-/* Barrier tests */
-void synchronizers_test_memory_barrier(void)
-{
- volatile int a = 0;
- volatile int b = 0;
- volatile int c = 0;
- volatile int d = 0;
-
- /* Call all memory barriers to verify that those are implemented */
- a = 1;
- odp_mb_release();
- b = 1;
- odp_mb_acquire();
- c = 1;
- odp_mb_full();
- d = 1;
-
- /* Avoid "variable set but not used" warning */
- temp_result = a + b + c + d;
-}
-
-void synchronizers_test_no_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(no_barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-void synchronizers_test_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-odp_testinfo_t synchronizers_suite_barrier[] = {
- ODP_TEST_INFO(synchronizers_test_memory_barrier),
- ODP_TEST_INFO(synchronizers_test_no_barrier_functional),
- ODP_TEST_INFO(synchronizers_test_barrier_functional),
- ODP_TEST_INFO_NULL
-};
-
/* Thread-unsafe tests */
-void synchronizers_test_no_lock_functional(void)
+void lock_test_no_lock_functional(void)
{
pthrd_arg arg;
@@ -1326,13 +909,13 @@ void synchronizers_test_no_lock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_no_locking[] = {
- ODP_TEST_INFO(synchronizers_test_no_lock_functional),
+odp_testinfo_t lock_suite_no_locking[] = {
+ ODP_TEST_INFO(lock_test_no_lock_functional),
ODP_TEST_INFO_NULL
};
/* Spin lock tests */
-void synchronizers_test_spinlock_api(void)
+void lock_test_spinlock_api(void)
{
pthrd_arg arg;
@@ -1341,7 +924,7 @@ void synchronizers_test_spinlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_functional(void)
+void lock_test_spinlock_functional(void)
{
pthrd_arg arg;
@@ -1351,7 +934,7 @@ void synchronizers_test_spinlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_recursive_api(void)
+void lock_test_spinlock_recursive_api(void)
{
pthrd_arg arg;
@@ -1360,7 +943,7 @@ void synchronizers_test_spinlock_recursive_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_recursive_functional(void)
+void lock_test_spinlock_recursive_functional(void)
{
pthrd_arg arg;
@@ -1370,20 +953,20 @@ void synchronizers_test_spinlock_recursive_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_spinlock[] = {
- ODP_TEST_INFO(synchronizers_test_spinlock_api),
- ODP_TEST_INFO(synchronizers_test_spinlock_functional),
+odp_testinfo_t lock_suite_spinlock[] = {
+ ODP_TEST_INFO(lock_test_spinlock_api),
+ ODP_TEST_INFO(lock_test_spinlock_functional),
ODP_TEST_INFO_NULL
};
-odp_testinfo_t synchronizers_suite_spinlock_recursive[] = {
- ODP_TEST_INFO(synchronizers_test_spinlock_recursive_api),
- ODP_TEST_INFO(synchronizers_test_spinlock_recursive_functional),
+odp_testinfo_t lock_suite_spinlock_recursive[] = {
+ ODP_TEST_INFO(lock_test_spinlock_recursive_api),
+ ODP_TEST_INFO(lock_test_spinlock_recursive_functional),
ODP_TEST_INFO_NULL
};
/* Ticket lock tests */
-void synchronizers_test_ticketlock_api(void)
+void lock_test_ticketlock_api(void)
{
pthrd_arg arg;
@@ -1392,7 +975,7 @@ void synchronizers_test_ticketlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_ticketlock_functional(void)
+void lock_test_ticketlock_functional(void)
{
pthrd_arg arg;
@@ -1403,14 +986,14 @@ void synchronizers_test_ticketlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_ticketlock[] = {
- ODP_TEST_INFO(synchronizers_test_ticketlock_api),
- ODP_TEST_INFO(synchronizers_test_ticketlock_functional),
+odp_testinfo_t lock_suite_ticketlock[] = {
+ ODP_TEST_INFO(lock_test_ticketlock_api),
+ ODP_TEST_INFO(lock_test_ticketlock_functional),
ODP_TEST_INFO_NULL
};
/* RW lock tests */
-void synchronizers_test_rwlock_api(void)
+void lock_test_rwlock_api(void)
{
pthrd_arg arg;
@@ -1419,7 +1002,7 @@ void synchronizers_test_rwlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_rwlock_functional(void)
+void lock_test_rwlock_functional(void)
{
pthrd_arg arg;
@@ -1429,13 +1012,13 @@ void synchronizers_test_rwlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_rwlock[] = {
- ODP_TEST_INFO(synchronizers_test_rwlock_api),
- ODP_TEST_INFO(synchronizers_test_rwlock_functional),
+odp_testinfo_t lock_suite_rwlock[] = {
+ ODP_TEST_INFO(lock_test_rwlock_api),
+ ODP_TEST_INFO(lock_test_rwlock_functional),
ODP_TEST_INFO_NULL
};
-void synchronizers_test_rwlock_recursive_api(void)
+void lock_test_rwlock_recursive_api(void)
{
pthrd_arg arg;
@@ -1444,7 +1027,7 @@ void synchronizers_test_rwlock_recursive_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_rwlock_recursive_functional(void)
+void lock_test_rwlock_recursive_functional(void)
{
pthrd_arg arg;
@@ -1454,13 +1037,13 @@ void synchronizers_test_rwlock_recursive_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_rwlock_recursive[] = {
- ODP_TEST_INFO(synchronizers_test_rwlock_recursive_api),
- ODP_TEST_INFO(synchronizers_test_rwlock_recursive_functional),
+odp_testinfo_t lock_suite_rwlock_recursive[] = {
+ ODP_TEST_INFO(lock_test_rwlock_recursive_api),
+ ODP_TEST_INFO(lock_test_rwlock_recursive_functional),
ODP_TEST_INFO_NULL
};
-int synchronizers_suite_init(void)
+int lock_suite_init(void)
{
uint32_t num_threads, idx;
@@ -1472,7 +1055,7 @@ int synchronizers_suite_init(void)
return 0;
}
-int synchronizers_init(void)
+int lock_init(void)
{
uint32_t workers_count, max_threads;
int ret = 0;
@@ -1521,126 +1104,29 @@ int synchronizers_init(void)
return ret;
}
-/* Atomic tests */
-static void *test_atomic_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_inc_dec_32();
- test_atomic_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_add_sub_32();
- test_atomic_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_inc_dec_32();
- test_atomic_fetch_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_fetch_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_add_sub_32();
- test_atomic_fetch_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void test_atomic_functional(void *func_ptr(void *))
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- test_atomic_init();
- test_atomic_store();
- odp_cunit_thread_create(func_ptr, &arg);
- odp_cunit_thread_exit(&arg);
- test_atomic_validate();
-}
-
-void synchronizers_test_atomic_inc_dec(void)
-{
- test_atomic_functional(test_atomic_inc_dec_thread);
-}
-
-void synchronizers_test_atomic_add_sub(void)
-{
- test_atomic_functional(test_atomic_add_sub_thread);
-}
-
-void synchronizers_test_atomic_fetch_inc_dec(void)
-{
- test_atomic_functional(test_atomic_fetch_inc_dec_thread);
-}
-
-void synchronizers_test_atomic_fetch_add_sub(void)
-{
- test_atomic_functional(test_atomic_fetch_add_sub_thread);
-}
-
-odp_testinfo_t synchronizers_suite_atomic[] = {
- ODP_TEST_INFO(synchronizers_test_atomic_inc_dec),
- ODP_TEST_INFO(synchronizers_test_atomic_add_sub),
- ODP_TEST_INFO(synchronizers_test_atomic_fetch_inc_dec),
- ODP_TEST_INFO(synchronizers_test_atomic_fetch_add_sub),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t synchronizers_suites[] = {
- {"barrier", NULL, NULL,
- synchronizers_suite_barrier},
- {"nolocking", synchronizers_suite_init, NULL,
- synchronizers_suite_no_locking},
- {"spinlock", synchronizers_suite_init, NULL,
- synchronizers_suite_spinlock},
- {"spinlock_recursive", synchronizers_suite_init, NULL,
- synchronizers_suite_spinlock_recursive},
- {"ticketlock", synchronizers_suite_init, NULL,
- synchronizers_suite_ticketlock},
- {"rwlock", synchronizers_suite_init, NULL,
- synchronizers_suite_rwlock},
- {"rwlock_recursive", synchronizers_suite_init, NULL,
- synchronizers_suite_rwlock_recursive},
- {"atomic", NULL, NULL,
- synchronizers_suite_atomic},
+odp_suiteinfo_t lock_suites[] = {
+ {"nolocking", lock_suite_init, NULL,
+ lock_suite_no_locking},
+ {"spinlock", lock_suite_init, NULL,
+ lock_suite_spinlock},
+ {"spinlock_recursive", lock_suite_init, NULL,
+ lock_suite_spinlock_recursive},
+ {"ticketlock", lock_suite_init, NULL,
+ lock_suite_ticketlock},
+ {"rwlock", lock_suite_init, NULL,
+ lock_suite_rwlock},
+ {"rwlock_recursive", lock_suite_init, NULL,
+ lock_suite_rwlock_recursive},
ODP_SUITE_INFO_NULL
};
-int synchronizers_main(void)
+int lock_main(void)
{
int ret;
- odp_cunit_register_global_init(synchronizers_init);
+ odp_cunit_register_global_init(lock_init);
- ret = odp_cunit_register(synchronizers_suites);
+ ret = odp_cunit_register(lock_suites);
if (ret == 0)
ret = odp_cunit_run();
diff --git a/test/validation/lock/lock.h b/test/validation/lock/lock.h
new file mode 100644
index 000000000..d90cdbc70
--- /dev/null
+++ b/test/validation/lock/lock.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_LOCK_H_
+#define _ODP_TEST_LOCK_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void lock_test_no_lock_functional(void);
+void lock_test_spinlock_api(void);
+void lock_test_spinlock_functional(void);
+void lock_test_spinlock_recursive_api(void);
+void lock_test_spinlock_recursive_functional(void);
+void lock_test_ticketlock_api(void);
+void lock_test_ticketlock_functional(void);
+void lock_test_rwlock_api(void);
+void lock_test_rwlock_functional(void);
+void lock_test_rwlock_recursive_api(void);
+void lock_test_rwlock_recursive_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t lock_suite_no_locking[];
+extern odp_testinfo_t lock_suite_spinlock[];
+extern odp_testinfo_t lock_suite_spinlock_recursive[];
+extern odp_testinfo_t lock_suite_ticketlock[];
+extern odp_testinfo_t lock_suite_rwlock[];
+extern odp_testinfo_t lock_suite_rwlock_recursive[];
+
+/* test array init/term functions: */
+int lock_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t lock_suites[];
+
+/* executable init/term functions: */
+int lock_init(void);
+
+/* main test program: */
+int lock_main(void);
+
+#endif
diff --git a/test/validation/lock/lock_main.c b/test/validation/lock/lock_main.c
new file mode 100644
index 000000000..c12c2b514
--- /dev/null
+++ b/test/validation/lock/lock_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "lock.h"
+
+int main(void)
+{
+ return lock_main();
+}
diff --git a/test/validation/pktio/pktio.c b/test/validation/pktio/pktio.c
index 96d3e7290..aab8a42cd 100644
--- a/test/validation/pktio/pktio.c
+++ b/test/validation/pktio/pktio.c
@@ -22,6 +22,9 @@
#define TEST_SEQ_INVALID ((uint32_t)~0)
#define TEST_SEQ_MAGIC 0x92749451
#define TX_BATCH_LEN 4
+#define MAX_QUEUES 10
+
+#undef DEBUG_STATS
/** interface names used for testing */
static const char *iface_name[MAX_NUM_IFACES];
@@ -40,18 +43,18 @@ typedef struct {
odp_pktio_t id;
odp_queue_t outq;
odp_queue_t inq;
- odp_pktio_input_mode_t in_mode;
+ odp_pktin_mode_t in_mode;
} pktio_info_t;
/** magic number and sequence at start of UDP payload */
typedef struct ODP_PACKED {
- uint32be_t magic;
- uint32be_t seq;
+ odp_u32be_t magic;
+ odp_u32be_t seq;
} pkt_head_t;
/** magic number at end of UDP payload */
typedef struct ODP_PACKED {
- uint32be_t magic;
+ odp_u32be_t magic;
} pkt_tail_t;
/** Run mode */
@@ -79,6 +82,29 @@ pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+static inline void _pktio_wait_linkup(odp_pktio_t pktio)
+{
+	/* wait up to 1 second (100 x 10 ms) for link up */
+ uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+ int wait_num = 100;
+ int i;
+ int ret = -1;
+
+ for (i = 0; i < wait_num; i++) {
+ ret = odp_pktio_link_status(pktio);
+ if (ret < 0 || ret == 1)
+ break;
+ /* link is down, call status again after delay */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ if (ret != -1) {
+		/* fail the test only if link state reporting is
+		 * supported and the link is still down */
+ CU_ASSERT_FATAL(ret == 1);
+ }
+}
+
static void set_pool_len(odp_pool_param_t *params)
{
switch (pool_segmentation) {
@@ -264,8 +290,8 @@ static int default_pool_create(void)
return 0;
}
-static odp_pktio_t create_pktio(int iface_idx, odp_pktio_input_mode_t imode,
- odp_pktio_output_mode_t omode)
+static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
+ odp_pktout_mode_t omode)
{
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
@@ -289,13 +315,14 @@ static odp_pktio_t create_pktio(int iface_idx, odp_pktio_input_mode_t imode,
return pktio;
}
-static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
+static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype ODP_UNUSED)
{
odp_queue_param_t qparam;
odp_queue_t inq_def;
char inq_name[ODP_QUEUE_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -304,10 +331,7 @@ static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
odp_pktio_to_u64(pktio));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(
- inq_name,
- ODP_QUEUE_TYPE_PKTIN,
- qtype == ODP_QUEUE_TYPE_POLL ? NULL : &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
CU_ASSERT(inq_def != ODP_QUEUE_INVALID);
@@ -333,7 +357,7 @@ static int destroy_inq(odp_pktio_t pktio)
/* flush any pending events */
while (1) {
- if (q_type == ODP_QUEUE_TYPE_POLL)
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
ev = odp_queue_deq(inq);
else
ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -355,11 +379,11 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
int num_pkts = 0;
int i;
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_RECV)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_DIRECT)
return odp_pktio_recv(pktio_rx->id, pkt_tbl, num);
if (mode == TXRX_MODE_MULTI) {
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_POLL)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
num_evts = odp_queue_deq_multi(pktio_rx->inq, evt_tbl,
num);
else
@@ -368,7 +392,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
} else {
odp_event_t evt_tmp;
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_POLL)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
evt_tmp = odp_queue_deq(pktio_rx->inq);
else
evt_tmp = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -486,7 +510,7 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
}
}
-static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
+static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
txrx_mode_e mode)
{
int ret, i, if_b;
@@ -498,7 +522,7 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
io = &pktios[i];
io->name = iface_name[i];
- io->id = create_pktio(i, in_mode, ODP_PKTOUT_MODE_SEND);
+ io->id = create_pktio(i, in_mode, ODP_PKTOUT_MODE_DIRECT);
if (io->id == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open iface");
return;
@@ -506,8 +530,8 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
io->outq = odp_pktio_outq_getdef(io->id);
io->in_mode = in_mode;
- if (in_mode == ODP_PKTIN_MODE_POLL) {
- create_inq(io->id, ODP_QUEUE_TYPE_POLL);
+ if (in_mode == ODP_PKTIN_MODE_QUEUE) {
+ create_inq(io->id, ODP_QUEUE_TYPE_PLAIN);
io->inq = odp_pktio_inq_getdef(io->id);
} else if (in_mode == ODP_PKTIN_MODE_SCHED) {
create_inq(io->id, ODP_QUEUE_TYPE_SCHED);
@@ -516,6 +540,8 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
ret = odp_pktio_start(io->id);
CU_ASSERT(ret == 0);
+
+ _pktio_wait_linkup(io->id);
}
/* if we have two interfaces then send through one and receive on
@@ -526,23 +552,23 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
for (i = 0; i < num_ifaces; ++i) {
ret = odp_pktio_stop(pktios[i].id);
CU_ASSERT(ret == 0);
- if (in_mode != ODP_PKTIN_MODE_RECV)
+ if (in_mode != ODP_PKTIN_MODE_DIRECT)
destroy_inq(pktios[i].id);
ret = odp_pktio_close(pktios[i].id);
CU_ASSERT(ret == 0);
}
}
-void pktio_test_poll_queue(void)
+void pktio_test_plain_queue(void)
{
- test_txrx(ODP_PKTIN_MODE_POLL, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_POLL, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE);
}
-void pktio_test_poll_multi(void)
+void pktio_test_plain_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_POLL, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_POLL, 1, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI);
}
void pktio_test_sched_queue(void)
@@ -559,12 +585,133 @@ void pktio_test_sched_multi(void)
void pktio_test_recv(void)
{
- test_txrx(ODP_PKTIN_MODE_RECV, 1, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE);
}
void pktio_test_recv_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_RECV, TX_BATCH_LEN, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI);
+}
+
+void pktio_test_recv_queue(void)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktout_queue_t pktout_queue[MAX_QUEUES];
+ odp_pktin_queue_t pktin_queue[MAX_QUEUES];
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_packet_t tmp_pkt[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_time_t wait_time, end;
+ int num_rx = 0;
+ int num_queues;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ num_queues = capa.max_input_queues;
+ in_queue_param.num_queues = num_queues;
+ in_queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_pktout_queue_param_init(&out_queue_param);
+ out_queue_param.num_queues = capa.max_output_queues;
+
+ ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; ++i)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ if (num_ifaces > 1)
+ pktio_rx = pktio[1];
+ else
+ pktio_rx = pktio_tx;
+
+ /* Allocate and initialize test packets */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID)
+ break;
+
+ pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
+ if (pkt_seq[i] == TEST_SEQ_INVALID) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
+
+ if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+ }
+ if (i != TX_BATCH_LEN) {
+ CU_FAIL("Failed to generate test packets");
+ return;
+ }
+
+ /* Send packets */
+ num_queues = odp_pktout_queue(pktio_tx, pktout_queue, MAX_QUEUES);
+ CU_ASSERT(num_queues > 0);
+ ret = odp_pktio_send_queue(pktout_queue[num_queues - 1], pkt_tbl,
+ TX_BATCH_LEN);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ /* Receive packets */
+ num_queues = odp_pktin_queue(pktio_rx, pktin_queue, MAX_QUEUES);
+ CU_ASSERT(num_queues > 0);
+
+ wait_time = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
+ end = odp_time_sum(odp_time_local(), wait_time);
+ do {
+ int n = 0;
+
+ for (i = 0; i < num_queues; i++) {
+ n = odp_pktio_recv_queue(pktin_queue[i], tmp_pkt,
+ TX_BATCH_LEN);
+ if (n != 0)
+ break;
+ }
+ if (n < 0)
+ break;
+ for (i = 0; i < n; i++) {
+ if (pktio_pkt_seq(tmp_pkt[i]) == pkt_seq[num_rx])
+ pkt_tbl[num_rx++] = tmp_pkt[i];
+ else
+ odp_packet_free(tmp_pkt[i]);
+ }
+ } while (num_rx < TX_BATCH_LEN &&
+ odp_time_cmp(end, odp_time_local()) > 0);
+
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
}
void pktio_test_jumbo(void)
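
To summarise the flow that pktio_test_recv_queue exercises, here is a minimal sketch of direct multi-queue I/O using only APIs that appear in this patch (odp_pktin_queue_config, odp_pktout_queue_config, odp_pktio_send_queue, odp_pktio_recv_queue). It assumes a pktio handle opened in ODP_PKTIN_MODE_DIRECT / ODP_PKTOUT_MODE_DIRECT, the headers already included by this test file, and packets prepared by the caller; error handling and link-up waiting are omitted.

/* Sketch: one input and one output queue, send a burst, poll it back.
 * "pktio" is assumed to be opened in direct in/out mode and not yet
 * started; "pkts"/"num" hold packets ready for transmission. */
static int send_recv_direct(odp_pktio_t pktio, odp_packet_t pkts[], int num)
{
	odp_pktin_queue_param_t in_param;
	odp_pktout_queue_param_t out_param;
	odp_pktin_queue_t in_queue;
	odp_pktout_queue_t out_queue;
	odp_packet_t rx_pkts[TX_BATCH_LEN];
	int sent, received, i;

	odp_pktin_queue_param_init(&in_param);
	in_param.num_queues = 1;
	if (odp_pktin_queue_config(pktio, &in_param))
		return -1;

	odp_pktout_queue_param_init(&out_param);
	out_param.num_queues = 1;
	if (odp_pktout_queue_config(pktio, &out_param))
		return -1;

	if (odp_pktio_start(pktio))
		return -1;

	/* Fetch the queue handles created by the config calls */
	if (odp_pktout_queue(pktio, &out_queue, 1) < 1 ||
	    odp_pktin_queue(pktio, &in_queue, 1) < 1)
		return -1;

	sent = odp_pktio_send_queue(out_queue, pkts, num);
	if (sent < num)
		return -1;

	received = odp_pktio_recv_queue(in_queue, rx_pkts, TX_BATCH_LEN);
	for (i = 0; i < received; i++)
		odp_packet_free(rx_pkts[i]);

	return received;
}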
@@ -580,7 +727,7 @@ void pktio_test_mtu(void)
int mtu;
odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
mtu = odp_pktio_mtu(pktio);
@@ -597,7 +744,7 @@ void pktio_test_promisc(void)
int ret;
odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
ret = odp_pktio_promisc_mode_set(pktio, 1);
@@ -626,7 +773,7 @@ void pktio_test_mac(void)
odp_pktio_t pktio;
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
printf("testing mac for %s\n", iface_name[0]);
@@ -655,9 +802,9 @@ void pktio_test_inq_remdef(void)
int i;
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0);
+ CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_PLAIN) == 0);
inq = odp_pktio_inq_getdef(pktio);
CU_ASSERT(inq != ODP_QUEUE_INVALID);
CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);
@@ -684,7 +831,7 @@ void pktio_test_open(void)
/* test the sequence open->close->open->close() */
for (i = 0; i < 2; ++i) {
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
CU_ASSERT(odp_pktio_close(pktio) == 0);
}
@@ -725,8 +872,8 @@ static void pktio_test_print(void)
int i;
for (i = 0; i < num_ifaces; ++i) {
- pktio = create_pktio(i, ODP_PKTIN_MODE_POLL,
- ODP_PKTOUT_MODE_SEND);
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
/* Print pktio debug info and test that the
@@ -737,19 +884,336 @@ static void pktio_test_print(void)
}
}
+void pktio_test_pktin_queue_config_direct(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktio_capability(ODP_PKTIO_INVALID, &capa) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) < 0);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktin_queue_config_sched(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktin_queue_config_queue(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktout_queue_config(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktout_queue_param_t queue_param;
+ odp_pktout_queue_t pktout_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_output_queues > 0);
+ num_queues = capa.max_output_queues;
+
+ odp_pktout_queue_param_init(&queue_param);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue(pktio, pktout_queues, MAX_QUEUES)
+ == num_queues);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT;
+ queue_param.num_queues = 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ queue_param.num_queues = capa.max_output_queues + 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
void pktio_test_inq(void)
{
odp_pktio_t pktio;
- pktio = create_pktio(0, ODP_PKTIN_MODE_POLL,
- ODP_PKTOUT_MODE_SEND);
+ pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0);
+ CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_PLAIN) == 0);
CU_ASSERT(destroy_inq(pktio) == 0);
CU_ASSERT(odp_pktio_close(pktio) == 0);
}
+#ifdef DEBUG_STATS
+static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
+{
+ fprintf(stderr, "\n%s:\n"
+ " in_octets %" PRIu64 "\n"
+ " in_ucast_pkts %" PRIu64 "\n"
+ " in_discards %" PRIu64 "\n"
+ " in_errors %" PRIu64 "\n"
+ " in_unknown_protos %" PRIu64 "\n"
+ " out_octets %" PRIu64 "\n"
+ " out_ucast_pkts %" PRIu64 "\n"
+ " out_discards %" PRIu64 "\n"
+ " out_errors %" PRIu64 "\n",
+ name,
+ s->in_octets,
+ s->in_ucast_pkts,
+ s->in_discards,
+ s->in_errors,
+ s->in_unknown_protos,
+ s->out_octets,
+ s->out_ucast_pkts,
+ s->out_discards,
+ s->out_errors);
+}
+#endif
+
+/* Some pktio types, such as netmap, support several methods for
+ * reading statistics counters. Ethtool strings are not standardised
+ * and sysfs may not be supported, so skip the pktio_stats test until
+ * this is resolved. */
+int pktio_check_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_stats_t stats;
+ int ret;
+ odp_pktio_param_t pktio_param;
+ const char *iface = iface_name[0];
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface, pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_stats(pktio, &stats);
+ (void)odp_pktio_close(pktio);
+
+ if (ret == 0)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+void pktio_test_statistics_counters(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_packet_t pkt;
+ odp_event_t tx_ev[1000];
+ odp_event_t ev;
+ int i, pkts, ret, alloc = 0;
+ odp_queue_t outq;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ create_inq(pktio[i], ODP_QUEUE_TYPE_SCHED);
+ }
+
+ outq = odp_pktio_outq_getdef(pktio[0]);
+
+ ret = odp_pktio_start(pktio[0]);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_start(pktio[1]);
+ CU_ASSERT(ret == 0);
+ }
+
+ /* flush packets with magic number in pipes */
+ for (i = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ }
+
+ /* alloc */
+ for (alloc = 0; alloc < 1000; alloc++) {
+ pkt = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt == ODP_PACKET_INVALID)
+ break;
+ pktio_init_packet(pkt);
+ tx_ev[alloc] = odp_packet_to_event(pkt);
+ }
+
+ ret = odp_pktio_stats_reset(pktio[0]);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_stats_reset(pktio[1]);
+ CU_ASSERT(ret == 0);
+ }
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_queue_enq_multi(outq, &tx_ev[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to enqueue packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ ret = odp_pktio_stats(pktio[0], &stats[0]);
+ CU_ASSERT(ret == 0);
+
+ if (num_ifaces > 1) {
+ ret = odp_pktio_stats(pktio[1], &stats[1]);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT((stats[1].in_ucast_pkts == 0) ||
+ (stats[1].in_ucast_pkts >= (uint64_t)pkts));
+ CU_ASSERT(stats[0].out_ucast_pkts == stats[1].in_ucast_pkts);
+ CU_ASSERT(stats[0].out_octets == stats[1].in_octets);
+ CU_ASSERT((stats[0].out_octets == 0) ||
+ (stats[0].out_octets >=
+ (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ } else {
+ CU_ASSERT((stats[0].in_ucast_pkts == 0) ||
+ (stats[0].in_ucast_pkts == (uint64_t)pkts));
+ CU_ASSERT((stats[0].in_octets == 0) ||
+ (stats[0].in_octets ==
+ (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ }
+
+ CU_ASSERT(pkts == alloc);
+ CU_ASSERT(0 == stats[0].in_discards);
+ CU_ASSERT(0 == stats[0].in_errors);
+ CU_ASSERT(0 == stats[0].in_unknown_protos);
+ CU_ASSERT(0 == stats[0].out_discards);
+ CU_ASSERT(0 == stats[0].out_errors);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ destroy_inq(pktio[i]);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
void pktio_test_start_stop(void)
{
odp_pktio_t pktio[MAX_NUM_IFACES];
@@ -762,7 +1226,7 @@ void pktio_test_start_stop(void)
for (i = 0; i < num_ifaces; i++) {
pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
create_inq(pktio[i], ODP_QUEUE_TYPE_SCHED);
}
@@ -781,6 +1245,8 @@ void pktio_test_start_stop(void)
ret = odp_pktio_start(pktio[0]);
CU_ASSERT(ret < 0);
+ _pktio_wait_linkup(pktio[0]);
+
/* Test Rx on a stopped interface. Only works if there are 2 */
if (num_ifaces > 1) {
for (alloc = 0; alloc < 1000; alloc++) {
@@ -828,6 +1294,8 @@ void pktio_test_start_stop(void)
ret = odp_pktio_start(pktio[1]);
CU_ASSERT(ret == 0);
+ _pktio_wait_linkup(pktio[1]);
+
/* flush packets with magic number in pipes */
for (i = 0; i < 1000; i++) {
ev = odp_schedule(NULL, wait);
@@ -899,12 +1367,12 @@ int pktio_check_send_failure(void)
memset(&pktio_param, 0, sizeof(pktio_param));
- pktio_param.in_mode = ODP_PKTIN_MODE_RECV;
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
pktio_tx = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
if (pktio_tx == ODP_PKTIO_INVALID) {
fprintf(stderr, "%s: failed to open pktio\n", __func__);
- return 0;
+ return ODP_TEST_INACTIVE;
}
/* read the MTU from the transmit interface */
@@ -912,7 +1380,10 @@ int pktio_check_send_failure(void)
odp_pktio_close(pktio_tx);
- return (mtu <= ODP_CONFIG_PACKET_BUF_LEN_MAX - 32);
+ if (mtu <= ODP_CONFIG_PACKET_BUF_LEN_MAX - 32)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
}
void pktio_test_send_failure(void)
@@ -926,8 +1397,8 @@ void pktio_test_send_failure(void)
int long_pkt_idx = TX_BATCH_LEN / 2;
pktio_info_t info_rx;
- pktio_tx = create_pktio(0, ODP_PKTIN_MODE_RECV,
- ODP_PKTOUT_MODE_SEND);
+ pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
if (pktio_tx == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open pktio");
return;
@@ -939,6 +1410,8 @@ void pktio_test_send_failure(void)
ret = odp_pktio_start(pktio_tx);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio_tx);
+
/* configure the pool so that we can generate test packets larger
* than the interface MTU */
memset(&pool_params, 0, sizeof(pool_params));
@@ -950,10 +1423,12 @@ void pktio_test_send_failure(void)
CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
if (num_ifaces > 1) {
- pktio_rx = create_pktio(1, ODP_PKTIN_MODE_RECV,
- ODP_PKTOUT_MODE_SEND);
+ pktio_rx = create_pktio(1, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
ret = odp_pktio_start(pktio_rx);
CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio_rx);
} else {
pktio_rx = pktio_tx;
}
@@ -998,7 +1473,7 @@ void pktio_test_send_failure(void)
info_rx.id = pktio_rx;
info_rx.outq = ODP_QUEUE_INVALID;
info_rx.inq = ODP_QUEUE_INVALID;
- info_rx.in_mode = ODP_PKTIN_MODE_RECV;
+ info_rx.in_mode = ODP_PKTIN_MODE_DIRECT;
i = wait_for_packets(&info_rx, pkt_tbl, pkt_seq, ret,
TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);
@@ -1057,7 +1532,7 @@ void pktio_test_recv_on_wonly(void)
int ret;
pktio = create_pktio(0, ODP_PKTIN_MODE_DISABLED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
if (pktio == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open pktio");
@@ -1067,6 +1542,8 @@ void pktio_test_recv_on_wonly(void)
ret = odp_pktio_start(pktio);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio);
+
ret = odp_pktio_recv(pktio, &pkt, 1);
CU_ASSERT(ret < 0);
@@ -1086,7 +1563,7 @@ void pktio_test_send_on_ronly(void)
odp_packet_t pkt;
int ret;
- pktio = create_pktio(0, ODP_PKTIN_MODE_RECV,
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
ODP_PKTOUT_MODE_DISABLED);
if (pktio == ODP_PKTIO_INVALID) {
@@ -1097,6 +1574,8 @@ void pktio_test_send_on_ronly(void)
ret = odp_pktio_start(pktio);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio);
+
pkt = odp_packet_alloc(default_pkt_pool, packet_len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
@@ -1221,13 +1700,18 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_open),
ODP_TEST_INFO(pktio_test_lookup),
ODP_TEST_INFO(pktio_test_print),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_direct),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_sched),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_queue),
+ ODP_TEST_INFO(pktio_test_pktout_queue_config),
ODP_TEST_INFO(pktio_test_inq),
- ODP_TEST_INFO(pktio_test_poll_queue),
- ODP_TEST_INFO(pktio_test_poll_multi),
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
ODP_TEST_INFO(pktio_test_sched_queue),
ODP_TEST_INFO(pktio_test_sched_multi),
ODP_TEST_INFO(pktio_test_recv),
ODP_TEST_INFO(pktio_test_recv_multi),
+ ODP_TEST_INFO(pktio_test_recv_queue),
ODP_TEST_INFO(pktio_test_jumbo),
ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
pktio_check_send_failure),
@@ -1238,12 +1722,14 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_start_stop),
ODP_TEST_INFO(pktio_test_recv_on_wonly),
ODP_TEST_INFO(pktio_test_send_on_ronly),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
+ pktio_check_statistics_counters),
ODP_TEST_INFO_NULL
};
odp_testinfo_t pktio_suite_segmented[] = {
- ODP_TEST_INFO(pktio_test_poll_queue),
- ODP_TEST_INFO(pktio_test_poll_multi),
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
ODP_TEST_INFO(pktio_test_sched_queue),
ODP_TEST_INFO(pktio_test_sched_multi),
ODP_TEST_INFO(pktio_test_recv),
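For reference, the new statistics test above reduces to a reset/query pattern on the pktio handle. A minimal sketch of that pattern, assuming an interface that has already been opened and started (the helper name query_stats is illustrative, not part of the patch):

#include <odp.h>
#include <inttypes.h>
#include <stdio.h>

/* Reset the counters before sending test traffic, then read them back.
 * 'pktio' is assumed to be an opened and started interface. */
static void query_stats(odp_pktio_t pktio)
{
	odp_pktio_stats_t stats;

	if (odp_pktio_stats_reset(pktio) != 0)
		return;

	/* ... transmit and receive test traffic here ... */

	if (odp_pktio_stats(pktio, &stats) == 0)
		printf("out_ucast_pkts %" PRIu64 ", out_octets %" PRIu64 "\n",
		       stats.out_ucast_pkts, stats.out_octets);
}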
diff --git a/test/validation/pktio/pktio.h b/test/validation/pktio/pktio.h
index 58fdbca5b..22fd814d6 100644
--- a/test/validation/pktio/pktio.h
+++ b/test/validation/pktio/pktio.h
@@ -10,12 +10,13 @@
#include <odp_cunit_common.h>
/* test functions: */
-void pktio_test_poll_queue(void);
-void pktio_test_poll_multi(void);
+void pktio_test_plain_queue(void);
+void pktio_test_plain_multi(void);
void pktio_test_sched_queue(void);
void pktio_test_sched_multi(void);
void pktio_test_recv(void);
void pktio_test_recv_multi(void);
+void pktio_test_recv_queue(void);
void pktio_test_jumbo(void);
void pktio_test_mtu(void);
void pktio_test_promisc(void);
@@ -24,11 +25,17 @@ void pktio_test_inq_remdef(void);
void pktio_test_open(void);
void pktio_test_lookup(void);
void pktio_test_inq(void);
+void pktio_test_pktin_queue_config_direct(void);
+void pktio_test_pktin_queue_config_sched(void);
+void pktio_test_pktin_queue_config_queue(void);
+void pktio_test_pktout_queue_config(void);
void pktio_test_start_stop(void);
int pktio_check_send_failure(void);
void pktio_test_send_failure(void);
void pktio_test_recv_on_wonly(void);
void pktio_test_send_on_ronly(void);
+int pktio_check_statistics_counters(void);
+void pktio_test_statistics_counters(void);
/* test arrays: */
extern odp_testinfo_t pktio_suite[];
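The check functions declared above follow the suite-wide convention: a check returns ODP_TEST_ACTIVE or ODP_TEST_INACTIVE, and ODP_TEST_INFO_CONDITIONAL() registers the test so it is skipped when the check fails. A minimal sketch of that pairing (the probe is stubbed out and the example_* names are illustrative):

#include <odp_cunit_common.h>

/* Stubbed capability probe; a real check would query the platform */
static int example_check_supported(void)
{
	int supported = 1;

	return supported ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
}

static void example_test(void)
{
	CU_ASSERT(1 == 1);
}

odp_testinfo_t example_suite[] = {
	ODP_TEST_INFO_CONDITIONAL(example_test, example_check_supported),
	ODP_TEST_INFO_NULL
};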
diff --git a/test/validation/queue/queue.c b/test/validation/queue/queue.c
index 3c1c64ab1..7c55eb335 100644
--- a/test/validation/queue/queue.c
+++ b/test/validation/queue/queue.c
@@ -54,15 +54,17 @@ void queue_test_sunnydays(void)
odp_queue_param_t qparams;
odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_SCHED;
qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparams.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparams.sched.group = ODP_SCHED_GROUP_WORKER;
- queue_creat_id = odp_queue_create("test_queue",
- ODP_QUEUE_TYPE_POLL, &qparams);
+ queue_creat_id = odp_queue_create("test_queue", &qparams);
CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);
+ CU_ASSERT(odp_queue_to_u64(queue_creat_id) !=
+ odp_queue_to_u64(ODP_QUEUE_INVALID));
- CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
+ CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_SCHED,
odp_queue_type(queue_creat_id));
queue_id = odp_queue_lookup("test_queue");
@@ -71,7 +73,8 @@ void queue_test_sunnydays(void)
CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
odp_queue_sched_group(queue_id));
CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST, odp_queue_sched_prio(queue_id));
- CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE, odp_queue_sched_type(queue_id));
+ CU_ASSERT_EQUAL(ODP_SCHED_SYNC_PARALLEL,
+ odp_queue_sched_type(queue_id));
CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));
@@ -127,46 +130,47 @@ void queue_test_sunnydays(void)
void queue_test_info(void)
{
- odp_queue_t q_poll, q_order;
- const char *const nq_poll = "test_q_poll";
+ odp_queue_t q_plain, q_order;
+ const char *const nq_plain = "test_q_plain";
const char *const nq_order = "test_q_order";
odp_queue_info_t info;
odp_queue_param_t param;
- char q_poll_ctx[] = "test_q_poll context data";
+ char q_plain_ctx[] = "test_q_plain context data";
char q_order_ctx[] = "test_q_order context data";
unsigned lock_count;
char *ctx;
int ret;
- /* Create a polled queue and set context */
- q_poll = odp_queue_create(nq_poll, ODP_QUEUE_TYPE_POLL, NULL);
- CU_ASSERT(ODP_QUEUE_INVALID != q_poll);
- CU_ASSERT(odp_queue_context_set(q_poll, q_poll_ctx) == 0);
+ /* Create a plain queue and set context */
+ q_plain = odp_queue_create(nq_plain, NULL);
+ CU_ASSERT(ODP_QUEUE_INVALID != q_plain);
+ CU_ASSERT(odp_queue_context_set(q_plain, q_plain_ctx) == 0);
/* Create a scheduled ordered queue with explicitly set params */
odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = ODP_SCHED_PRIO_NORMAL;
param.sched.sync = ODP_SCHED_SYNC_ORDERED;
param.sched.group = ODP_SCHED_GROUP_ALL;
param.sched.lock_count = 1;
param.context = q_order_ctx;
- q_order = odp_queue_create(nq_order, ODP_QUEUE_TYPE_SCHED, &param);
+ q_order = odp_queue_create(nq_order, &param);
CU_ASSERT(ODP_QUEUE_INVALID != q_order);
- /* Check info for the polled queue */
- CU_ASSERT(odp_queue_info(q_poll, &info) == 0);
- CU_ASSERT(strcmp(nq_poll, info.name) == 0);
- CU_ASSERT(info.type == ODP_QUEUE_TYPE_POLL);
- CU_ASSERT(info.type == odp_queue_type(q_poll));
+ /* Check info for the plain queue */
+ CU_ASSERT(odp_queue_info(q_plain, &info) == 0);
+ CU_ASSERT(strcmp(nq_plain, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_PLAIN);
+ CU_ASSERT(info.param.type == odp_queue_type(q_plain));
ctx = info.param.context; /* 'char' context ptr */
- CU_ASSERT(ctx == q_poll_ctx);
- CU_ASSERT(info.param.context == odp_queue_context(q_poll));
+ CU_ASSERT(ctx == q_plain_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_plain));
/* Check info for the scheduled ordered queue */
CU_ASSERT(odp_queue_info(q_order, &info) == 0);
CU_ASSERT(strcmp(nq_order, info.name) == 0);
- CU_ASSERT(info.type == ODP_QUEUE_TYPE_SCHED);
- CU_ASSERT(info.type == odp_queue_type(q_order));
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(info.param.type == odp_queue_type(q_order));
ctx = info.param.context; /* 'char' context ptr */
CU_ASSERT(ctx == q_order_ctx);
CU_ASSERT(info.param.context == odp_queue_context(q_order));
@@ -178,7 +182,7 @@ void queue_test_info(void)
lock_count = (unsigned) ret;
CU_ASSERT(info.param.sched.lock_count == lock_count);
- CU_ASSERT(odp_queue_destroy(q_poll) == 0);
+ CU_ASSERT(odp_queue_destroy(q_plain) == 0);
CU_ASSERT(odp_queue_destroy(q_order) == 0);
}
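The queue.c changes track the API update that moved the queue type into odp_queue_param_t, so odp_queue_create() now takes only a name and a parameter struct. A minimal sketch of creating a scheduled parallel queue with the new signature, using the same parameter values as this patch (create_sched_queue is an illustrative helper name):

#include <odp.h>

static odp_queue_t create_sched_queue(const char *name)
{
	odp_queue_param_t param;

	odp_queue_param_init(&param);
	param.type        = ODP_QUEUE_TYPE_SCHED;	/* was a separate argument */
	param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	param.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	param.sched.group = ODP_SCHED_GROUP_ALL;

	return odp_queue_create(name, &param);
}

A plain queue needs no parameters at all: odp_queue_create(name, NULL) replaces the former ODP_QUEUE_TYPE_POLL variant.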
diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c
index ff95b4b27..dcf01c073 100644
--- a/test/validation/scheduler/scheduler.c
+++ b/test/validation/scheduler/scheduler.c
@@ -129,7 +129,11 @@ void scheduler_test_wait_time(void)
/* check ODP_SCHED_NO_WAIT */
odp_queue_param_init(&qp);
- queue = odp_queue_create("dummy_queue", ODP_QUEUE_TYPE_SCHED, &qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qp.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
+ queue = odp_queue_create("dummy_queue", &qp);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
@@ -184,7 +188,7 @@ void scheduler_test_queue_destroy(void)
odp_event_t ev;
uint32_t *u32;
int i;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
@@ -200,11 +204,12 @@ void scheduler_test_queue_destroy(void)
CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
for (i = 0; i < 3; i++) {
+ qp.type = ODP_QUEUE_TYPE_SCHED;
qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qp.sched.sync = sync[i];
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create("sched_destroy_queue",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue = odp_queue_create("sched_destroy_queue", &qp);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
@@ -249,7 +254,7 @@ void scheduler_test_groups(void)
odp_event_t ev;
uint32_t *u32;
int i, j, rc;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
int thr_id = odp_thread_id();
@@ -337,13 +342,13 @@ void scheduler_test_groups(void)
CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
for (i = 0; i < 3; i++) {
+ qp.type = ODP_QUEUE_TYPE_SCHED;
qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qp.sched.sync = sync[i];
qp.sched.group = mygrp1;
/* Create and populate a group in group 1 */
- queue_grp1 = odp_queue_create("sched_group_test_queue_1",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);
@@ -360,8 +365,7 @@ void scheduler_test_groups(void)
/* Now create and populate a queue in group 2 */
qp.sched.group = mygrp2;
- queue_grp2 = odp_queue_create("sched_group_test_queue_2",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);
@@ -533,7 +537,7 @@ static void chaos_run(unsigned int qtype)
odp_queue_t from;
int i, rc;
uint64_t wait;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
const unsigned num_sync = (sizeof(sync) / sizeof(sync[0]));
@@ -562,7 +566,9 @@ static void chaos_run(unsigned int qtype)
pool = odp_pool_create("sched_chaos_pool", &params);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
uint32_t ndx = qtype == num_sync ? i % num_sync : qtype;
@@ -574,9 +580,7 @@ static void chaos_run(unsigned int qtype)
qtypes[ndx]);
globals->chaos_q[i].handle =
- odp_queue_create(globals->chaos_q[i].name,
- ODP_QUEUE_TYPE_SCHED,
- &qp);
+ odp_queue_create(globals->chaos_q[i].name, &qp);
CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
ODP_QUEUE_INVALID);
rc = odp_queue_context_set(globals->chaos_q[i].handle,
@@ -842,7 +846,7 @@ static void *schedule_common_(void *arg)
for (i = 0; i < args->num_prio; i++) {
for (j = 0; j < args->num_queues; j++) {
snprintf(name, sizeof(name),
- "poll_%d_%d_o", i, j);
+ "plain_%d_%d_o", i, j);
pq = odp_queue_lookup(name);
CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);
@@ -898,7 +902,7 @@ static void fill_queues(thread_args_t *args)
odp_queue_t queue;
switch (sync) {
- case ODP_SCHED_SYNC_NONE:
+ case ODP_SCHED_SYNC_PARALLEL:
snprintf(name, sizeof(name),
"sched_%d_%d_n", i, j);
break;
@@ -1047,10 +1051,10 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
reset_queues(args);
}
-/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_1q_1t_n(void)
{
- schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_ONE);
}
/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
@@ -1065,12 +1069,12 @@ void scheduler_test_1q_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
}
-/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_1t_n(void)
{
/* Only one priority involved in these tests, but use
the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_ONE);
}
/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
@@ -1085,12 +1089,12 @@ void scheduler_test_mq_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
}
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_1t_prio_n(void)
{
int prio = odp_schedule_num_prio();
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE);
}
/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
@@ -1109,12 +1113,12 @@ void scheduler_test_mq_1t_prio_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
}
-/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_mt_prio_n(void)
{
int prio = odp_schedule_num_prio();
- parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE,
DISABLE_EXCL_ATOMIC);
}
@@ -1143,10 +1147,10 @@ void scheduler_test_1q_mt_a_excl(void)
ENABLE_EXCL_ATOMIC);
}
-/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_1q_1t_n(void)
{
- schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_MULTI);
}
/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
@@ -1161,12 +1165,12 @@ void scheduler_test_multi_1q_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
}
-/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_1t_n(void)
{
/* Only one priority involved in these tests, but use
the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_MULTI);
}
/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
@@ -1181,12 +1185,12 @@ void scheduler_test_multi_mq_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
}
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_1t_prio_n(void)
{
int prio = odp_schedule_num_prio();
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI);
}
/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
@@ -1205,12 +1209,12 @@ void scheduler_test_multi_mq_1t_prio_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
}
-/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_mt_prio_n(void)
{
int prio = odp_schedule_num_prio();
- parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI, 0);
}
/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
@@ -1318,6 +1322,7 @@ static int create_queues(void)
for (i = 0; i < prios; i++) {
odp_queue_param_t p;
odp_queue_param_init(&p);
+ p.type = ODP_QUEUE_TYPE_SCHED;
p.sched.prio = i;
for (j = 0; j < QUEUES_PER_PRIO; j++) {
@@ -1326,8 +1331,8 @@ static int create_queues(void)
odp_queue_t q, pq;
snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
- p.sched.sync = ODP_SCHED_SYNC_NONE;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ p.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
@@ -1336,24 +1341,24 @@ static int create_queues(void)
snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
return -1;
}
- snprintf(name, sizeof(name), "poll_%d_%d_o", i, j);
- pq = odp_queue_create(name, ODP_QUEUE_TYPE_POLL, NULL);
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ pq = odp_queue_create(name, NULL);
if (pq == ODP_QUEUE_INVALID) {
- printf("Poll queue create failed.\n");
+ printf("Plain queue create failed.\n");
return -1;
}
queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
if (queue_ctx_buf == ODP_BUFFER_INVALID) {
- printf("Cannot allocate poll queue ctx buf\n");
+ printf("Cannot allocate plain queue ctx buf\n");
return -1;
}
@@ -1364,7 +1369,7 @@ static int create_queues(void)
rc = odp_queue_context_set(pq, pqctx);
if (rc != 0) {
- printf("Cannot set poll queue context\n");
+ printf("Cannot set plain queue context\n");
return -1;
}
@@ -1372,7 +1377,7 @@ static int create_queues(void)
p.sched.sync = ODP_SCHED_SYNC_ORDERED;
p.sched.lock_count =
ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
@@ -1516,7 +1521,7 @@ static int destroy_queues(void)
if (destroy_queue(name) != 0)
return -1;
- snprintf(name, sizeof(name), "poll_%d_%d_o", i, j);
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
if (destroy_queue(name) != 0)
return -1;
}
diff --git a/test/validation/std_clib/std_clib.c b/test/validation/std_clib/std_clib.c
index e53ad3946..e69bc3901 100644
--- a/test/validation/std_clib/std_clib.c
+++ b/test/validation/std_clib/std_clib.c
@@ -44,9 +44,47 @@ static void std_clib_test_memset(void)
CU_ASSERT(ret == 0);
}
+static void std_clib_test_memcmp(void)
+{
+ uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t equal[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t greater_11[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 99, 12, 13, 14, 15, 16};
+ uint8_t less_6[] = {1, 2, 3, 4, 5, 2, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ size_t i;
+
+ CU_ASSERT(odp_memcmp(data, equal, 0) == 0);
+ CU_ASSERT(odp_memcmp(data, equal, sizeof(data)) == 0);
+ CU_ASSERT(odp_memcmp(data, equal, sizeof(data) - 3) == 0);
+
+ CU_ASSERT(odp_memcmp(greater_11, data, sizeof(data)) > 0);
+ CU_ASSERT(odp_memcmp(greater_11, data, 11) > 0);
+ CU_ASSERT(odp_memcmp(greater_11, data, 10) == 0);
+
+ CU_ASSERT(odp_memcmp(less_6, data, sizeof(data)) < 0);
+ CU_ASSERT(odp_memcmp(less_6, data, 6) < 0);
+ CU_ASSERT(odp_memcmp(less_6, data, 5) == 0);
+
+ for (i = 0; i < sizeof(data); i++) {
+ uint8_t tmp;
+
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) == 0);
+ tmp = equal[i];
+ equal[i] = 88;
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) < 0);
+ equal[i] = 0;
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) > 0);
+ equal[i] = tmp;
+ }
+}
+
odp_testinfo_t std_clib_suite[] = {
ODP_TEST_INFO(std_clib_test_memcpy),
ODP_TEST_INFO(std_clib_test_memset),
+ ODP_TEST_INFO(std_clib_test_memcmp),
ODP_TEST_INFO_NULL,
};
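The new odp_memcmp() cases assert plain memcmp() semantics: zero when the first len bytes match, and a positive or negative value according to the first differing byte. A short usage sketch along those lines (regions_equal is an illustrative helper):

#include <odp.h>
#include <stdbool.h>
#include <stddef.h>

/* True when the first 'len' bytes of the two buffers are identical;
 * odp_memcmp() follows memcmp() return-value conventions. */
static bool regions_equal(const void *a, const void *b, size_t len)
{
	return odp_memcmp(a, b, len) == 0;
}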
diff --git a/test/validation/synchronizers/.gitignore b/test/validation/synchronizers/.gitignore
deleted file mode 100644
index 6aad9dfbd..000000000
--- a/test/validation/synchronizers/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-synchronizers_main
diff --git a/test/validation/synchronizers/Makefile.am b/test/validation/synchronizers/Makefile.am
deleted file mode 100644
index dd504d560..000000000
--- a/test/validation/synchronizers/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestsynchronizers.la
-libtestsynchronizers_la_SOURCES = synchronizers.c
-
-test_PROGRAMS = synchronizers_main$(EXEEXT)
-dist_synchronizers_main_SOURCES = synchronizers_main.c
-synchronizers_main_LDADD = libtestsynchronizers.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = synchronizers.h
diff --git a/test/validation/synchronizers/synchronizers.h b/test/validation/synchronizers/synchronizers.h
deleted file mode 100644
index ad8db0b2e..000000000
--- a/test/validation/synchronizers/synchronizers.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_SYNCHRONIZERS_H_
-#define _ODP_TEST_SYNCHRONIZERS_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void synchronizers_test_memory_barrier(void);
-void synchronizers_test_no_barrier_functional(void);
-void synchronizers_test_barrier_functional(void);
-void synchronizers_test_no_lock_functional(void);
-void synchronizers_test_spinlock_api(void);
-void synchronizers_test_spinlock_functional(void);
-void synchronizers_test_spinlock_recursive_api(void);
-void synchronizers_test_spinlock_recursive_functional(void);
-void synchronizers_test_ticketlock_api(void);
-void synchronizers_test_ticketlock_functional(void);
-void synchronizers_test_rwlock_api(void);
-void synchronizers_test_rwlock_functional(void);
-void synchronizers_test_rwlock_recursive_api(void);
-void synchronizers_test_rwlock_recursive_functional(void);
-void synchronizers_test_atomic_inc_dec(void);
-void synchronizers_test_atomic_add_sub(void);
-void synchronizers_test_atomic_fetch_inc_dec(void);
-void synchronizers_test_atomic_fetch_add_sub(void);
-
-/* test arrays: */
-extern odp_testinfo_t synchronizers_suite_barrier[];
-extern odp_testinfo_t synchronizers_suite_no_locking[];
-extern odp_testinfo_t synchronizers_suite_spinlock[];
-extern odp_testinfo_t synchronizers_suite_spinlock_recursive[];
-extern odp_testinfo_t synchronizers_suite_ticketlock[];
-extern odp_testinfo_t synchronizers_suite_rwlock[];
-extern odp_testinfo_t synchronizers_suite_rwlock_recursive[];
-extern odp_testinfo_t synchronizers_suite_atomic[];
-
-/* test array init/term functions: */
-int synchronizers_suite_init(void);
-
-/* test registry: */
-extern odp_suiteinfo_t synchronizers_suites[];
-
-/* executable init/term functions: */
-int synchronizers_init(void);
-
-/* main test program: */
-int synchronizers_main(void);
-
-#endif
diff --git a/test/validation/system/system.c b/test/validation/system/system.c
index 7f54338b8..ac34b2478 100644
--- a/test/validation/system/system.c
+++ b/test/validation/system/system.c
@@ -6,6 +6,7 @@
#include <ctype.h>
#include <odp.h>
+#include <odp/cpumask.h>
#include "odp_cunit_common.h"
#include "test_debug.h"
#include "system.h"
@@ -170,15 +171,32 @@ void system_test_odp_sys_cache_line_size(void)
CU_ASSERT(ODP_CACHE_LINE_SIZE == cache_size);
}
-void system_test_odp_sys_cpu_model_str(void)
+void system_test_odp_cpu_model_str(void)
{
char model[128];
- snprintf(model, 128, "%s", odp_sys_cpu_model_str());
+ snprintf(model, 128, "%s", odp_cpu_model_str());
CU_ASSERT(strlen(model) > 0);
CU_ASSERT(strlen(model) < 127);
}
+void system_test_odp_cpu_model_str_id(void)
+{
+ char model[128];
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
void system_test_odp_sys_page_size(void)
{
uint64_t page;
@@ -196,22 +214,107 @@ void system_test_odp_sys_huge_page_size(void)
CU_ASSERT(0 < page);
}
-void system_test_odp_sys_cpu_hz(void)
+int system_check_odp_cpu_hz(void)
+{
+ if (odp_cpu_hz() == 0) {
+ fprintf(stderr, "odp_cpu_hz is not supported, skipping\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz(void)
+{
+ uint64_t hz = odp_cpu_hz();
+
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+}
+
+int system_check_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ if (hz == 0) {
+ fprintf(stderr, "cpu %d does not support"
+ " odp_cpu_hz_id(),"
+ " skipping this test\n", cpu);
+ return ODP_TEST_INACTIVE;
+ }
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+void system_test_odp_cpu_hz_max(void)
{
uint64_t hz;
- hz = odp_sys_cpu_hz();
+ hz = odp_cpu_hz_max();
CU_ASSERT(0 < hz);
}
+void system_test_odp_cpu_hz_max_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_max_id(cpu);
+ CU_ASSERT(0 < hz);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
odp_testinfo_t system_suite[] = {
ODP_TEST_INFO(system_test_odp_version_numbers),
ODP_TEST_INFO(system_test_odp_cpu_count),
ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
- ODP_TEST_INFO(system_test_odp_sys_cpu_model_str),
+ ODP_TEST_INFO(system_test_odp_cpu_model_str),
+ ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
ODP_TEST_INFO(system_test_odp_sys_page_size),
ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
- ODP_TEST_INFO(system_test_odp_sys_cpu_hz),
+ ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
+ system_check_odp_cpu_hz),
+ ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
+ system_check_odp_cpu_hz_id),
+ ODP_TEST_INFO(system_test_odp_cpu_hz_max),
+ ODP_TEST_INFO(system_test_odp_cpu_hz_max_id),
ODP_TEST_INFO(system_test_odp_cpu_cycles),
ODP_TEST_INFO(system_test_odp_cpu_cycles_max),
ODP_TEST_INFO(system_test_odp_cpu_cycles_resolution),
diff --git a/test/validation/system/system.h b/test/validation/system/system.h
index 0c263f2c0..cf585a7b7 100644
--- a/test/validation/system/system.h
+++ b/test/validation/system/system.h
@@ -9,14 +9,23 @@
#include <odp_cunit_common.h>
+#define GIGA_HZ 1000000000ULL
+#define KILO_HZ 1000ULL
+
/* test functions: */
void system_test_odp_version_numbers(void);
void system_test_odp_cpu_count(void);
void system_test_odp_sys_cache_line_size(void);
-void system_test_odp_sys_cpu_model_str(void);
+void system_test_odp_cpu_model_str(void);
+void system_test_odp_cpu_model_str_id(void);
void system_test_odp_sys_page_size(void);
void system_test_odp_sys_huge_page_size(void);
-void system_test_odp_sys_cpu_hz(void);
+int system_check_odp_cpu_hz(void);
+void system_test_odp_cpu_hz(void);
+int system_check_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_max(void);
+void system_test_odp_cpu_hz_max_id(void);
void system_test_odp_cpu_cycles_max(void);
void system_test_odp_cpu_cycles(void);
void system_test_odp_cpu_cycles_diff(void);
diff --git a/test/validation/timer/timer.c b/test/validation/timer/timer.c
index 0bd67fb15..004670ad9 100644
--- a/test/validation/timer/timer.c
+++ b/test/validation/timer/timer.c
@@ -37,6 +37,10 @@ static odp_timer_pool_t tp;
/** @private Count of timeouts delivered too late */
static odp_atomic_u32_t ndelivtoolate;
+/** @private Sum of all allocated timers from all threads. Thread-local
+ * caches may make this number lower than the capacity of the pool */
+static odp_atomic_u32_t timers_allocated;
+
/** @private min() function */
static int min(int a, int b)
{
@@ -161,7 +165,7 @@ void timer_test_odp_timer_cancel(void)
/* Start all created timer pools */
odp_timer_pool_start();
- queue = odp_queue_create("timer_queue", ODP_QUEUE_TYPE_POLL, NULL);
+ queue = odp_queue_create("timer_queue", NULL);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -274,13 +278,11 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
static void *worker_entrypoint(void *arg TEST_UNUSED)
{
int thr = odp_thread_id();
- uint32_t i;
+ uint32_t i, allocated;
unsigned seed = thr;
int rc;
- odp_queue_t queue = odp_queue_create("timer_queue",
- ODP_QUEUE_TYPE_POLL,
- NULL);
+ odp_queue_t queue = odp_queue_create("timer_queue", NULL);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -290,21 +292,30 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
/* Prepare all timers */
for (i = 0; i < NTIMERS; i++) {
- tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
- if (tt[i].tim == ODP_TIMER_INVALID)
- CU_FAIL_FATAL("Failed to allocate timer");
tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
- if (tt[i].ev == ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Failed to allocate timeout");
+ if (tt[i].ev == ODP_EVENT_INVALID) {
+ LOG_DBG("Failed to allocate timeout (%d/%d)\n",
+ i, NTIMERS);
+ break;
+ }
+ tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
+ if (tt[i].tim == ODP_TIMER_INVALID) {
+ LOG_DBG("Failed to allocate timer (%d/%d)\n",
+ i, NTIMERS);
+ odp_event_free(tt[i].ev);
+ break;
+ }
tt[i].ev2 = tt[i].ev;
tt[i].tick = TICK_INVALID;
}
+ allocated = i;
+ odp_atomic_fetch_add_u32(&timers_allocated, allocated);
odp_barrier_wait(&test_barrier);
/* Initial set all timers with a random expiration time */
uint32_t nset = 0;
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
uint64_t tck = odp_timer_current_tick(tp) + 1 +
odp_timer_ns_to_tick(tp,
(rand_r(&seed) % RANGE_MS)
@@ -336,7 +347,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
nrcv++;
}
prev_tick = odp_timer_current_tick(tp);
- i = rand_r(&seed) % NTIMERS;
+ i = rand_r(&seed) % allocated;
if (tt[i].ev == ODP_EVENT_INVALID &&
(rand_r(&seed) % 2 == 0)) {
/* Timer active, cancel it */
@@ -384,7 +395,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
/* Cancel and free all timers */
uint32_t nstale = 0;
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
(void)odp_timer_cancel(tt[i].tim, &tt[i].ev);
tt[i].tick = TICK_INVALID;
if (tt[i].ev == ODP_EVENT_INVALID)
@@ -430,7 +441,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
rc = odp_queue_destroy(queue);
CU_ASSERT(rc == 0);
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
if (tt[i].ev != ODP_EVENT_INVALID)
odp_event_free(tt[i].ev);
}
@@ -506,6 +517,9 @@ void timer_test_odp_timer_all(void)
/* Initialize the shared timeout counter */
odp_atomic_init_u32(&ndelivtoolate, 0);
+ /* Initialize the counter of timers the workers actually allocate */
+ odp_atomic_init_u32(&timers_allocated, 0);
+
/* Create and start worker threads */
pthrd_arg thrdarg;
thrdarg.testcase = 0;
@@ -522,7 +536,7 @@ void timer_test_odp_timer_all(void)
CU_FAIL("odp_timer_pool_info");
CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
CU_ASSERT(tpinfo.cur_timers == 0);
- CU_ASSERT(tpinfo.hwm_timers == (unsigned)num_workers * NTIMERS);
+ CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
/* Destroy timer pool, all timers must have been freed */
odp_timer_pool_destroy(tp);
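The timer change above replaces the fixed per-thread NTIMERS assumption with a shared atomic counter, so the final hwm_timers check compares against what the workers actually allocated. A minimal sketch of that aggregation pattern using the same odp_atomic_u32_t calls (the counter_* helper names are illustrative):

#include <odp.h>

static odp_atomic_u32_t timers_allocated;

/* Main thread: zero the counter before launching the workers */
static void counter_init(void)
{
	odp_atomic_init_u32(&timers_allocated, 0);
}

/* Worker: report how many timers it really managed to allocate */
static void counter_add(uint32_t allocated)
{
	odp_atomic_fetch_add_u32(&timers_allocated, allocated);
}

/* Main thread: aggregate used for the high-water-mark assertion */
static uint32_t counter_total(void)
{
	return odp_atomic_load_u32(&timers_allocated);
}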