Diffstat (limited to 'test')
-rw-r--r--  test/common/odp_cunit_common.c                                      |  10
-rw-r--r--  test/performance/.gitignore                                         |   1
-rw-r--r--  test/performance/Makefile.am                                        |   2
-rw-r--r--  test/performance/odp_cpu_bench.c                                    |   6
-rw-r--r--  test/performance/odp_crypto.c                                       |   1
-rw-r--r--  test/performance/odp_dma_perf.c                                     |  63
-rw-r--r--  test/performance/odp_ipsecfwd.c                                     | 980
-rw-r--r--  test/performance/odp_pool_perf.c                                    | 107
-rw-r--r--  test/performance/odp_sched_latency.c                                | 113
-rw-r--r--  test/performance/odp_stress.c                                       | 857
-rw-r--r--  test/validation/api/atomic/atomic.c                                 |   1
-rw-r--r--  test/validation/api/barrier/barrier.c                               |   1
-rw-r--r--  test/validation/api/buffer/buffer.c                                 |  14
-rw-r--r--  test/validation/api/classification/odp_classification_basic.c      |  18
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c   |  18
-rw-r--r--  test/validation/api/classification/odp_classification_tests.c      |  23
-rw-r--r--  test/validation/api/classification/odp_classification_testsuites.h |   2
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c                    |   1
-rw-r--r--  test/validation/api/ipsec/ipsec.c                                   | 191
-rw-r--r--  test/validation/api/ipsec/ipsec.h                                   |  14
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c                          | 136
-rw-r--r--  test/validation/api/lock/lock.c                                     |   1
-rw-r--r--  test/validation/api/pktio/pktio.c                                   | 266
-rw-r--r--  test/validation/api/shmem/shmem.c                                   | 113
-rw-r--r--  test/validation/api/system/system.c                                 |  46
25 files changed, 2566 insertions(+), 419 deletions(-)
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 5ce7fd791..0f3b45b18 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -36,6 +36,7 @@ static int allow_skip_result;
static odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
static int threads_running;
static odp_instance_t instance;
+static bool control_thread;
static char *progname;
static int (*thread_func)(void *);
@@ -260,6 +261,7 @@ static int tests_global_init(odp_instance_t *inst)
{
odp_init_t init_param;
odph_helper_options_t helper_options;
+ odp_thread_type_t thr_type;
if (odph_options(&helper_options)) {
fprintf(stderr, "error: odph_options() failed.\n");
@@ -273,7 +275,9 @@ static int tests_global_init(odp_instance_t *inst)
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+
+ thr_type = control_thread ? ODP_THREAD_CONTROL : ODP_THREAD_WORKER;
+ if (0 != odp_init_local(*inst, thr_type)) {
fprintf(stderr, "error: odp_init_local() failed.\n");
return -1;
}
@@ -706,10 +710,14 @@ int odp_cunit_register(odp_suiteinfo_t testsuites[])
*/
int odp_cunit_parse_options(int argc, char *argv[])
{
+ const char *ctrl_thread_env = getenv("CI_THREAD_TYPE_CONTROL");
const char *env = getenv("CI");
progname = argv[0];
odph_parse_options(argc, argv);
+ /* Check if we need to use control thread */
+ if (ctrl_thread_env && !strcmp(ctrl_thread_env, "true"))
+ control_thread = true;
if (env && !strcmp(env, "true")) {
allow_skip_result = 1;
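
Note: the hunks above make the harness thread type selectable at run time via the
CI_THREAD_TYPE_CONTROL environment variable. A minimal sketch of the same selection
logic in isolation (the helper name is hypothetical):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	/* Default to a worker thread; use a control thread only when
	 * CI_THREAD_TYPE_CONTROL=true is set in the environment. */
	static bool use_control_thread(void)
	{
		const char *env = getenv("CI_THREAD_TYPE_CONTROL");

		return env != NULL && strcmp(env, "true") == 0;
	}
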
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
index 3a32d0420..ec1915bba 100644
--- a/test/performance/.gitignore
+++ b/test/performance/.gitignore
@@ -23,4 +23,5 @@ odp_sched_latency
odp_sched_perf
odp_sched_pktio
odp_scheduling
+odp_stress
odp_timer_perf
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index b0885808e..336a46d22 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -25,6 +25,7 @@ COMPILE_ONLY = odp_cpu_bench \
odp_sched_perf \
odp_sched_pktio \
odp_scheduling \
+ odp_stress \
odp_timer_perf
TESTSCRIPTS = odp_cpu_bench_run.sh \
@@ -72,6 +73,7 @@ odp_pool_perf_SOURCES = odp_pool_perf.c
odp_queue_perf_SOURCES = odp_queue_perf.c
odp_random_SOURCES = odp_random.c
odp_sched_perf_SOURCES = odp_sched_perf.c
+odp_stress_SOURCES = odp_stress.c
odp_timer_perf_SOURCES = odp_timer_perf.c
# l2fwd test depends on generator example
diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c
index 41a26491f..e912e167d 100644
--- a/test/performance/odp_cpu_bench.c
+++ b/test/performance/odp_cpu_bench.c
@@ -26,7 +26,11 @@
/* Default number of entries in the test lookup table */
#define DEF_LOOKUP_TBL_SIZE (1024 * 1024)
-#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_WORKERS \
+ (((ODP_THREAD_COUNT_MAX - 1) > (MAX_GROUPS * QUEUES_PER_GROUP)) ? \
+ (MAX_GROUPS * QUEUES_PER_GROUP) : \
+ (ODP_THREAD_COUNT_MAX - 1))
+
ODP_STATIC_ASSERT(MAX_WORKERS <= MAX_GROUPS * QUEUES_PER_GROUP,
"Not enough queues for all workers");
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
index 94d9cc6cf..b381fde31 100644
--- a/test/performance/odp_crypto.c
+++ b/test/performance/odp_crypto.c
@@ -1197,6 +1197,7 @@ int main(int argc, char *argv[])
odp_init_local(instance, ODP_THREAD_WORKER);
odp_sys_info_print();
+ memset(&crypto_capa, 0, sizeof(crypto_capa));
if (odp_crypto_capability(&crypto_capa)) {
ODPH_ERR("Crypto capability request failed.\n");
diff --git a/test/performance/odp_dma_perf.c b/test/performance/odp_dma_perf.c
index 615107299..62899f913 100644
--- a/test/performance/odp_dma_perf.c
+++ b/test/performance/odp_dma_perf.c
@@ -21,6 +21,7 @@
#define DEFAULT_SEG_SIZE 1024U
#define ROUNDS 1000000
+#define DEFAULT_WAIT_NS ODP_TIME_SEC_IN_NS
#define COMPL_DELIMITER ","
/* For now, a static maximum amount of input segments */
#define MAX_NUM_IN_SEGS 64
@@ -43,6 +44,8 @@
#define MEGAS 1000000
#define KILOS 1000
+#define RETRIES 1000U
+
typedef struct test_config_t {
int trs_type;
int trs_grn;
@@ -51,6 +54,7 @@ typedef struct test_config_t {
int seg_type;
int num_rounds;
int dma_rounds;
+ uint64_t wait_ns;
struct {
int num_modes;
@@ -104,6 +108,7 @@ static void set_option_defaults(test_config_t *config)
config->num_in_seg = 1;
config->seg_size = DEFAULT_SEG_SIZE;
config->num_rounds = ROUNDS;
+ config->wait_ns = DEFAULT_WAIT_NS;
config->compl_modes.compl_mask = ODP_DMA_COMPL_SYNC;
}
@@ -172,6 +177,8 @@ static void print_usage(void)
" 1: event\n"
" -r, --num_rounds Number of times to run the test scenario. %d by\n"
" default.\n"
+ " -w, --wait_nsec Number of nanoseconds to wait for completion events.\n"
+ " 1 second (1000000000) by default.\n"
" -h, --help This help.\n"
"\n",
MAX_NUM_IN_SEGS, ROUNDS);
@@ -254,11 +261,12 @@ static int parse_options(int argc, char **argv, test_config_t *config)
{ "in_seg_type", required_argument, NULL, 'T' },
{ "compl_modes", required_argument, NULL, 'm' },
{ "num_rounds", required_argument, NULL, 'r' },
+ { "wait_nsec", required_argument, NULL, 'w' },
{ "help", no_argument, NULL, 'h' },
{ NULL, 0, NULL, 0 }
};
- static const char *shortopts = "t:g:i:s:T:m:r:h";
+ static const char *shortopts = "t:g:i:s:T:m:r:w:h";
set_option_defaults(config);
@@ -290,6 +298,9 @@ static int parse_options(int argc, char **argv, test_config_t *config)
case 'r':
config->num_rounds = atoi(optarg);
break;
+ case 'w':
+ config->wait_ns = atoll(optarg);
+ break;
case 'h':
default:
print_usage();
@@ -668,7 +679,7 @@ static void print_humanised_speed(uint64_t speed)
printf("%" PRIu64 " B/s\n", speed);
}
-static void print_results(const test_config_t *config, uint64_t time)
+static void print_results(const test_config_t *config, uint64_t time, uint32_t retries)
{
const int is_sync = config->trs_type == TRS_TYPE_SYNC;
const uint64_t avg_time = time / config->num_rounds;
@@ -705,6 +716,7 @@ static void print_results(const test_config_t *config, uint64_t time)
" average transfer speed: ",
config->num_rounds, avg_time);
print_humanised_speed(avg_speed);
+ printf(" retries with usec sleep: %u\n", retries);
printf("\n=============================================\n");
}
@@ -713,7 +725,8 @@ static int run_dma_sync(test_config_t *config)
odp_dma_transfer_param_t trs_params[config->dma_rounds];
uint32_t trs_lengths[config->dma_rounds];
odp_time_t start, end;
- uint32_t num_rounds = config->num_rounds, offset;
+ uint32_t num_rounds = config->num_rounds, offset, retries = 0U;
+ int done = 0;
config->test_case_api.trs_base_fn(config, trs_params, trs_lengths);
start = odp_time_local_strict();
@@ -724,8 +737,18 @@ static int run_dma_sync(test_config_t *config)
for (int i = 0; i < config->dma_rounds; ++i) {
config->test_case_api.trs_dyn_fn(config, offset, trs_lengths[i]);
- if (odp_dma_transfer(config->dma_config.handle, &trs_params[i], NULL)
- <= 0) {
+ while (1) {
+ done = odp_dma_transfer(config->dma_config.handle, &trs_params[i],
+ NULL);
+
+ if (done > 0)
+ break;
+
+ if (done == 0 && retries++ < RETRIES) {
+ odp_time_wait_ns(1000U);
+ continue;
+ }
+
ODPH_ERR("Error starting a sync DMA transfer.\n");
return -1;
}
@@ -735,7 +758,7 @@ static int run_dma_sync(test_config_t *config)
}
end = odp_time_local_strict();
- print_results(config, odp_time_diff_ns(end, start));
+ print_results(config, odp_time_diff_ns(end, start), retries);
return 0;
}
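
The loop above retries a synchronous transfer that returns 0 (resources
temporarily unavailable) instead of failing on the first attempt. The pattern in
isolation, with hypothetical handle/param names and the RETRIES budget from this
patch:

	uint32_t retries = 0U;
	int ret;

	while (1) {
		ret = odp_dma_transfer(handle, &param, NULL);

		if (ret > 0)
			break;			/* transfer completed */

		if (ret == 0 && retries++ < RETRIES) {
			odp_time_wait_ns(1000U);	/* ~1 us back-off */
			continue;
		}

		return -1;			/* error or budget exhausted */
	}
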
@@ -854,7 +877,8 @@ static void build_wait_list(const test_config_t *config, odp_dma_compl_param_t c
static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_entry_t list[])
{
odp_event_t ev;
- const uint64_t wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS * 5U);
+ const uint64_t wait_time = odp_schedule_wait_time(config->wait_ns);
+ uint64_t start, end;
int done = 0;
for (int i = 0; i < config->dma_rounds; ++i) {
@@ -866,6 +890,9 @@ static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_ent
return -1;
}
} else {
+ start = odp_time_local_ns();
+ end = start + ODP_TIME_SEC_IN_NS;
+
while (1) {
done = odp_dma_transfer_done(config->dma_config.handle, list[i].id,
NULL);
@@ -873,7 +900,7 @@ static inline int wait_dma_transfers_ready(test_config_t *config, compl_wait_ent
if (done > 0)
break;
- if (done == 0)
+ if (done == 0 && odp_time_local_ns() < end)
continue;
ODPH_ERR("Error waiting poll completion.\n");
@@ -907,10 +934,10 @@ static int run_dma_async_transfer(test_config_t *config)
odp_dma_transfer_param_t trs_params[config->dma_rounds];
uint32_t trs_lengths[config->dma_rounds];
odp_dma_compl_param_t compl_params[config->dma_rounds];
- int ret = 0;
+ int ret = 0, started;
compl_wait_entry_t compl_wait_list[config->dma_rounds];
odp_time_t start, end;
- uint32_t num_rounds = config->num_rounds, offset;
+ uint32_t num_rounds = config->num_rounds, offset, retries = 0U;
config->test_case_api.trs_base_fn(config, trs_params, trs_lengths);
@@ -928,8 +955,18 @@ static int run_dma_async_transfer(test_config_t *config)
for (int i = 0; i < config->dma_rounds; ++i) {
config->test_case_api.trs_dyn_fn(config, offset, trs_lengths[i]);
- if (odp_dma_transfer_start(config->dma_config.handle, &trs_params[i],
- &compl_params[i]) <= 0) {
+ while (1) {
+ started = odp_dma_transfer_start(config->dma_config.handle,
+ &trs_params[i], &compl_params[i]);
+
+ if (started > 0)
+ break;
+
+ if (started == 0 && retries++ < RETRIES) {
+ odp_time_wait_ns(1000U);
+ continue;
+ }
+
ODPH_ERR("Error starting an async DMA transfer.\n");
ret = -1;
goto out_trs_ids;
@@ -946,7 +983,7 @@ static int run_dma_async_transfer(test_config_t *config)
}
end = odp_time_local_strict();
- print_results(config, odp_time_diff_ns(end, start));
+ print_results(config, odp_time_diff_ns(end, start), retries);
out_compl_evs:
free_dma_completion_events(config, compl_params);
diff --git a/test/performance/odp_ipsecfwd.c b/test/performance/odp_ipsecfwd.c
index b917a976e..6098fd964 100644
--- a/test/performance/odp_ipsecfwd.c
+++ b/test/performance/odp_ipsecfwd.c
@@ -71,11 +71,13 @@ typedef struct pktio_s {
odp_queue_t out_ev_qs[MAX_QUEUES];
};
+ odp_pktin_queue_t in_dir_qs[MAX_QUEUES];
odph_ethaddr_t src_mac;
char *name;
odp_pktio_t handle;
- odp_bool_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkt);
+ uint32_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num);
uint32_t num_tx_qs;
+ uint8_t idx;
} pktio_t;
typedef struct {
@@ -99,8 +101,21 @@ typedef struct prog_config_s prog_config_t;
typedef struct ODP_ALIGNED_CACHE {
stats_t stats;
prog_config_t *prog_config;
+ int thr_idx;
+ uint8_t pktio;
} thread_config_t;
+typedef uint32_t (*rx_fn_t)(thread_config_t *config, odp_event_t evs[], int num);
+typedef void (*ipsec_fn_t)(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats);
+typedef void (*drain_fn_t)(prog_config_t *config);
+
+typedef struct {
+ rx_fn_t rx;
+ ipsec_fn_t proc;
+ ipsec_fn_t compl;
+ drain_fn_t drain;
+} ops_t;
+
typedef struct prog_config_s {
odph_thread_t thread_tbl[MAX_WORKERS];
thread_config_t thread_config[MAX_WORKERS];
@@ -108,6 +123,7 @@ typedef struct prog_config_s {
fwd_entry_t fwd_entries[MAX_FWDS];
odp_queue_t sa_qs[MAX_SA_QUEUES];
pktio_t pktios[MAX_IFS];
+ ops_t ops;
char *sa_conf_file;
char *fwd_conf_file;
odp_instance_t odp_instance;
@@ -125,6 +141,8 @@ typedef struct prog_config_s {
uint32_t num_sas;
uint32_t num_fwds;
int num_thrs;
+ odp_bool_t is_dir_rx;
+ odp_bool_t is_hashed_tx;
uint8_t mode;
} prog_config_t;
@@ -134,6 +152,23 @@ typedef struct {
int type;
} exposed_alg_t;
+typedef struct {
+ odp_packet_t pkts[MAX_BURST];
+ const pktio_t *pktio;
+ uint32_t num;
+} pkt_vec_t;
+
+typedef struct {
+ pkt_vec_t vecs[MAX_QUEUES];
+ uint8_t num_qs;
+} pkt_out_t;
+
+typedef struct {
+ pkt_out_t ifs[MAX_IFS];
+ odp_bool_t is_hashed_tx;
+ uint8_t q_idx;
+} pkt_ifs_t;
+
static exposed_alg_t exposed_algs[] = {
ALG_ENTRY(ODP_CIPHER_ALG_NULL, CIPHER_TYPE),
ALG_ENTRY(ODP_CIPHER_ALG_DES, CIPHER_TYPE),
@@ -163,6 +198,7 @@ static exposed_alg_t exposed_algs[] = {
static odp_ipsec_sa_t *spi_to_sa_map[2U][MAX_SPIS];
static odp_atomic_u32_t is_running;
static const int ipsec_out_mark;
+static __thread pkt_ifs_t ifs;
static void init_config(prog_config_t *config)
{
@@ -329,32 +365,543 @@ static void print_usage(void)
" -I, --num_input_qs Input queue count. 1 by default.\n"
" -S, --num_sa_qs SA queue count. 1 by default.\n"
" -O, --num_output_qs Output queue count. 1 by default.\n"
+ " -d, --direct_rx Use direct RX. Interfaces will be polled by workers\n"
+ " directly. \"--mode\", \"--num_input_qs\" and\n"
+ " \"--num_output_qs\" options are ignored, input and output\n"
+ " queue counts will match worker count.\n"
" -h, --help This help.\n"
"\n");
}
+static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt)
+{
+ odph_esphdr_t esp;
+ uint32_t spi;
+
+ if (!odp_packet_has_ipsec(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0)
+ return NULL;
+
+ spi = odp_be_to_cpu_32(esp.spi);
+
+ return spi <= UINT16_MAX ? spi_to_sa_map[DIR_IN][spi] : NULL;
+}
+
+static inline int process_ipsec_in_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_in_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], NULL);
+
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_in_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt)
+{
+ odph_udphdr_t udp;
+ uint16_t dst_port;
+
+ if (!odp_packet_has_udp(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0)
+ return NULL;
+
+ dst_port = odp_be_to_cpu_16(udp.dst_port);
+
+ return dst_port ? spi_to_sa_map[DIR_OUT][dst_port] : NULL;
+}
+
+static inline int process_ipsec_out_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_out_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark);
+
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_out_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl,
+ uint8_t *q_idx)
+{
+ const uint32_t l3_off = odp_packet_l3_offset(pkt);
+ odph_ipv4hdr_t ipv4;
+ uint32_t dst_ip, src_ip;
+ fwd_entry_t *fwd;
+ odph_ethhdr_t eth;
+
+ if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0)
+ return NULL;
+
+ dst_ip = odp_be_to_cpu_32(ipv4.dst_addr);
+
+ if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL)
+ return NULL;
+
+ if (l3_off != ODPH_ETHHDR_LEN) {
+ if (l3_off > ODPH_ETHHDR_LEN) {
+ if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL)
+ return NULL;
+ } else {
+ if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL)
+ return NULL;
+ }
+ }
+
+ eth.dst = fwd->dst_mac;
+ eth.src = fwd->pktio->src_mac;
+ eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+ if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, &eth) < 0)
+ return NULL;
+
+ if (q_idx != NULL) {
+ src_ip = odp_be_to_cpu_32(ipv4.src_addr);
+ *q_idx = (src_ip ^ dst_ip) % fwd->pktio->num_tx_qs;
+ }
+
+ return fwd->pktio;
+}
+
+static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl)
+{
+ odp_packet_t pkt;
+ odp_bool_t is_hashed_tx = ifs.is_hashed_tx;
+ uint8_t q_idx = is_hashed_tx ? 0U : ifs.q_idx, qs_done;
+ uint8_t *q_idx_ptr = is_hashed_tx ? &q_idx : NULL;
+ const pktio_t *pktio;
+ pkt_out_t *out;
+ pkt_vec_t *vec;
+ uint32_t num_procd = 0U, ret;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ pktio = lookup_and_apply(pkt, fwd_tbl, q_idx_ptr);
+
+ if (pktio == NULL) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ out = &ifs.ifs[pktio->idx];
+ vec = &out->vecs[q_idx];
+
+ if (vec->num == 0U)
+ out->num_qs++;
+
+ vec->pkts[vec->num++] = pkt;
+ vec->pktio = pktio;
+ }
+
+ for (uint32_t i = 0U; i < MAX_IFS; ++i) {
+ qs_done = 0U;
+ out = &ifs.ifs[i];
+
+ for (uint32_t j = 0U; j < MAX_QUEUES && qs_done < out->num_qs; ++j) {
+ if (out->vecs[j].num == 0U)
+ continue;
+
+ vec = &out->vecs[j];
+ pktio = vec->pktio;
+ ret = pktio->send_fn(pktio, j, vec->pkts, vec->num);
+
+ if (odp_unlikely(ret < vec->num))
+ odp_packet_free_multi(&vec->pkts[ret], vec->num - ret);
+
+ ++qs_done;
+ vec->num = 0U;
+ num_procd += ret;
+ }
+
+ out->num_qs = 0U;
+ }
+
+ return num_procd;
+}
+
+static inline void process_packets_out_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ sa = get_out_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_out_enq(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_out_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+static void process_packets_in_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ ++stats->discards;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ sa = get_in_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_in_enq(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_in_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats);
+}
+
+static inline odp_bool_t is_ipsec_in(odp_packet_t pkt)
+{
+ return odp_packet_user_ptr(pkt) == NULL;
+}
+
+static void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats)
+{
+ odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_bool_t is_in;
+ odp_ipsec_packet_result_t result;
+ int num_pkts_out = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ is_in = is_ipsec_in(pkt);
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (is_in) {
+ ++stats->ipsec_in_pkts;
+ pkts_out[num_pkts_out++] = pkt;
+ } else {
+ ++stats->ipsec_out_pkts;
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats);
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+static void drain_scheduler(prog_config_t *config ODP_UNUSED)
+{
+ odp_event_t ev;
+
+ while (true) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+}
+
+static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num,
+ odp_packet_t pkts_out[])
+{
+ odp_ipsec_in_param_t param;
+ int left, sent = 0, num_out, ret;
+
+ memset(&param, 0, sizeof(param));
+
+ while (sent < num) {
+ left = num - sent;
+ num_out = left;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_in(&pkts[sent], left, &pkts_out[sent], &num_out, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num,
+ odp_packet_t pkts_out[])
+{
+ odp_ipsec_out_param_t param;
+ int left, sent = 0, num_out, ret;
+
+ memset(&param, 0, sizeof(param));
+
+ while (sent < num) {
+ left = num - sent;
+ num_out = left;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_out(&pkts[sent], left, &pkts_out[sent], &num_out, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST], pkts_ips_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
+ odp_ipsec_packet_result_t result;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ sa = get_out_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips, pkts_ips_out);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_out_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+
+ for (int i = 0; i < num_procd; ++i) {
+ pkt = pkts_ips_out[i];
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ ++stats->ipsec_out_pkts;
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+static void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST], pkts_ips_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
+ odp_ipsec_packet_result_t result;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ ++stats->discards;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ sa = get_in_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips, pkts_ips_out);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_in_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+
+ for (int i = 0; i < num_procd; ++i) {
+ pkt = pkts_ips_out[i];
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ ++stats->ipsec_in_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ ++stats->ipsec_in_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ ++stats->ipsec_in_pkts;
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
+}
+
+static void drain_direct_inputs(prog_config_t *config)
+{
+ odp_packet_t pkt;
+
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ for (uint32_t j = 0U; j < config->num_input_qs; ++j) {
+ while (odp_pktin_recv(config->pktios[i].in_dir_qs[j], &pkt, 1) == 1)
+ odp_packet_free(pkt);
+ }
+ }
+}
+
static odp_bool_t setup_ipsec(prog_config_t *config)
{
odp_queue_param_t q_param;
odp_ipsec_config_t ipsec_config;
char q_name[ODP_QUEUE_NAME_LEN];
- snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status");
- odp_queue_param_init(&q_param);
- q_param.type = ODP_QUEUE_TYPE_SCHED;
- q_param.sched.prio = odp_schedule_default_prio();
- q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- q_param.sched.group = ODP_SCHED_GROUP_ALL;
- config->compl_q = odp_queue_create(q_name, &q_param);
+ if (!config->is_dir_rx) {
+ snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status");
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ q_param.sched.prio = odp_schedule_default_prio();
+ q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q_param.sched.group = ODP_SCHED_GROUP_ALL;
+ config->compl_q = odp_queue_create(q_name, &q_param);
- if (config->compl_q == ODP_QUEUE_INVALID) {
- ODPH_ERR("Error creating IPsec completion queue\n");
- return false;
+ if (config->compl_q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating IPsec completion queue\n");
+ return false;
+ }
}
odp_ipsec_config_init(&ipsec_config);
- ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
- ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+
+ if (!config->is_dir_rx) {
+ ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ config->ops.proc = process_packets_in_enq;
+ config->ops.compl = complete_ipsec_ops;
+ config->ops.drain = drain_scheduler;
+ } else {
+ ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config->ops.proc = process_packets_in;
+ config->ops.compl = NULL;
+ config->ops.drain = drain_direct_inputs;
+ }
+
ipsec_config.inbound.default_queue = config->compl_q;
/* For tunnel to tunnel, we need to parse up to this to check the UDP port for SA. */
ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_L4;
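
Taken together, the mode selection reduces to a single ops table. Collecting the
assignments that the patch spreads across setup_ipsec() above and setup_pktios()
further down (all names from this patch):

	config->ops.rx    = !config->is_dir_rx ? schedule : recv;
	config->ops.proc  = !config->is_dir_rx ? process_packets_in_enq
						: process_packets_in;
	config->ops.compl = !config->is_dir_rx ? complete_ipsec_ops : NULL;
	config->ops.drain = !config->is_dir_rx ? drain_scheduler
						: drain_direct_inputs;
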
@@ -497,7 +1044,7 @@ static void parse_sas(prog_config_t *config)
if (!setup_ipsec(config))
return;
- if (!create_sa_dest_queues(&ipsec_capa, config))
+ if (!config->is_dir_rx && !create_sa_dest_queues(&ipsec_capa, config))
return;
file = fopen(config->sa_conf_file, "r");
@@ -640,6 +1187,11 @@ static parse_result_t check_options(prog_config_t *config)
return PRS_NOK;
}
+ if (config->is_dir_rx) {
+ config->num_input_qs = config->num_thrs;
+ config->num_output_qs = config->num_thrs;
+ }
+
return PRS_OK;
}
@@ -658,11 +1210,12 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
{ "num_input_qs", required_argument, NULL, 'I' },
{ "num_sa_qs", required_argument, NULL, 'S' },
{ "num_output_qs", required_argument, NULL, 'O' },
+ { "direct_rx", no_argument, NULL, 'd'},
{ "help", no_argument, NULL, 'h' },
{ NULL, 0, NULL, 0 }
};
- static const char *shortopts = "i:n:l:c:m:s:f:I:S:O:h";
+ static const char *shortopts = "i:n:l:c:m:s:f:I:S:O:dh";
while (true) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -701,6 +1254,9 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
case 'O':
config->num_output_qs = atoi(optarg);
break;
+ case 'd':
+ config->is_dir_rx = true;
+ break;
case 'h':
print_usage();
return PRS_TERM;
@@ -732,14 +1288,46 @@ static parse_result_t setup_program(int argc, char **argv, prog_config_t *config
return parse_options(argc, argv, config);
}
-static odp_bool_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkt)
+static uint32_t schedule(thread_config_t *config ODP_UNUSED, odp_event_t evs[], int num)
+{
+ return odp_schedule_multi_no_wait(NULL, evs, num);
+}
+
+static uint32_t recv(thread_config_t *config, odp_event_t evs[], int num)
{
- return odp_pktout_send(pktio->out_dir_qs[index], &pkt, 1) == 1;
+ prog_config_t *prog_config = config->prog_config;
+ pktio_t *pktio = &prog_config->pktios[config->pktio++ % prog_config->num_ifs];
+ odp_pktin_queue_t in_q = pktio->in_dir_qs[config->thr_idx % prog_config->num_input_qs];
+ odp_packet_t pkts[num];
+ int ret;
+
+ ret = odp_pktin_recv(in_q, pkts, num);
+
+ if (odp_unlikely(ret <= 0))
+ return 0U;
+
+ odp_packet_to_event_multi(pkts, evs, ret);
+
+ return ret;
}
-static odp_bool_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkt)
+static uint32_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num)
{
- return odp_queue_enq(pktio->out_ev_qs[index], odp_packet_to_event(pkt)) == 0;
+ int ret = odp_pktout_send(pktio->out_dir_qs[index], pkts, num);
+
+ return ret < 0 ? 0U : (uint32_t)ret;
+}
+
+static uint32_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num)
+{
+ odp_event_t evs[MAX_BURST];
+ int ret;
+
+ odp_packet_to_event_multi(pkts, evs, num);
+
+ ret = odp_queue_enq_multi(pktio->out_ev_qs[index], evs, num);
+
+ return ret < 0 ? 0U : (uint32_t)ret;
}
static odp_bool_t setup_pktios(prog_config_t *config)
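
The send and enqueue helpers now take whole bursts and return how many packets
the queue actually accepted, so the caller can free the unsent tail. Caller-side
sketch, following what forward_packets() does above:

	uint32_t sent = pktio->send_fn(pktio, q_idx, vec->pkts, vec->num);

	if (odp_unlikely(sent < vec->num))
		odp_packet_free_multi(&vec->pkts[sent], vec->num - sent);
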
@@ -765,12 +1353,17 @@ static odp_bool_t setup_pktios(prog_config_t *config)
return false;
}
+ config->ops.rx = !config->is_dir_rx ? schedule : recv;
+ config->is_hashed_tx = !config->is_dir_rx && config->mode == ORDERED;
+
for (uint32_t i = 0U; i < config->num_ifs; ++i) {
pktio = &config->pktios[i];
+ pktio->idx = i;
odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
- pktio_param.out_mode = config->mode == ORDERED ? ODP_PKTOUT_MODE_QUEUE :
- ODP_PKTOUT_MODE_DIRECT;
+ pktio_param.in_mode = !config->is_dir_rx ?
+ ODP_PKTIN_MODE_SCHED : ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = config->is_hashed_tx ?
+ ODP_PKTOUT_MODE_QUEUE : ODP_PKTOUT_MODE_DIRECT;
pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param);
if (pktio->handle == ODP_PKTIO_INVALID) {
@@ -801,7 +1394,7 @@ static odp_bool_t setup_pktios(prog_config_t *config)
odp_pktin_queue_param_init(&pktin_param);
- if (config->mode == ORDERED)
+ if (config->is_hashed_tx)
pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
if (config->num_input_qs > 1U) {
@@ -810,24 +1403,40 @@ static odp_bool_t setup_pktios(prog_config_t *config)
pktin_param.num_queues = config->num_input_qs;
}
+ pktin_param.op_mode = (config->is_dir_rx &&
+ config->num_thrs > (int)config->num_input_qs) ?
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+
if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) {
ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name);
return false;
}
- pktio->send_fn = config->mode == ORDERED ? enqueue : send;
+ if (config->is_dir_rx) {
+ if (odp_pktin_queue(pktio->handle, pktio->in_dir_qs, config->num_input_qs)
+ != (int)config->num_input_qs) {
+ ODPH_ERR("Error querying packet I/O input queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ }
+
+ pktio->send_fn = config->is_hashed_tx ? enqueue : send;
pktio->num_tx_qs = config->num_output_qs;
odp_pktout_queue_param_init(&pktout_param);
pktout_param.num_queues = pktio->num_tx_qs;
- pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ?
- ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+
+ if (!config->is_hashed_tx) {
+ pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ?
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+ }
if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) {
ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name);
return false;
}
- if (config->mode == ORDERED) {
+ if (config->is_hashed_tx) {
if (odp_pktout_event_queue(pktio->handle, pktio->out_ev_qs,
pktio->num_tx_qs) != (int)pktio->num_tx_qs) {
ODPH_ERR("Error querying packet I/O output event queue (%s)\n",
@@ -889,281 +1498,6 @@ static odp_bool_t setup_fwd_table(prog_config_t *config)
return true;
}
-static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt)
-{
- odph_esphdr_t esp;
- uint32_t spi;
-
- if (!odp_packet_has_ipsec(pkt))
- return NULL;
-
- if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0)
- return NULL;
-
- spi = odp_be_to_cpu_32(esp.spi);
-
- return spi <= UINT16_MAX ? spi_to_sa_map[DIR_IN][spi] : NULL;
-}
-
-static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
-{
- odp_ipsec_in_param_t param;
- int left, sent = 0, ret;
-
- memset(&param, 0, sizeof(param));
- /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
- for (int i = 0; i < num; ++i)
- odp_packet_user_ptr_set(pkts[i], NULL);
-
- while (sent < num) {
- left = num - sent;
- param.num_sa = left;
- param.sa = &sas[sent];
- ret = odp_ipsec_in_enq(&pkts[sent], left, &param);
-
- if (odp_unlikely(ret <= 0))
- break;
-
- sent += ret;
- }
-
- return sent;
-}
-
-static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt)
-{
- odph_udphdr_t udp;
- uint16_t dst_port;
-
- if (!odp_packet_has_udp(pkt))
- return NULL;
-
- if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0)
- return NULL;
-
- dst_port = odp_be_to_cpu_16(udp.dst_port);
-
- return dst_port ? spi_to_sa_map[DIR_OUT][dst_port] : NULL;
-}
-
-static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
-{
- odp_ipsec_out_param_t param;
- int left, sent = 0, ret;
-
- memset(&param, 0, sizeof(param));
- /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
- for (int i = 0; i < num; ++i)
- odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark);
-
- while (sent < num) {
- left = num - sent;
- param.num_sa = left;
- param.sa = &sas[sent];
- ret = odp_ipsec_out_enq(&pkts[sent], left, &param);
-
- if (odp_unlikely(ret <= 0))
- break;
-
- sent += ret;
- }
-
- return sent;
-}
-
-static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl,
- uint8_t *hash)
-{
- const uint32_t l3_off = odp_packet_l3_offset(pkt);
- odph_ipv4hdr_t ipv4;
- uint32_t dst_ip, src_ip;
- fwd_entry_t *fwd;
- odph_ethhdr_t eth;
-
- if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0)
- return NULL;
-
- dst_ip = odp_be_to_cpu_32(ipv4.dst_addr);
-
- if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL)
- return NULL;
-
- if (l3_off != ODPH_ETHHDR_LEN) {
- if (l3_off > ODPH_ETHHDR_LEN) {
- if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL)
- return NULL;
- } else {
- if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL)
- return NULL;
- }
- }
-
- eth.dst = fwd->dst_mac;
- eth.src = fwd->pktio->src_mac;
- eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
-
- if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, &eth) < 0)
- return NULL;
-
- src_ip = odp_be_to_cpu_32(ipv4.src_addr);
- *hash = src_ip ^ dst_ip;
-
- return fwd->pktio;
-}
-
-static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl)
-{
- odp_packet_t pkt;
- uint8_t hash = 0U;
- const pktio_t *pktio;
- uint32_t num_procd = 0U;
-
- for (int i = 0; i < num; ++i) {
- pkt = pkts[i];
- pktio = lookup_and_apply(pkt, fwd_tbl, &hash);
-
- if (pktio == NULL) {
- odp_packet_free(pkt);
- continue;
- }
-
- if (odp_unlikely(!pktio->send_fn(pktio, hash % pktio->num_tx_qs, pkt))) {
- odp_packet_free(pkt);
- continue;
- }
-
- ++num_procd;
- }
-
- return num_procd;
-}
-
-static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
- stats_t *stats)
-{
- odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST];
- odp_ipsec_sa_t *sa, sas[MAX_BURST];
- int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
-
- for (int i = 0; i < num; ++i) {
- pkt = pkts[i];
- sa = get_out_sa(pkt);
-
- if (sa != NULL) {
- sas[num_pkts_ips] = *sa;
- pkts_ips[num_pkts_ips] = pkt;
- ++num_pkts_ips;
- } else {
- pkts_fwd[num_pkts_fwd++] = pkt;
- }
- }
-
- if (num_pkts_ips > 0) {
- num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips);
-
- if (odp_unlikely(num_procd < num_pkts_ips)) {
- num_procd = num_procd < 0 ? 0 : num_procd;
- stats->ipsec_out_errs += num_pkts_ips - num_procd;
- odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
- }
- }
-
- if (num_pkts_fwd > 0) {
- num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
- stats->discards += num_pkts_fwd - num_procd;
- stats->fwd_pkts += num_procd;
- }
-}
-
-static inline void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
- stats_t *stats)
-{
- odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST];
- odp_ipsec_sa_t *sa, sas[MAX_BURST];
- int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
-
- for (int i = 0; i < num; ++i) {
- pkt = pkts[i];
-
- if (odp_unlikely(odp_packet_has_error(pkt))) {
- ++stats->discards;
- odp_packet_free(pkt);
- continue;
- }
-
- sa = get_in_sa(pkt);
-
- if (sa != NULL) {
- sas[num_pkts_ips] = *sa;
- pkts_ips[num_pkts_ips] = pkt;
- ++num_pkts_ips;
- } else {
- pkts_out[num_pkts_out++] = pkt;
- }
- }
-
- if (num_pkts_ips > 0) {
- num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips);
-
- if (odp_unlikely(num_procd < num_pkts_ips)) {
- num_procd = num_procd < 0 ? 0 : num_procd;
- stats->ipsec_in_errs += num_pkts_ips - num_procd;
- odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
- }
- }
-
- if (num_pkts_out > 0)
- process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
-}
-
-static inline odp_bool_t is_ipsec_in(odp_packet_t pkt)
-{
- return odp_packet_user_ptr(pkt) == NULL;
-}
-
-static inline void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
- stats_t *stats)
-{
- odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST];
- odp_bool_t is_in;
- odp_ipsec_packet_result_t result;
- int num_pkts_out = 0, num_pkts_fwd = 0, num_procd;
-
- for (int i = 0; i < num; ++i) {
- pkt = pkts[i];
- is_in = is_ipsec_in(pkt);
-
- if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
- is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
- odp_packet_free(pkt);
- continue;
- }
-
- if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
- is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
- odp_packet_free(pkt);
- continue;
- }
-
- if (is_in) {
- ++stats->ipsec_in_pkts;
- pkts_out[num_pkts_out++] = pkt;
- } else {
- ++stats->ipsec_out_pkts;
- pkts_fwd[num_pkts_fwd++] = pkt;
- }
- }
-
- if (num_pkts_out > 0)
- process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
-
- if (num_pkts_fwd > 0) {
- num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
- stats->discards += num_pkts_fwd - num_procd;
- stats->fwd_pkts += num_procd;
- }
-}
-
static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats)
{
odp_ipsec_status_t status;
@@ -1174,42 +1508,33 @@ static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats)
odp_event_free(ev);
}
-static void drain_events(void)
-{
- odp_event_t ev;
-
- while (true) {
- ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
-
- if (ev == ODP_EVENT_INVALID)
- break;
-
- odp_event_free(ev);
- }
-}
-
static int process_packets(void *args)
{
thread_config_t *config = args;
+ int thr_idx = odp_thread_id();
odp_event_t evs[MAX_BURST], ev;
- int cnt;
+ ops_t ops = config->prog_config->ops;
+ uint32_t cnt;
odp_event_type_t type;
odp_event_subtype_t subtype;
odp_packet_t pkt, pkts_in[MAX_BURST], pkts_ips[MAX_BURST];
odph_table_t fwd_tbl = config->prog_config->fwd_tbl;
stats_t *stats = &config->stats;
+ ifs.is_hashed_tx = config->prog_config->is_hashed_tx;
+ ifs.q_idx = thr_idx % config->prog_config->num_output_qs;
+ config->thr_idx = thr_idx;
odp_barrier_wait(&config->prog_config->init_barrier);
while (odp_atomic_load_u32(&is_running)) {
int num_pkts_in = 0, num_pkts_ips = 0;
/* TODO: Add possibility to configure scheduler and ipsec enq/deq burst sizes. */
- cnt = odp_schedule_multi_no_wait(NULL, evs, MAX_BURST);
+ cnt = ops.rx(config, evs, MAX_BURST);
- if (cnt == 0)
+ if (cnt == 0U)
continue;
- for (int i = 0; i < cnt; ++i) {
+ for (uint32_t i = 0U; i < cnt; ++i) {
ev = evs[i];
type = odp_event_types(ev, &subtype);
pkt = odp_packet_from_event(ev);
@@ -1232,14 +1557,14 @@ static int process_packets(void *args)
}
if (num_pkts_in > 0)
- process_packets_in(pkts_in, num_pkts_in, fwd_tbl, stats);
+ ops.proc(pkts_in, num_pkts_in, fwd_tbl, stats);
- if (num_pkts_ips > 0)
- complete_ipsec_ops(pkts_ips, num_pkts_ips, fwd_tbl, stats);
+ if (ops.compl && num_pkts_ips > 0)
+ ops.compl(pkts_ips, num_pkts_ips, fwd_tbl, stats);
}
odp_barrier_wait(&config->prog_config->term_barrier);
- drain_events();
+ ops.drain(config->prog_config);
return 0;
}
@@ -1348,8 +1673,9 @@ static void teardown_test(const prog_config_t *config)
for (uint32_t i = 0U; i < config->num_sas; ++i)
(void)odp_ipsec_sa_disable(config->sas[i]);
- /* Drain SA status events. */
- wait_sas_disabled(config->num_sas);
+ if (!config->is_dir_rx)
+ /* Drain SA status events. */
+ wait_sas_disabled(config->num_sas);
for (uint32_t i = 0U; i < config->num_sas; ++i)
(void)odp_ipsec_sa_destroy(config->sas[i]);
@@ -1405,7 +1731,7 @@ int main(int argc, char **argv)
init_config(&config);
- if (odp_schedule_config(NULL) < 0) {
+ if (!config.is_dir_rx && odp_schedule_config(NULL) < 0) {
ODPH_ERR("Error configuring scheduler\n");
ret = EXIT_FAILURE;
goto out_test;
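
With --direct_rx each worker polls the interfaces itself instead of receiving
events from the scheduler. The core of the new receive path, as a standalone
sketch (in_q standing for one of the queried odp_pktin_queue_t handles):

	odp_packet_t pkts[MAX_BURST];
	odp_event_t evs[MAX_BURST];
	int num = odp_pktin_recv(in_q, pkts, MAX_BURST);

	if (num > 0)
		/* Hand the burst to the common event-based processing path. */
		odp_packet_to_event_multi(pkts, evs, num);
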
diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c
index 957b1de00..4ae2cf7d3 100644
--- a/test/performance/odp_pool_perf.c
+++ b/test/performance/odp_pool_perf.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
+ * Copyright (c) 2019-2022, Nokia
*
* All rights reserved.
*
@@ -16,6 +16,13 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
+#define STAT_AVAILABLE 0x1
+#define STAT_CACHE 0x2
+#define STAT_THR_CACHE 0x4
+#define STAT_ALLOC_OPS 0x10
+#define STAT_FREE_OPS 0x20
+#define STAT_TOTAL_OPS 0x40
+
typedef struct test_options_t {
uint32_t num_cpu;
uint32_t num_event;
@@ -24,6 +31,7 @@ typedef struct test_options_t {
uint32_t num_burst;
uint32_t data_size;
uint32_t cache_size;
+ uint32_t stats_mode;
int pool_type;
} test_options_t;
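
The STAT_* values form a bitmask, so counters are enabled in any combination by
OR-ing the flags together. For example:

	uint32_t stats_mode = STAT_AVAILABLE | STAT_CACHE | STAT_ALLOC_OPS;
	/* 0x1 | 0x2 | 0x10 == 0x13, i.e. the command line option -S 0x13 */
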
@@ -61,6 +69,14 @@ static void print_usage(void)
" -b, --burst Maximum number of events per operation\n"
" -n, --num_burst Number of bursts allocated/freed back-to-back\n"
" -s, --data_size Data size in bytes\n"
+ " -S, --stats_mode Pool statistics usage. Enable counters with combination of these flags:\n"
+ " 0: no pool statistics (default)\n"
+ " 0x1: available\n"
+ " 0x2: cache_available\n"
+ " 0x4: thread_cache_available\n"
+ " 0x10: alloc_ops\n"
+ " 0x20: free_ops\n"
+ " 0x40: total_ops\n"
" -t, --pool_type 0: Buffer pool (default)\n"
" 1: Packet pool\n"
" -C, --cache_size Pool cache size (per thread)\n"
@@ -81,13 +97,14 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{"burst", required_argument, NULL, 'b'},
{"num_burst", required_argument, NULL, 'n'},
{"data_size", required_argument, NULL, 's'},
+ {"stats_mode", required_argument, NULL, 'S'},
{"pool_type", required_argument, NULL, 't'},
{"cache_size", required_argument, NULL, 'C'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:e:r:b:n:s:t:C:h";
+ static const char *shortopts = "+c:e:r:b:n:s:S:t:C:h";
test_options->num_cpu = 1;
test_options->num_event = 1000;
@@ -95,6 +112,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->max_burst = 100;
test_options->num_burst = 1;
test_options->data_size = 64;
+ test_options->stats_mode = 0;
test_options->pool_type = 0;
test_options->cache_size = UINT32_MAX;
@@ -123,6 +141,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 's':
test_options->data_size = atoi(optarg);
break;
+ case 'S':
+ test_options->stats_mode = strtoul(optarg, NULL, 0);
+ break;
case 't':
test_options->pool_type = atoi(optarg);
break;
@@ -186,6 +207,7 @@ static int create_pool(test_global_t *global)
odp_pool_capability_t pool_capa;
odp_pool_param_t pool_param;
odp_pool_t pool;
+ odp_pool_stats_opt_t stats, stats_capa;
uint32_t max_num, max_size, min_cache_size, max_cache_size;
test_options_t *test_options = &global->test_options;
uint32_t num_event = test_options->num_event;
@@ -195,14 +217,30 @@ static int create_pool(test_global_t *global)
uint32_t num_cpu = test_options->num_cpu;
uint32_t data_size = test_options->data_size;
uint32_t cache_size = test_options->cache_size;
+ uint32_t stats_mode = test_options->stats_mode;
int packet_pool = test_options->pool_type;
+ stats.all = 0;
+
odp_pool_param_init(&pool_param);
if (cache_size == UINT32_MAX)
cache_size = packet_pool ? pool_param.pkt.cache_size :
pool_param.buf.cache_size;
+ if (stats_mode & STAT_AVAILABLE)
+ stats.bit.available = 1;
+ if (stats_mode & STAT_CACHE)
+ stats.bit.cache_available = 1;
+ if (stats_mode & STAT_THR_CACHE)
+ stats.bit.thread_cache_available = 1;
+ if (stats_mode & STAT_ALLOC_OPS)
+ stats.bit.alloc_ops = 1;
+ if (stats_mode & STAT_FREE_OPS)
+ stats.bit.free_ops = 1;
+ if (stats_mode & STAT_TOTAL_OPS)
+ stats.bit.total_ops = 1;
+
printf("\nPool performance test\n");
printf(" num cpu %u\n", num_cpu);
printf(" num rounds %u\n", num_round);
@@ -211,6 +249,7 @@ static int create_pool(test_global_t *global)
printf(" num bursts %u\n", num_burst);
printf(" data size %u\n", data_size);
printf(" cache size %u\n", cache_size);
+ printf(" stats mode 0x%x\n", stats_mode);
printf(" pool type %s\n\n", packet_pool ? "packet" : "buffer");
if (odp_pool_capability(&pool_capa)) {
@@ -223,11 +262,19 @@ static int create_pool(test_global_t *global)
max_size = pool_capa.pkt.max_len;
max_cache_size = pool_capa.pkt.max_cache_size;
min_cache_size = pool_capa.pkt.min_cache_size;
+ stats_capa = pool_capa.pkt.stats;
} else {
max_num = pool_capa.buf.max_num;
max_size = pool_capa.buf.max_size;
max_cache_size = pool_capa.buf.max_cache_size;
min_cache_size = pool_capa.buf.min_cache_size;
+ stats_capa = pool_capa.buf.stats;
+ }
+
+ if ((stats_capa.all & stats.all) != stats.all) {
+ printf("Error: requested statistics not supported (0x%" PRIx64 " / 0x%" PRIx64 ")\n",
+ stats.all, stats_capa.all);
+ return -1;
}
if (cache_size < min_cache_size) {
@@ -257,7 +304,6 @@ static int create_pool(test_global_t *global)
pool_param.pkt.max_num = num_event;
pool_param.pkt.max_len = data_size;
pool_param.pkt.cache_size = cache_size;
-
} else {
pool_param.type = ODP_POOL_BUFFER;
pool_param.buf.num = num_event;
@@ -265,6 +311,8 @@ static int create_pool(test_global_t *global)
pool_param.buf.cache_size = cache_size;
}
+ pool_param.stats.all = stats.all;
+
pool = odp_pool_create("pool perf", &pool_param);
if (pool == ODP_POOL_INVALID) {
@@ -472,6 +520,56 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
return 0;
}
+static void test_stats_perf(test_global_t *global)
+{
+ odp_pool_stats_t stats;
+ odp_time_t t1, t2;
+ uint64_t nsec;
+ int i;
+ int num_thr = global->test_options.num_cpu + 1; /* workers + main thread */
+ odp_pool_t pool = global->pool;
+ double nsec_ave = 0.0;
+ const int rounds = 1000;
+
+ if (num_thr > ODP_POOL_MAX_THREAD_STATS)
+ num_thr = ODP_POOL_MAX_THREAD_STATS;
+
+ memset(&stats, 0, sizeof(odp_pool_stats_t));
+ stats.thread.first = 0;
+ stats.thread.last = num_thr - 1;
+
+ t1 = odp_time_local_strict();
+
+ for (i = 0; i < rounds; i++) {
+ if (odp_pool_stats(pool, &stats)) {
+ printf("Error: Stats request failed on round %i\n", i);
+ break;
+ }
+ }
+
+ t2 = odp_time_local_strict();
+ nsec = odp_time_diff_ns(t2, t1);
+
+ if (i > 0)
+ nsec_ave = (double)nsec / i;
+
+ printf("Pool statistics:\n");
+ printf(" odp_pool_stats() calls %i\n", i);
+ printf(" ave call latency %.2f nsec\n", nsec_ave);
+ printf(" num threads %i\n", num_thr);
+ printf(" alloc_ops %" PRIu64 "\n", stats.alloc_ops);
+ printf(" free_ops %" PRIu64 "\n", stats.free_ops);
+ printf(" total_ops %" PRIu64 "\n", stats.total_ops);
+ printf(" available %" PRIu64 "\n", stats.available);
+ printf(" cache_available %" PRIu64 "\n", stats.cache_available);
+ for (i = 0; i < num_thr; i++) {
+ printf(" thr[%2i] cache_available %" PRIu64 "\n",
+ i, stats.thread.cache_available[i]);
+ }
+
+ printf("\n");
+}
+
static void print_stat(test_global_t *global)
{
int i, num;
@@ -615,6 +713,9 @@ int main(int argc, char **argv)
/* Wait workers to exit */
odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
+ if (global->test_options.stats_mode)
+ test_stats_perf(global);
+
print_stat(global);
if (odp_pool_destroy(global->pool)) {
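
For reference, a minimal sketch of what the stats_mode plumbing above configures
at pool creation (the buffer pool sizes are illustrative only):

	odp_pool_param_t param;
	odp_pool_t pool;

	odp_pool_param_init(&param);
	param.type = ODP_POOL_BUFFER;
	param.buf.num = 1000;
	param.buf.size = 64;
	param.stats.bit.alloc_ops = 1;	/* -S 0x10 */
	param.stats.bit.free_ops = 1;	/* -S 0x20 */

	pool = odp_pool_create("pool perf", &param);
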
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index d4cbfda19..c8dc74656 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -27,25 +27,8 @@
#define MAX_QUEUES 4096 /**< Maximum number of queues */
#define MAX_GROUPS 64
#define EVENT_POOL_SIZE (1024 * 1024) /**< Event pool size */
-#define TEST_ROUNDS 10 /**< Test rounds for each thread (millions) */
#define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */
-/* Default values for command line arguments */
-#define SAMPLE_EVENT_PER_PRIO 0 /**< Allocate a separate sample event for
- each priority */
-#define HI_PRIO_EVENTS 0 /**< Number of high priority events */
-#define LO_PRIO_EVENTS 32 /**< Number of low priority events */
-#define HI_PRIO_QUEUES 16 /**< Number of high priority queues */
-#define LO_PRIO_QUEUES 64 /**< Number of low priority queues */
-#define WARM_UP_ROUNDS 100 /**< Number of warm-up rounds */
-
-#define EVENTS_PER_HI_PRIO_QUEUE 0 /**< Alloc HI_PRIO_QUEUES x HI_PRIO_EVENTS
- events */
-#define EVENTS_PER_LO_PRIO_QUEUE 1 /**< Alloc LO_PRIO_QUEUES x LO_PRIO_EVENTS
- events */
-ODP_STATIC_ASSERT(HI_PRIO_QUEUES <= MAX_QUEUES, "Too many HI priority queues");
-ODP_STATIC_ASSERT(LO_PRIO_QUEUES <= MAX_QUEUES, "Too many LO priority queues");
-
#define CACHE_ALIGN_ROUNDUP(x)\
((ODP_CACHE_LINE_SIZE) * \
(((x) + ODP_CACHE_LINE_SIZE - 1) / (ODP_CACHE_LINE_SIZE)))
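
For context, CACHE_ALIGN_ROUNDUP (unchanged here) rounds a byte count up to the
next cache-line multiple. With a 64-byte cache line:

	/* CACHE_ALIGN_ROUNDUP(100) = 64 * ((100 + 63) / 64) = 64 * 2 = 128 */
	/* CACHE_ALIGN_ROUNDUP(128) = 64 * ((128 + 63) / 64) = 64 * 2 = 128 */
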
@@ -70,7 +53,7 @@ typedef enum {
/** Test event */
typedef struct {
- uint64_t ts; /**< Send timestamp */
+ odp_time_t time_stamp; /**< Send timestamp */
event_type_t type; /**< Message type */
int src_idx[NUM_PRIOS]; /**< Source ODP queue */
int prio; /**< Source queue priority */
@@ -89,6 +72,7 @@ typedef struct {
struct {
int queues; /**< Number of scheduling queues */
int events; /**< Number of events */
+ int sample_events;
odp_bool_t events_per_queue; /**< Allocate 'queues' x 'events'
test events */
} prio[NUM_PRIOS];
@@ -103,6 +87,7 @@ typedef struct {
uint64_t tot; /**< Total event latency. Sum of all events. */
uint64_t min; /**< Minimum event latency */
uint64_t max; /**< Maximum event latency */
+ uint64_t max_idx; /**< Index of the maximum latency sample event */
} test_stat_t;
/** Performance test statistics (per core) */
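
Storing the raw odp_time_t in the event (instead of a pre-converted uint64_t)
lets both ends of the measurement use the strict time API, matching the changes
in test_schedule() below. The measurement pattern, as a sketch:

	odp_time_t t_send, t_recv;
	uint64_t latency_ns;

	t_send = odp_time_global_strict();	/* stamped into the sample event */
	/* ... event is enqueued and later scheduled on some worker ... */
	t_recv = odp_time_global_strict();
	latency_ns = odp_time_to_ns(t_recv) - odp_time_to_ns(t_send);
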
@@ -298,20 +283,24 @@ static void print_results(test_globals_t *globals)
else
printf(" LO_PRIO events: %i\n", args->prio[LO_PRIO].events);
+ printf(" LO_PRIO sample events: %i\n", args->prio[LO_PRIO].sample_events);
+
printf(" HI_PRIO queues: %i\n", args->prio[HI_PRIO].queues);
if (args->prio[HI_PRIO].events_per_queue)
printf(" HI_PRIO event per queue: %i\n\n",
args->prio[HI_PRIO].events);
else
- printf(" HI_PRIO events: %i\n\n", args->prio[HI_PRIO].events);
+ printf(" HI_PRIO events: %i\n", args->prio[HI_PRIO].events);
+
+ printf(" HI_PRIO sample events: %i\n\n", args->prio[HI_PRIO].sample_events);
for (i = 0; i < NUM_PRIOS; i++) {
memset(&total, 0, sizeof(test_stat_t));
total.min = UINT64_MAX;
printf("%s priority\n"
- "Thread Avg[ns] Min[ns] Max[ns] Samples Total\n"
- "---------------------------------------------------------------\n",
+ "Thread Avg[ns] Min[ns] Max[ns] Samples Total Max idx\n"
+ "-----------------------------------------------------------------------\n",
i == HI_PRIO ? "HIGH" : "LOW");
for (j = 1; j <= args->cpu_count; j++) {
lat = &globals->core_stat[j].prio[i];
@@ -331,11 +320,11 @@ static void print_results(test_globals_t *globals)
avg = lat->events ? lat->tot / lat->sample_events : 0;
printf("%-8d %-10" PRIu64 " %-10" PRIu64 " "
- "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n",
+ "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n",
j, avg, lat->min, lat->max, lat->sample_events,
- lat->events);
+ lat->events, lat->max_idx);
}
- printf("---------------------------------------------------------------\n");
+ printf("-----------------------------------------------------------------------\n");
if (total.sample_events == 0) {
printf("Total N/A\n\n");
continue;
@@ -398,9 +387,9 @@ static int join_groups(test_globals_t *globals, int thr)
*/
static int test_schedule(int thr, test_globals_t *globals)
{
+ odp_time_t time;
odp_event_t ev;
odp_buffer_t buf;
- odp_queue_t src_queue;
odp_queue_t dst_queue;
uint64_t latency;
uint64_t i;
@@ -416,8 +405,12 @@ static int test_schedule(int thr, test_globals_t *globals)
change_queue = globals->args.forward_mode != EVENT_FORWARD_NONE ? 1 : 0;
+ odp_barrier_wait(&globals->barrier);
+
for (i = 0; i < test_rounds; i++) {
- ev = odp_schedule(&src_queue, ODP_SCHED_WAIT);
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+
+ time = odp_time_global_strict();
buf = odp_buffer_from_event(ev);
event = odp_buffer_addr(buf);
@@ -425,10 +418,12 @@ static int test_schedule(int thr, test_globals_t *globals)
stats = &globals->core_stat[thr].prio[event->prio];
if (event->type == SAMPLE) {
- latency = odp_time_to_ns(odp_time_global()) - event->ts;
+ latency = odp_time_to_ns(time) - odp_time_to_ns(event->time_stamp);
- if (latency > stats->max)
+ if (latency > stats->max) {
stats->max = latency;
+ stats->max_idx = stats->sample_events;
+ }
if (latency < stats->min)
stats->min = latency;
stats->tot += latency;
@@ -459,7 +454,7 @@ static int test_schedule(int thr, test_globals_t *globals)
dst_queue = globals->queue[event->prio][dst_idx];
if (event->type == SAMPLE)
- event->ts = odp_time_to_ns(odp_time_global());
+ event->time_stamp = odp_time_global_strict();
if (odp_queue_enq(dst_queue, ev)) {
ODPH_ERR("[%i] Queue enqueue failed.\n", thr);
@@ -472,6 +467,8 @@ static int test_schedule(int thr, test_globals_t *globals)
odp_schedule_pause();
while (1) {
+ odp_queue_t src_queue;
+
ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT);
if (ev == ODP_EVENT_INVALID)
@@ -509,7 +506,6 @@ static int run_thread(void *arg ODP_UNUSED)
test_globals_t *globals;
test_args_t *args;
int thr;
- int sample_events = 0;
thr = odp_thread_id();
@@ -528,23 +524,18 @@ static int run_thread(void *arg ODP_UNUSED)
args = &globals->args;
if (enqueue_events(HI_PRIO, args->prio[HI_PRIO].queues,
- args->prio[HI_PRIO].events, 1,
+ args->prio[HI_PRIO].events, args->prio[HI_PRIO].sample_events,
!args->prio[HI_PRIO].events_per_queue,
globals))
return -1;
- if (!args->prio[HI_PRIO].queues || args->sample_per_prio)
- sample_events = 1;
-
if (enqueue_events(LO_PRIO, args->prio[LO_PRIO].queues,
- args->prio[LO_PRIO].events, sample_events,
+ args->prio[LO_PRIO].events, args->prio[LO_PRIO].sample_events,
!args->prio[LO_PRIO].events_per_queue,
globals))
return -1;
}
- odp_barrier_wait(&globals->barrier);
-
if (test_schedule(thr, globals))
return -1;
@@ -562,7 +553,7 @@ static void usage(void)
"Usage: ./odp_sched_latency [options]\n"
"Optional OPTIONS:\n"
" -c, --count <number> CPU count, 0=all available, default=1\n"
- " -d, --duration <number> Test duration in scheduling rounds (millions), default=%d, min=1\n"
+ " -d, --duration <number> Test duration in scheduling rounds (millions), default=10, min=1\n"
" -f, --forward-mode <mode> Selection of target queue\n"
" 0: Random (default)\n"
" 1: Incremental\n"
@@ -573,14 +564,16 @@ static void usage(void)
" -i, --isolate <mode> Select if shared or isolated groups are used. Ignored when num_group <= 0.\n"
" 0: All queues share groups (default)\n"
" 1: Separate groups for high and low priority queues. Creates 2xnum_group groups.\n"
- " -l, --lo-prio-queues <number> Number of low priority scheduled queues\n"
- " -t, --hi-prio-queues <number> Number of high priority scheduled queues\n"
- " -m, --lo-prio-events-per-queue <number> Number of events per low priority queue\n"
- " -n, --hi-prio-events-per-queue <number> Number of events per high priority queues\n"
- " -o, --lo-prio-events <number> Total number of low priority events (overrides the\n"
- " number of events per queue)\n"
- " -p, --hi-prio-events <number> Total number of high priority events (overrides the\n"
- " number of events per queue)\n"
+ " -l, --lo-prio-queues <number> Number of low priority scheduled queues (default=64)\n"
+ " -t, --hi-prio-queues <number> Number of high priority scheduled queues (default=16)\n"
+ " -m, --lo-prio-events-per-queue <number> Number of events per low priority queue (default=32).\n"
+ " Does not include sample event.\n"
+	       " -n, --hi-prio-events-per-queue <number> Number of events per high priority queue (default=0).\n"
+ " Does not include sample event.\n"
+ " -o, --lo-prio-events <number> Total number of low priority events. Overrides the\n"
+ " number of events per queue, does not include sample event.\n"
+ " -p, --hi-prio-events <number> Total number of high priority events. Overrides the\n"
+ " number of events per queue, does not include sample event.\n"
" -r --sample-per-prio Allocate a separate sample event for each priority. By default\n"
" a single sample event is used and its priority is changed after\n"
" each processing round.\n"
@@ -588,9 +581,8 @@ static void usage(void)
" 0: ODP_SCHED_SYNC_PARALLEL (default)\n"
" 1: ODP_SCHED_SYNC_ATOMIC\n"
" 2: ODP_SCHED_SYNC_ORDERED\n"
- " -w, --warm-up <number> Number of warm-up rounds, default=%d, min=1\n"
- " -h, --help Display help and exit.\n\n"
- , TEST_ROUNDS, WARM_UP_ROUNDS);
+ " -w, --warm-up <number> Number of warm-up rounds, default=100, min=1\n"
+ " -h, --help Display help and exit.\n\n");
}
/**
@@ -631,16 +623,18 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
args->forward_mode = EVENT_FORWARD_RAND;
args->num_group = 0;
args->isolate = 0;
- args->test_rounds = TEST_ROUNDS;
- args->warm_up_rounds = WARM_UP_ROUNDS;
+ args->test_rounds = 10;
+ args->warm_up_rounds = 100;
args->sync_type = ODP_SCHED_SYNC_PARALLEL;
- args->sample_per_prio = SAMPLE_EVENT_PER_PRIO;
- args->prio[LO_PRIO].queues = LO_PRIO_QUEUES;
- args->prio[HI_PRIO].queues = HI_PRIO_QUEUES;
- args->prio[LO_PRIO].events = LO_PRIO_EVENTS;
- args->prio[HI_PRIO].events = HI_PRIO_EVENTS;
- args->prio[LO_PRIO].events_per_queue = EVENTS_PER_LO_PRIO_QUEUE;
- args->prio[HI_PRIO].events_per_queue = EVENTS_PER_HI_PRIO_QUEUE;
+ args->sample_per_prio = 0;
+ args->prio[LO_PRIO].queues = 64;
+ args->prio[HI_PRIO].queues = 16;
+ args->prio[LO_PRIO].events = 32;
+ args->prio[HI_PRIO].events = 0;
+ args->prio[LO_PRIO].events_per_queue = 1;
+ args->prio[HI_PRIO].events_per_queue = 0;
+ args->prio[LO_PRIO].sample_events = 0;
+ args->prio[HI_PRIO].sample_events = 1;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -737,6 +731,9 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
exit(EXIT_FAILURE);
}
+
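+	/* Sample also low priority events when there are no high priority
+	 * queues, or when each priority has its own sample event */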
+ if (args->prio[HI_PRIO].queues == 0 || args->sample_per_prio)
+ args->prio[LO_PRIO].sample_events = 1;
}
static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
diff --git a/test/performance/odp_stress.c b/test/performance/odp_stress.c
new file mode 100644
index 000000000..15b44c113
--- /dev/null
+++ b/test/performance/odp_stress.c
@@ -0,0 +1,857 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint64_t period_ns;
+ uint64_t rounds;
+ uint64_t mem_size;
+ int mode;
+ int group_mode;
+
+} test_options_t;
+
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t tot_nsec;
+ uint64_t work_nsec;
+
+} test_stat_t;
+
+typedef struct test_stat_sum_t {
+ uint64_t rounds;
+ uint64_t tot_nsec;
+ uint64_t work_nsec;
+
+} test_stat_sum_t;
+
+typedef struct thread_arg_t {
+ void *global;
+ int worker_idx;
+
+} thread_arg_t;
+
+typedef struct test_global_t {
+ test_options_t test_options;
+ odp_atomic_u32_t exit_test;
+ odp_barrier_t barrier;
+ odp_cpumask_t cpumask;
+ odp_timer_pool_t timer_pool;
+ odp_pool_t tmo_pool;
+ uint64_t period_ticks;
+ uint8_t *worker_mem;
+ odp_timer_t timer[ODP_THREAD_COUNT_MAX];
+ odp_queue_t tmo_queue[ODP_THREAD_COUNT_MAX];
+ odp_schedule_group_t group[ODP_THREAD_COUNT_MAX];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+ thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ test_stat_sum_t stat_sum;
+
+} test_global_t;
+
+test_global_t *test_global;
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Stress test options:\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n"
+ " -p, --period_ns Timeout period in nsec. Default: 1 sec\n"
+ " -r, --rounds Number of timeout rounds. Default: 10\n"
+ " -m, --mode Select test mode. Default: 1\n"
+ " 0: No stress, just wait for timeouts\n"
+ " 1: Memcpy\n"
+ " -s, --mem_size Memory size per worker in bytes. Default: 2048\n"
+ " -g, --group_mode Select schedule group mode: Default: 1\n"
+ " 0: Use GROUP_ALL group. Scheduler load balances timeout events.\n"
+ " 1: Create a group per CPU. Dedicated timeout event per CPU.\n"
+ " -h, --help This help\n"
+ "\n");
+}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"period_ns", required_argument, NULL, 'p'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"mode", required_argument, NULL, 'm'},
+ {"mem_size", required_argument, NULL, 's'},
+ {"group_mode", required_argument, NULL, 'g'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:p:r:m:s:g:h";
+
+ test_options->num_cpu = 1;
+ test_options->period_ns = 1000 * ODP_TIME_MSEC_IN_NS;
+ test_options->rounds = 10;
+ test_options->mode = 1;
+ test_options->mem_size = 2048;
+ test_options->group_mode = 1;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'p':
+ test_options->period_ns = atoll(optarg);
+ break;
+ case 'r':
+ test_options->rounds = atoll(optarg);
+ break;
+ case 'm':
+ test_options->mode = atoi(optarg);
+ break;
+ case 's':
+ test_options->mem_size = atoll(optarg);
+ break;
+ case 'g':
+ test_options->group_mode = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->mode) {
+ if (test_options->mem_size < 2) {
+ ODPH_ERR("Too small memory size\n");
+ return -1;
+ }
+ }
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+	/* One thread slot is reserved for the main thread */
+ if (num_cpu < 0 || num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ ODPH_ERR("Bad number of workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+		ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
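+	/* The main thread also waits on the barrier, hence num_cpu + 1 */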
+ odp_barrier_init(&global->barrier, num_cpu + 1);
+
+ return 0;
+}
+
+static int join_group(test_global_t *global, int worker_idx, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+ group = global->group[worker_idx];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ ODPH_ERR("Thread %i failed to join group %i\n", thr, worker_idx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int worker_thread(void *arg)
+{
+ int thr, timer_ret;
+ uint32_t exit_test;
+ odp_event_t ev;
+ odp_timeout_t tmo;
+ odp_timer_t timer;
+ uint64_t tot_nsec, work_sum, max_nsec;
+ odp_timer_start_t start_param;
+ odp_time_t t1, t2, max_time;
+ odp_time_t work_t1, work_t2;
+ uint8_t *src, *dst;
+ thread_arg_t *thread_arg = arg;
+ int worker_idx = thread_arg->worker_idx;
+ test_global_t *global = thread_arg->global;
+ test_options_t *test_options = &global->test_options;
+ int mode = test_options->mode;
+ uint64_t mem_size = test_options->mem_size;
+ uint64_t copy_size = mem_size / 2;
+ uint64_t rounds = 0;
+ int ret = 0;
+ uint32_t done = 0;
+ uint64_t wait = ODP_SCHED_WAIT;
+
+ thr = odp_thread_id();
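+	/* Force stop deadline: allow up to twice the expected test duration */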
+ max_nsec = 2 * test_options->rounds * test_options->period_ns;
+ max_time = odp_time_local_from_ns(max_nsec);
+ printf("Thread %i starting on CPU %i\n", thr, odp_cpu_id());
+
+ if (test_options->group_mode == 0) {
+		/* Timeout events are load balanced. Use this wait
+		 * period to poll the exit status. */
+ wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ } else {
+ if (join_group(global, worker_idx, thr)) {
+ /* Join failed, exit after barrier */
+ wait = ODP_SCHED_NO_WAIT;
+ done = 1;
+ }
+ }
+
+ if (mode) {
+ src = global->worker_mem + worker_idx * mem_size;
+ dst = src + copy_size;
+ }
+
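+	/* Timers are restarted with a relative expiration of one period */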
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = global->period_ticks;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ work_sum = 0;
+ t1 = odp_time_local();
+ max_time = odp_time_sum(t1, max_time);
+
+ while (1) {
+ ev = odp_schedule(NULL, wait);
+
+ exit_test = odp_atomic_load_u32(&global->exit_test);
+ exit_test += done;
+
+ if (ev == ODP_EVENT_INVALID) {
+ odp_time_t cur_time = odp_time_local();
+
+ if (odp_time_cmp(cur_time, max_time) > 0)
+ exit_test += 1;
+
+ if (exit_test) {
+				/* No event held, so exit the loop without a scheduling context */
+ break;
+ }
+
+ continue;
+ }
+
+ rounds++;
+
+ if (rounds < test_options->rounds) {
+ tmo = odp_timeout_from_event(ev);
+ timer = odp_timeout_timer(tmo);
+ start_param.tmo_ev = ev;
+
+ timer_ret = odp_timer_start(timer, &start_param);
+
+ if (timer_ret != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer start failed (%" PRIu64 ")\n", rounds);
+ done = 1;
+ }
+ } else {
+ done = 1;
+ }
+
+ /* Do work */
+ if (mode) {
+ work_t1 = odp_time_local();
+
+ memcpy(dst, src, copy_size);
+
+ work_t2 = odp_time_local();
+ work_sum += odp_time_diff_ns(work_t2, work_t1);
+ }
+
+ if (done) {
+			/* Timer not restarted; free the event and do not wait for more */
+ wait = ODP_SCHED_NO_WAIT;
+ odp_event_free(ev);
+ }
+ }
+
+ t2 = odp_time_local();
+ tot_nsec = odp_time_diff_ns(t2, t1);
+
+	/* Update stats */
+ global->stat[thr].rounds = rounds;
+ global->stat[thr].tot_nsec = tot_nsec;
+ global->stat[thr].work_nsec = work_sum;
+
+ return ret;
+}
+
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t thr_common;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+ odph_thread_common_param_init(&thr_common);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = worker_thread;
+ thr_param[i].arg = &global->thread_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param, num_cpu);
+
+ if (ret != num_cpu) {
+ ODPH_ERR("Thread create failed %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int create_timers(test_global_t *global)
+{
+ odp_timer_capability_t timer_capa;
+ odp_timer_res_capability_t timer_res_capa;
+ odp_timer_pool_param_t timer_pool_param;
+ odp_timer_pool_t tp;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ double duration;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ uint64_t period_ns = test_options->period_ns;
+ uint64_t res_ns = period_ns / 1000;
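+	/* Request a timer resolution of 1/1000th of the period; raised
+	 * below if the implementation cannot meet it */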
+
+ if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
+ ODPH_ERR("Timer capability failed\n");
+ return -1;
+ }
+
+ if (timer_capa.queue_type_sched == 0) {
+ ODPH_ERR("Timer does not support sched queues\n");
+ return -1;
+ }
+
+ memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t));
+ timer_res_capa.max_tmo = 2 * period_ns;
+ if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &timer_res_capa)) {
+		ODPH_ERR("Timer resolution capability failed. Period too long.\n");
+ return -1;
+ }
+
+ if (res_ns < timer_res_capa.res_ns)
+ res_ns = timer_res_capa.res_ns;
+
+ duration = test_options->rounds * (double)period_ns / ODP_TIME_SEC_IN_NS;
+
+ printf(" num timers %u\n", num_cpu);
+ printf(" resolution %" PRIu64 " nsec\n", res_ns);
+ printf(" period %" PRIu64 " nsec\n", period_ns);
+ printf(" test duration %.2f sec\n", duration);
+ if (test_options->group_mode == 0)
+ printf(" force stop after %.2f sec\n", 2 * duration);
+ printf("\n");
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = num_cpu;
+
+ pool = odp_pool_create("Timeout pool", &pool_param);
+ global->tmo_pool = pool;
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Pool create failed\n");
+ return -1;
+ }
+
+ odp_timer_pool_param_init(&timer_pool_param);
+ timer_pool_param.res_ns = res_ns;
+ timer_pool_param.min_tmo = period_ns / 2;
+ timer_pool_param.max_tmo = 2 * period_ns;
+ timer_pool_param.num_timers = 2 * num_cpu; /* extra for stop events */
+ timer_pool_param.clk_src = ODP_CLOCK_DEFAULT;
+
+ tp = odp_timer_pool_create("Stress timers", &timer_pool_param);
+ global->timer_pool = tp;
+ if (tp == ODP_TIMER_POOL_INVALID) {
+ ODPH_ERR("Timer pool create failed\n");
+ return -1;
+ }
+
+ odp_timer_pool_start();
+
+ global->period_ticks = odp_timer_ns_to_tick(tp, period_ns);
+
+ return 0;
+}
+
+static int create_queues(test_global_t *global)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t thrmask;
+ odp_queue_param_t queue_param;
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ return -1;
+ }
+
+ if (test_options->group_mode) {
+ if ((sched_capa.max_groups - 1) < num_cpu) {
+ ODPH_ERR("Too many workers. Not enough schedule groups.\n");
+ return -1;
+ }
+
+ odp_thrmask_zero(&thrmask);
+
+ /* A group per worker thread */
+ for (i = 0; i < num_cpu; i++) {
+ global->group[i] = odp_schedule_group_create(NULL, &thrmask);
+
+ if (global->group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Schedule group create failed (%u)\n", i);
+ return -1;
+ }
+ }
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ for (i = 0; i < num_cpu; i++) {
+ if (test_options->group_mode)
+ queue_param.sched.group = global->group[i];
+
+ global->tmo_queue[i] = odp_queue_create(NULL, &queue_param);
+
+ if (global->tmo_queue[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Timeout dest queue create failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_timers(test_global_t *global)
+{
+ odp_timer_start_t start_param;
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ odp_timeout_t tmo[num_cpu];
+ odp_timer_t timer[num_cpu];
+
+ for (i = 0; i < num_cpu; i++) {
+ tmo[i] = odp_timeout_alloc(global->tmo_pool);
+
+ if (tmo[i] == ODP_TIMEOUT_INVALID) {
+ ODPH_ERR("Timeout alloc failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < num_cpu; i++) {
+ timer[i] = odp_timer_alloc(global->timer_pool, global->tmo_queue[i], NULL);
+
+ if (timer[i] == ODP_TIMER_INVALID) {
+ ODPH_ERR("Timer alloc failed (%u)\n", i);
+ return -1;
+ }
+
+ global->timer[i] = timer[i];
+ }
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = global->period_ticks;
+
+ for (i = 0; i < num_cpu; i++) {
+ start_param.tmo_ev = odp_timeout_to_event(tmo[i]);
+
+ if (odp_timer_start(timer[i], &start_param) != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer start failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void destroy_timers(test_global_t *global)
+{
+ uint32_t i;
+ odp_event_t ev;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ for (i = 0; i < num_cpu; i++) {
+ odp_timer_t timer = global->timer[i];
+
+ if (timer == ODP_TIMER_INVALID)
+ continue;
+
+ ev = odp_timer_free(timer);
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ }
+
+ if (global->timer_pool != ODP_TIMER_POOL_INVALID)
+ odp_timer_pool_destroy(global->timer_pool);
+
+ for (i = 0; i < num_cpu; i++) {
+ odp_queue_t queue = global->tmo_queue[i];
+
+ if (queue == ODP_QUEUE_INVALID)
+ continue;
+
+ if (odp_queue_destroy(queue))
+ ODPH_ERR("Queue destroy failed (%u)\n", i);
+ }
+
+ if (test_options->group_mode) {
+ for (i = 0; i < num_cpu; i++) {
+ odp_schedule_group_t group = global->group[i];
+
+ if (group == ODP_SCHED_GROUP_INVALID)
+ continue;
+
+ if (odp_schedule_group_destroy(group))
+ ODPH_ERR("Schedule group destroy failed (%u)\n", i);
+ }
+ }
+
+ if (global->tmo_pool != ODP_POOL_INVALID)
+ odp_pool_destroy(global->tmo_pool);
+}
+
+static void sig_handler(int signo)
+{
+ (void)signo;
+
+ if (test_global == NULL)
+ return;
+
+ odp_atomic_add_u32(&test_global->exit_test, 1);
+}
+
+static void stop_workers(test_global_t *global)
+{
+ uint32_t i;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ odp_queue_t queue;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ odp_atomic_add_u32(&test_global->exit_test, 1);
+
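+	/* Wake workers blocked in odp_schedule() with dummy timeout events */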
+ for (i = 0; i < num_cpu; i++) {
+ queue = global->tmo_queue[i];
+ if (queue == ODP_QUEUE_INVALID)
+ continue;
+
+ tmo = odp_timeout_alloc(global->tmo_pool);
+
+ if (tmo == ODP_TIMEOUT_INVALID)
+ continue;
+
+ ev = odp_timeout_to_event(tmo);
+ if (odp_queue_enq(queue, ev)) {
+ ODPH_ERR("Enqueue failed %u\n", i);
+ odp_event_free(ev);
+ }
+ }
+}
+
+static void sum_stat(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ test_stat_sum_t *sum = &global->stat_sum;
+
+ memset(sum, 0, sizeof(test_stat_sum_t));
+
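+	/* Stats are indexed by ODP thread ID; worker IDs are assumed to be
+	 * 1..num_cpu, with ID 0 held by the main thread */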
+	for (i = 1; i < num_cpu + 1; i++) {
+ sum->rounds += global->stat[i].rounds;
+ sum->tot_nsec += global->stat[i].tot_nsec;
+ sum->work_nsec += global->stat[i].work_nsec;
+ }
+}
+
+static void print_stat(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ int mode = test_options->mode;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double sec_ave, work_ave, perc;
+ double round_ave = 0.0;
+ double copy_ave = 0.0;
+ double copy_tot = 0.0;
+ double cpu_load = 0.0;
+ const double mega = 1000000.0;
+ const double giga = 1000000000.0;
+ uint32_t num = 0;
+
+ if (num_cpu == 0)
+ return;
+
+ sec_ave = (sum->tot_nsec / giga) / num_cpu;
+ work_ave = (sum->work_nsec / giga) / num_cpu;
+
+ printf("\n");
+ printf("CPU load from work (percent) per thread:\n");
+ printf("----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 1; i < num_cpu + 1; i++) {
+ if (global->stat[i].tot_nsec == 0)
+ continue;
+
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ perc = 100.0 * ((double)global->stat[i].work_nsec) / global->stat[i].tot_nsec;
+
+ printf("%6.2f ", perc);
+ num++;
+ }
+
+ if (sec_ave != 0.0) {
+ round_ave = (double)sum->rounds / num_cpu;
+ cpu_load = 100.0 * (work_ave / sec_ave);
+
+ if (mode) {
+ uint64_t copy_bytes = sum->rounds * test_options->mem_size / 2;
+
+ copy_ave = copy_bytes / (sum->work_nsec / giga);
+ copy_tot = copy_ave * num_cpu;
+ }
+ }
+
+ printf("\n\n");
+ printf("TOTAL (%i workers)\n", num_cpu);
+ printf(" ave time: %.2f sec\n", sec_ave);
+ printf(" ave work: %.2f sec\n", work_ave);
+ printf(" ave CPU load: %.2f\n", cpu_load);
+ printf(" ave rounds per sec: %.2f\n", round_ave / sec_ave);
+ printf(" ave copy speed: %.2f MB/sec\n", copy_ave / mega);
+ printf(" total copy speed: %.2f MB/sec\n", copy_tot / mega);
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ odph_helper_options_t helper_options;
+ odp_instance_t instance;
+ odp_init_t init;
+ odp_shm_t shm, shm_global;
+ odp_schedule_config_t sched_config;
+ test_global_t *global;
+ test_options_t *test_options;
+ int i, mode;
+ uint32_t num_cpu;
+ uint64_t mem_size;
+ odp_shm_t shm_work = ODP_SHM_INVALID;
+
+ signal(SIGINT, sig_handler);
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init);
+ init.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ shm = odp_shm_reserve("Stress global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+ shm_global = shm;
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("SHM reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ ODPH_ERR("SHM addr failed\n");
+ exit(EXIT_FAILURE);
+ }
+ test_global = global;
+
+ memset(global, 0, sizeof(test_global_t));
+ odp_atomic_init_u32(&global->exit_test, 0);
+
+ global->timer_pool = ODP_TIMER_POOL_INVALID;
+ global->tmo_pool = ODP_POOL_INVALID;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ global->timer[i] = ODP_TIMER_INVALID;
+ global->tmo_queue[i] = ODP_QUEUE_INVALID;
+ global->group[i] = ODP_SCHED_GROUP_INVALID;
+
+ global->thread_arg[i].global = global;
+ global->thread_arg[i].worker_idx = i;
+ }
+
+ if (parse_options(argc, argv, &global->test_options))
+ exit(EXIT_FAILURE);
+
+ test_options = &global->test_options;
+ mode = test_options->mode;
+
+ odp_sys_info_print();
+
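+	/* Enable only the ALL schedule group; per-worker groups are
+	 * created separately in group mode 1 */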
+ odp_schedule_config_init(&sched_config);
+ sched_config.sched_group.all = 1;
+ sched_config.sched_group.control = 0;
+ sched_config.sched_group.worker = 0;
+
+ odp_schedule_config(&sched_config);
+
+ if (set_num_cpu(global))
+ exit(EXIT_FAILURE);
+
+ num_cpu = test_options->num_cpu;
+
+ /* Memory for workers */
+ if (mode) {
+ mem_size = test_options->mem_size * num_cpu;
+
+ shm = odp_shm_reserve("Test memory", mem_size, ODP_CACHE_LINE_SIZE, 0);
+ shm_work = shm;
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("SHM reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ global->worker_mem = odp_shm_addr(shm);
+ if (global->worker_mem == NULL) {
+ ODPH_ERR("SHM addr failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(global->worker_mem, 0, mem_size);
+ }
+
+ printf("\n");
+ printf("Test parameters\n");
+ printf(" num workers %u\n", num_cpu);
+ printf(" mode %i\n", mode);
+ printf(" group mode %i\n", test_options->group_mode);
+ printf(" mem size per worker %" PRIu64 " bytes\n", test_options->mem_size);
+
+ if (create_timers(global))
+ exit(EXIT_FAILURE);
+
+ if (create_queues(global))
+ exit(EXIT_FAILURE);
+
+ /* Start worker threads */
+ start_workers(global, instance);
+
+ /* Wait until all workers are ready */
+ odp_barrier_wait(&global->barrier);
+
+ if (start_timers(global)) {
+		/* Stop all workers if some timer did not start */
+ ODPH_ERR("Timers did not start. Stopping workers.\n");
+ stop_workers(global);
+ }
+
+	/* Wait for workers to exit */
+ odph_thread_join(global->thread_tbl, num_cpu);
+
+ sum_stat(global);
+
+ print_stat(global);
+
+ destroy_timers(global);
+
+ if (mode) {
+ if (odp_shm_free(shm_work)) {
+ ODPH_ERR("SHM free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_shm_free(shm_global)) {
+ ODPH_ERR("SHM free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Term local failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Term global failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index d4329bc6b..cbdd219fc 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -8,7 +8,6 @@
#include <malloc.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index ce52fd2d7..e0665b7cf 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -8,7 +8,6 @@
#include <malloc.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index c3484e14a..2d9632342 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -75,6 +75,8 @@ static void test_pool_alloc_free(const odp_pool_param_t *param)
odp_pool_print(pool);
for (i = 0; i < num; i++) {
+ odp_buffer_t buf;
+
buffer[i] = odp_buffer_alloc(pool);
if (buffer[i] == ODP_BUFFER_INVALID)
@@ -88,6 +90,10 @@ static void test_pool_alloc_free(const odp_pool_param_t *param)
ev = odp_buffer_to_event(buffer[i]);
CU_ASSERT(odp_buffer_from_event(ev) == buffer[i]);
+ odp_buffer_to_event_multi(&buffer[i], &ev, 1);
+ odp_buffer_from_event_multi(&buf, &ev, 1);
+ CU_ASSERT(buf == buffer[i]);
+
if (odp_event_type(ev) != ODP_EVENT_BUFFER)
wrong_type = true;
if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE)
@@ -147,12 +153,20 @@ static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
ret = 0;
for (i = 0; i < num; i += ret) {
+ odp_buffer_t buf[BURST];
+ odp_event_t event[BURST];
+
ret = odp_buffer_alloc_multi(pool, &buffer[i], BURST);
CU_ASSERT(ret >= 0);
CU_ASSERT(ret <= BURST);
if (ret <= 0)
break;
+
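+		/* Round-trip conversion must preserve the buffer handles */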
+ odp_buffer_to_event_multi(&buffer[i], event, ret);
+ odp_buffer_from_event_multi(buf, event, ret);
+ for (int j = 0; j < ret; j++)
+ CU_ASSERT(buf[j] == buffer[i + j]);
}
num_buf = i;
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
index f914ea2ec..f7aa96e52 100644
--- a/test/validation/api/classification/odp_classification_basic.c
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -53,7 +53,9 @@ static void classification_test_create_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(NULL, &cls_param);
CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
@@ -124,7 +126,9 @@ static void classification_test_destroy_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(name, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -172,7 +176,9 @@ static void classification_test_create_pmr_match(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create("pmr_match", &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -229,7 +235,9 @@ static void classification_test_cos_set_queue(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos_queue = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);
@@ -271,7 +279,9 @@ static void classification_test_cos_set_pool(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -289,6 +299,8 @@ static void classification_test_cos_set_pool(void)
odp_pool_destroy(cos_pool);
}
+#if ODP_DEPRECATED_API
+
static void classification_test_cos_set_drop(void)
{
int retval;
@@ -324,6 +336,8 @@ static void classification_test_cos_set_drop(void)
odp_queue_destroy(queue);
}
+#endif
+
static void classification_test_pmr_composite_create(void)
{
odp_pmr_t pmr_composite;
@@ -360,7 +374,9 @@ static void classification_test_pmr_composite_create(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create("pmr_match", &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -446,7 +462,9 @@ odp_testinfo_t classification_suite_basic[] = {
ODP_TEST_INFO(classification_test_destroy_cos),
ODP_TEST_INFO(classification_test_create_pmr_match),
ODP_TEST_INFO(classification_test_cos_set_queue),
+#if ODP_DEPRECATED_API
ODP_TEST_INFO(classification_test_cos_set_drop),
+#endif
ODP_TEST_INFO(classification_test_cos_set_pool),
ODP_TEST_INFO(classification_test_pmr_composite_create),
ODP_TEST_INFO_CONDITIONAL(classification_test_create_cos_with_hash_queues,
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index 280e525af..0b29783c0 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -77,7 +77,9 @@ void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
odp_cls_cos_param_init(&cls_param);
cls_param.pool = default_pool;
cls_param.queue = default_queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
default_cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(default_cos != ODP_COS_INVALID);
@@ -151,7 +153,9 @@ static void classification_test_pktin_classifier_flag(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -245,7 +249,9 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -418,7 +424,9 @@ static void test_pmr(const odp_pmr_param_t *pmr_param, odp_packet_t pkt,
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create("PMR test cos", &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -689,7 +697,9 @@ static void classification_test_pmr_term_dmac(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1027,7 +1037,9 @@ static void classification_test_pmr_pool_set(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1126,7 +1138,9 @@ static void classification_test_pmr_queue_set(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1427,7 +1441,9 @@ static void test_pmr_series(const int num_udp, int marking)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue_ip;
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos_ip = odp_cls_cos_create("cos_ip", &cls_param);
CU_ASSERT_FATAL(cos_ip != ODP_COS_INVALID);
@@ -1469,7 +1485,9 @@ static void test_pmr_series(const int num_udp, int marking)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue_udp[i];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
cos_udp[i] = odp_cls_cos_create(name, &cls_param);
CU_ASSERT_FATAL(cos_udp[i] != ODP_COS_INVALID);
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index 4511fc1d7..962885c06 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -19,7 +19,10 @@ static odp_pool_t pool_default;
static odp_pktio_t pktio_loop;
static odp_pktio_capability_t pktio_capa;
static odp_cls_testcase_u tc;
+
+#ifdef ODP_DEPRECATED
static int global_num_l2_qos;
+#endif
#define NUM_COS_PMR_CHAIN 2
#define NUM_COS_DEFAULT 1
@@ -259,7 +262,9 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_SRC];
cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -289,7 +294,9 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_DST];
cls_param.queue = queue_list[CLS_PMR_CHAIN_DST];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -409,7 +416,9 @@ void configure_pktio_default_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_DEFAULT];
cls_param.queue = queue_list[CLS_DEFAULT];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -623,7 +632,9 @@ void configure_pktio_error_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_ERROR];
cls_param.queue = queue_list[CLS_ERROR];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -698,6 +709,8 @@ static void classification_test_pktio_set_headroom(void)
CU_ASSERT(retval < 0);
}
+#ifdef ODP_DEPRECATED
+
void configure_cos_with_l2_priority(odp_bool_t enable_pktv)
{
uint8_t num_qos = CLS_L2_QOS_MAX;
@@ -798,6 +811,8 @@ void test_cos_with_l2_priority(odp_bool_t enable_pktv)
}
}
+#endif
+
void configure_pmr_cos(odp_bool_t enable_pktv)
{
uint16_t val;
@@ -827,7 +842,9 @@ void configure_pmr_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR];
cls_param.queue = queue_list[CLS_PMR];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -910,7 +927,9 @@ void configure_pktio_pmr_composite(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_SET];
cls_param.queue = queue_list[CLS_PMR_SET];
+#if ODP_DEPRECATED_API
cls_param.drop_policy = ODP_COS_DROP_POOL;
+#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -1010,11 +1029,13 @@ static void classification_test_pktio_configure_common(odp_bool_t enable_pktv)
tc.pmr_chain = 1;
num_cos -= NUM_COS_PMR_CHAIN;
}
+#ifdef ODP_DEPRECATED
if (num_cos >= NUM_COS_L2_PRIO && TEST_L2_QOS) {
configure_cos_with_l2_priority(enable_pktv);
tc.l2_priority = 1;
num_cos -= NUM_COS_L2_PRIO;
}
+#endif
if (num_cos >= NUM_COS_PMR && TEST_PMR) {
configure_pmr_cos(enable_pktv);
tc.pmr_cos = 1;
@@ -1049,8 +1070,10 @@ static void classification_test_pktio_test_common(odp_bool_t enable_pktv)
test_pktio_error_cos(enable_pktv);
if (tc.pmr_chain && TEST_PMR_CHAIN)
test_cls_pmr_chain(enable_pktv);
+#ifdef ODP_DEPRECATED
if (tc.l2_priority && TEST_L2_QOS)
test_cos_with_l2_priority(enable_pktv);
+#endif
if (tc.pmr_cos && TEST_PMR)
test_pmr_cos(enable_pktv);
if (tc.pmr_composite_cos && TEST_PMR_SET)
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 8fa43099d..06e98d4cb 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -40,7 +40,9 @@ typedef union odp_cls_testcase {
uint32_t drop_cos:1;
uint32_t error_cos:1;
uint32_t pmr_chain:1;
+#ifdef ODP_DEPRECATED
uint32_t l2_priority:1;
+#endif
uint32_t pmr_cos:1;
uint32_t pmr_composite_cos:1;
};
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 150a470ec..10f1b5ee2 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -7,7 +7,6 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include "test_vectors.h"
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index c98afae4e..11860bf9b 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -16,11 +16,24 @@
#include "test_vectors.h"
#include "reass_test_vectors.h"
+#define EVENT_BUFFER_SIZE 3
+
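+/* The scheduler may hand out an event from a different queue than the one a
+ * test is waiting on. Such events are buffered and returned on a later
+ * request for that queue. */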
+struct buffered_event_s {
+ odp_queue_t from;
+ odp_event_t event;
+};
+
+static struct buffered_event_s sched_ev_buffer[EVENT_BUFFER_SIZE];
struct suite_context_s suite_context;
static odp_ipsec_capability_t capa;
+static int sched_ev_buffer_tail;
+odp_bool_t sa_expiry_notified;
+
#define PKT_POOL_NUM 64
#define EVENT_WAIT_TIME ODP_TIME_SEC_IN_NS
+#define STATUS_EVENT_WAIT_TIME ODP_TIME_MSEC_IN_NS
+#define SCHED_EVENT_RETRY_COUNT 2
#define PACKET_USER_PTR ((void *)0x1212fefe)
#define IPSEC_SA_CTX ((void *)0xfefefafa)
@@ -101,11 +114,68 @@ static int pktio_start(odp_pktio_t pktio, odp_bool_t in, odp_bool_t out)
return 1;
}
-static odp_event_t sched_queue_deq(uint64_t wait_ns)
+static int sched_event_buffer_add(odp_queue_t from, odp_event_t event)
+{
+ if (sched_ev_buffer_tail + 1 == EVENT_BUFFER_SIZE)
+ return -ENOMEM;
+
+ sched_ev_buffer[sched_ev_buffer_tail].from = from;
+ sched_ev_buffer[sched_ev_buffer_tail].event = event;
+ sched_ev_buffer_tail++;
+
+ return 0;
+}
+
+static odp_event_t sched_event_buffer_get(odp_queue_t from)
+{
+ odp_event_t ev;
+ int i, j;
+
+ if (odp_queue_type(from) == ODP_QUEUE_TYPE_PLAIN)
+ return ODP_EVENT_INVALID;
+
+ /* Look for a matching entry */
+ for (i = 0; i < sched_ev_buffer_tail; i++)
+ if (sched_ev_buffer[i].from == from)
+ break;
+
+ /* Remove entry from buffer */
+ if (i != sched_ev_buffer_tail) {
+ ev = sched_ev_buffer[i].event;
+
+ for (j = 1; i + j < sched_ev_buffer_tail; j++)
+ sched_ev_buffer[i + j - 1] = sched_ev_buffer[i + j];
+
+ sched_ev_buffer_tail--;
+ } else {
+ ev = ODP_EVENT_INVALID;
+ }
+
+ return ev;
+}
+
+static odp_event_t sched_queue_deq(odp_queue_t queue, uint64_t wait_ns)
{
uint64_t wait = odp_schedule_wait_time(wait_ns);
+ odp_event_t ev = ODP_EVENT_INVALID;
+ odp_queue_t from;
+ int retry = 0;
- return odp_schedule(NULL, wait);
+ /* Check if buffered events are available */
+ ev = sched_event_buffer_get(queue);
+ if (ODP_EVENT_INVALID != ev)
+ return ev;
+
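+	/* Schedule until an event arrives from the requested queue,
+	 * buffering events that arrive from other queues */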
+ do {
+ ev = odp_schedule(&from, wait);
+
+ if ((ev != ODP_EVENT_INVALID) && (from != queue)) {
+ CU_ASSERT_FATAL(0 == sched_event_buffer_add(from, ev));
+ ev = ODP_EVENT_INVALID;
+ }
+ } while (ev == ODP_EVENT_INVALID && (++retry < SCHED_EVENT_RETRY_COUNT));
+
+ return ev;
}
static odp_event_t plain_queue_deq(odp_queue_t queue, uint64_t wait_ns)
@@ -131,7 +201,7 @@ static odp_event_t recv_event(odp_queue_t queue, uint64_t wait_ns)
if (odp_queue_type(queue) == ODP_QUEUE_TYPE_PLAIN)
event = plain_queue_deq(queue, wait_ns);
else
- event = sched_queue_deq(wait_ns);
+ event = sched_queue_deq(queue, wait_ns);
return event;
}
@@ -370,6 +440,50 @@ void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
param->lifetime.hard_limit.packets = 10000 * 1000;
}
+static void ipsec_status_event_handle(odp_event_t ev_status,
+ odp_ipsec_sa_t sa,
+ enum ipsec_test_sa_expiry sa_expiry)
+{
+ odp_ipsec_status_t status = {
+ .id = 0,
+ .sa = ODP_IPSEC_SA_INVALID,
+ .result = 0,
+ .warn.all = 0,
+ };
+
+ CU_ASSERT_FATAL(ODP_EVENT_INVALID != ev_status);
+ CU_ASSERT_EQUAL(1, odp_event_is_valid(ev_status));
+ CU_ASSERT_EQUAL_FATAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(ev_status));
+
+ CU_ASSERT_EQUAL(0, odp_ipsec_status(&status, ev_status));
+ CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_WARN, status.id);
+ CU_ASSERT_EQUAL(sa, status.sa);
+ CU_ASSERT_EQUAL(0, status.result);
+
+ if (IPSEC_TEST_EXPIRY_IGNORED != sa_expiry) {
+ if (IPSEC_TEST_EXPIRY_SOFT_PKT == sa_expiry) {
+ CU_ASSERT_EQUAL(1, status.warn.soft_exp_packets);
+ sa_expiry_notified = true;
+ } else if (IPSEC_TEST_EXPIRY_SOFT_BYTE == sa_expiry) {
+ CU_ASSERT_EQUAL(1, status.warn.soft_exp_bytes);
+ sa_expiry_notified = true;
+ }
+ }
+
+ odp_event_free(ev_status);
+}
+
+void ipsec_status_event_get(odp_ipsec_sa_t sa,
+ enum ipsec_test_sa_expiry sa_expiry)
+{
+ uint64_t wait_time = (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED) ? 0 : STATUS_EVENT_WAIT_TIME;
+ odp_event_t ev;
+
+ ev = recv_event(suite_context.queue, wait_time);
+ if (ODP_EVENT_INVALID != ev)
+ ipsec_status_event_handle(ev, sa, sa_expiry);
+}
+
void ipsec_sa_destroy(odp_ipsec_sa_t sa)
{
odp_event_t event;
@@ -698,6 +812,45 @@ static int ipsec_process_in(const ipsec_test_part *part,
return num_out;
}
+static int ipsec_check_sa_expiry(enum ipsec_test_sa_expiry sa_expiry,
+ odp_ipsec_packet_result_t *result)
+{
+ if (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED)
+ return 0;
+
+ if (!sa_expiry_notified) {
+ if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_PKT) {
+ if (result->status.warn.soft_exp_packets)
+ sa_expiry_notified = true;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_BYTE) {
+ if (result->status.warn.soft_exp_bytes)
+ sa_expiry_notified = true;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) {
+ if (result->status.error.hard_exp_packets)
+ sa_expiry_notified = true;
+
+ return -1;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) {
+ if (result->status.error.hard_exp_bytes)
+ sa_expiry_notified = true;
+
+ return -1;
+ }
+ } else {
+ if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) {
+ CU_ASSERT(result->status.error.hard_exp_packets);
+
+ return -1;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) {
+ CU_ASSERT(result->status.error.hard_exp_bytes);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int ipsec_send_out_one(const ipsec_test_part *part,
odp_ipsec_sa_t sa,
odp_packet_t *pkto)
@@ -824,18 +977,39 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
pkto[i] = odp_packet_from_event(ev);
CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+
+ if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE)
+ ipsec_status_event_get(sa, part->out[i].sa_expiry);
+
i++;
continue;
}
ev = recv_event(suite_context.queue, 0);
if (ODP_EVENT_INVALID != ev) {
+ odp_event_type_t ev_type;
+
CU_ASSERT(odp_event_is_valid(ev) == 1);
+ ev_type = odp_event_types(ev, &subtype);
+
+ if ((ODP_EVENT_IPSEC_STATUS == ev_type) &&
+ part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE) {
+ ipsec_status_event_handle(ev, sa, part->out[i].sa_expiry);
+ continue;
+ }
+
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- odp_event_types(ev, &subtype));
+ ev_type);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC,
subtype);
- CU_ASSERT(part->out[i].status.error.all);
+
+			/* In SA hard expiry tests, hard expiry error bits are expected
+			 * to be set. The exact error bits, which depend on sa_expiry,
+			 * are checked later in ipsec_check_sa_expiry() by the caller
+			 * of this function.
+			 */
+ if (part->out[i].sa_expiry == IPSEC_TEST_EXPIRY_NONE)
+ CU_ASSERT(part->out[i].status.error.all);
pkto[i] = odp_ipsec_packet_from_event(ev);
CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
@@ -924,6 +1098,8 @@ static void verify_in(const ipsec_test_part *part,
ODP_IPSEC_OP_MODE_INLINE,
result.flag.inline_mode);
CU_ASSERT_EQUAL(sa, result.sa);
+ CU_ASSERT_EQUAL(part->out[i].status.warn.all,
+ result.status.warn.all);
if (ODP_IPSEC_SA_INVALID != sa)
CU_ASSERT_EQUAL(IPSEC_SA_CTX,
odp_ipsec_sa_context(sa));
@@ -999,6 +1175,11 @@ int ipsec_check_out(const ipsec_test_part *part, odp_ipsec_sa_t sa,
} else {
/* IPsec packet */
CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
+
+ if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE)
+ if (ipsec_check_sa_expiry(part->out[i].sa_expiry, &result) != 0)
+ return num_out;
+
CU_ASSERT_EQUAL(part->out[i].status.error.all,
result.status.error.all);
if (0 == result.status.error.all)
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index a2bb478a3..1c17693f7 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -72,6 +72,15 @@ enum ipsec_test_stats {
IPSEC_TEST_STATS_AUTH_ERR,
};
+enum ipsec_test_sa_expiry {
+ IPSEC_TEST_EXPIRY_NONE = 0,
+ IPSEC_TEST_EXPIRY_IGNORED,
+ IPSEC_TEST_EXPIRY_SOFT_BYTE,
+ IPSEC_TEST_EXPIRY_SOFT_PKT,
+ IPSEC_TEST_EXPIRY_HARD_BYTE,
+ IPSEC_TEST_EXPIRY_HARD_PKT,
+};
+
typedef struct {
odp_bool_t lookup;
odp_bool_t inline_hdr_in_packet;
@@ -101,9 +110,12 @@ typedef struct {
* differs from that of input test packet (pkt_in).
*/
uint32_t orig_ip_len;
+ enum ipsec_test_sa_expiry sa_expiry;
} out[MAX_FRAGS];
} ipsec_test_part;
+extern odp_bool_t sa_expiry_notified;
+
void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
odp_ipsec_dir_t dir,
odp_ipsec_protocol_t proto,
@@ -151,5 +163,7 @@ int ipsec_check_test_sa_update_seq_num(void);
int ipsec_check_esp_aes_gcm_128_reass_ipv4(void);
int ipsec_check_esp_aes_gcm_128_reass_ipv6(void);
int ipsec_check_esp_null_aes_xcbc(void);
+void ipsec_status_event_get(odp_ipsec_sa_t sa,
+ enum ipsec_test_sa_expiry sa_expiry);
#endif
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 236997735..bb318edad 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1593,6 +1593,138 @@ static void test_test_sa_update_seq_num(void)
printf("\n ");
}
+#define SOFT_LIMIT_PKT_CNT 1024
+#define HARD_LIMIT_PKT_CNT 2048
+#define DELTA_PKT_CNT 320
+
+static void test_out_ipv4_esp_sa_expiry(enum ipsec_test_sa_expiry expiry)
+{
+ int byte_count_per_packet = pkt_ipv4_icmp_0.len - pkt_ipv4_icmp_0.l3_offset;
+ uint32_t src = IPV4ADDR(10, 0, 11, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 22, 2);
+ odp_ipsec_tunnel_param_t out_tunnel;
+ odp_ipsec_sa_param_t param_out;
+ int i, inc, limit, delta;
+ uint64_t soft_limit_byte;
+ uint64_t hard_limit_byte;
+ uint64_t soft_limit_pkt;
+ uint64_t hard_limit_pkt;
+ odp_ipsec_sa_t out_sa;
+
+ switch (expiry) {
+ case IPSEC_TEST_EXPIRY_SOFT_PKT:
+ soft_limit_pkt = SOFT_LIMIT_PKT_CNT;
+ hard_limit_pkt = HARD_LIMIT_PKT_CNT;
+ soft_limit_byte = 0;
+ hard_limit_byte = 0;
+ delta = DELTA_PKT_CNT;
+ limit = soft_limit_pkt;
+ inc = 1;
+ break;
+ case IPSEC_TEST_EXPIRY_HARD_PKT:
+ soft_limit_pkt = SOFT_LIMIT_PKT_CNT;
+ hard_limit_pkt = HARD_LIMIT_PKT_CNT;
+ soft_limit_byte = 0;
+ hard_limit_byte = 0;
+ delta = DELTA_PKT_CNT;
+ limit = hard_limit_pkt;
+ inc = 1;
+ break;
+ case IPSEC_TEST_EXPIRY_SOFT_BYTE:
+ soft_limit_pkt = 0;
+ hard_limit_pkt = 0;
+ soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT;
+ hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT;
+ delta = byte_count_per_packet * DELTA_PKT_CNT;
+ limit = soft_limit_byte;
+ inc = byte_count_per_packet;
+ break;
+ case IPSEC_TEST_EXPIRY_HARD_BYTE:
+ soft_limit_pkt = 0;
+ hard_limit_pkt = 0;
+ soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT;
+ hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT;
+ delta = byte_count_per_packet * DELTA_PKT_CNT;
+ limit = hard_limit_byte;
+ inc = byte_count_per_packet;
+ break;
+ default:
+ return;
+ }
+
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ out_tunnel.ipv4.src_addr = &src;
+ out_tunnel.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out, ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ param_out.lifetime.soft_limit.bytes = soft_limit_byte;
+ param_out.lifetime.hard_limit.bytes = hard_limit_byte;
+ param_out.lifetime.soft_limit.packets = soft_limit_pkt;
+ param_out.lifetime.hard_limit.packets = hard_limit_pkt;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ ipsec_test_part test_out = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ESP,
+ },
+ },
+ };
+
+ test_out.out[0].sa_expiry = IPSEC_TEST_EXPIRY_IGNORED;
+
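+	/* Stay safely below the limit; any expiry events are ignored */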
+ for (i = 0; i < limit - delta; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ sa_expiry_notified = false;
+ test_out.out[0].sa_expiry = expiry;
+
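+	/* Approach and cross the limit; an expiry notification is expected */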
+ for (; i <= limit && !sa_expiry_notified; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ CU_ASSERT(sa_expiry_notified);
+
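+	/* Continue past the limit to exercise hard expiry error checks */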
+ for (; i <= limit + delta; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ ipsec_sa_destroy(out_sa);
+}
+
+static void test_out_ipv4_esp_sa_pkt_expiry(void)
+{
+ printf("\n IPv4 IPsec SA packet soft expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_PKT);
+
+ printf("\n IPv4 IPsec SA packet hard expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_PKT);
+
+ printf("\n");
+}
+
+static void test_out_ipv4_esp_sa_byte_expiry(void)
+{
+ printf("\n IPv4 IPsec SA byte soft expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_BYTE);
+
+ printf("\n IPv4 IPsec SA byte hard expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_BYTE);
+
+ printf("\n");
+}
+
static void ipsec_test_capability(void)
{
odp_ipsec_capability_t capa;
@@ -1900,6 +2032,10 @@ odp_testinfo_t ipsec_out_suite[] = {
ipsec_check_esp_null_aes_xcbc),
ODP_TEST_INFO_CONDITIONAL(test_sa_info,
ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_pkt_expiry,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_byte_expiry,
+ ipsec_check_esp_aes_cbc_128_sha1),
ODP_TEST_INFO_CONDITIONAL(test_test_sa_update_seq_num,
ipsec_check_test_sa_update_seq_num),
ODP_TEST_INFO(test_esp_out_in_all_basic),
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index bf9318e76..729994d66 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -7,7 +7,6 @@
#include <malloc.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 2eb1a5a28..446411b22 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -1605,6 +1605,7 @@ static void test_defaults(uint8_t fill)
CU_ASSERT_EQUAL(qp_in.hash_enable, 0);
CU_ASSERT_EQUAL(qp_in.hash_proto.all_bits, 0);
CU_ASSERT_EQUAL(qp_in.num_queues, 1);
+ CU_ASSERT_EQUAL(qp_in.queue_size[0], 0);
CU_ASSERT_EQUAL(qp_in.queue_param.enq_mode, ODP_QUEUE_OP_MT);
CU_ASSERT_EQUAL(qp_in.queue_param.sched.prio, odp_schedule_default_prio());
CU_ASSERT_EQUAL(qp_in.queue_param.sched.sync, ODP_SCHED_SYNC_PARALLEL);
@@ -1749,6 +1750,8 @@ static void pktio_test_pktio_config(void)
CU_ASSERT(!config.reassembly.en_ipv6);
CU_ASSERT(config.reassembly.max_wait_time == 0);
CU_ASSERT(config.reassembly.max_num_frags == 2);
+ CU_ASSERT(config.flow_control.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF);
+ CU_ASSERT(config.flow_control.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF);
/* Indicate packet refs might be used */
config.pktout.bit.no_packet_refs = 0;
@@ -1822,11 +1825,13 @@ static void pktio_test_link_info(void)
link_info.duplex == ODP_PKTIO_LINK_DUPLEX_HALF ||
link_info.duplex == ODP_PKTIO_LINK_DUPLEX_FULL);
CU_ASSERT(link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_UNKNOWN ||
+ link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF ||
link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_ON ||
- link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF);
+ link_info.pause_rx == ODP_PKTIO_LINK_PFC_ON);
CU_ASSERT(link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_UNKNOWN ||
+ link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF ||
link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_ON ||
- link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF);
+ link_info.pause_tx == ODP_PKTIO_LINK_PFC_ON);
CU_ASSERT(link_info.status == ODP_PKTIO_LINK_STATUS_UNKNOWN ||
link_info.status == ODP_PKTIO_LINK_STATUS_UP ||
link_info.status == ODP_PKTIO_LINK_STATUS_DOWN);
@@ -1838,6 +1843,242 @@ static void pktio_test_link_info(void)
}
}
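+/* Check platform support for a flow control mode: pfc selects PFC (802.1Qbb)
+ * over plain pause (802.3x), and rx selects the RX direction over TX. */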
+static int pktio_check_flow_control(int pfc, int rx)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0)
+ return ODP_TEST_INACTIVE;
+
+ if (pfc == 0 && rx == 1 && capa.flow_control.pause_rx == 1)
+ return ODP_TEST_ACTIVE;
+
+ if (pfc == 1 && rx == 1 && capa.flow_control.pfc_rx == 1)
+ return ODP_TEST_ACTIVE;
+
+ if (pfc == 0 && rx == 0 && capa.flow_control.pause_tx == 1)
+ return ODP_TEST_ACTIVE;
+
+ if (pfc == 1 && rx == 0 && capa.flow_control.pfc_tx == 1)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int pktio_check_pause_rx(void)
+{
+ return pktio_check_flow_control(0, 1);
+}
+
+static int pktio_check_pause_tx(void)
+{
+ return pktio_check_flow_control(0, 0);
+}
+
+static int pktio_check_pause_both(void)
+{
+ int rx = pktio_check_pause_rx();
+ int tx = pktio_check_pause_tx();
+
+ if (rx == ODP_TEST_ACTIVE && tx == ODP_TEST_ACTIVE)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int pktio_check_pfc_rx(void)
+{
+ return pktio_check_flow_control(1, 1);
+}
+
+static int pktio_check_pfc_tx(void)
+{
+ return pktio_check_flow_control(1, 0);
+}
+
+static int pktio_check_pfc_both(void)
+{
+ int rx = pktio_check_pfc_rx();
+ int tx = pktio_check_pfc_tx();
+
+ if (rx == ODP_TEST_ACTIVE && tx == ODP_TEST_ACTIVE)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static odp_cos_t set_default_cos(odp_pktio_t pktio, odp_queue_t queue)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_cos_t cos;
+ int ret;
+
+ odp_cls_cos_param_init(&cos_param);
+ cos_param.queue = queue;
+ cos_param.pool = pool[0];
+
+ cos = odp_cls_cos_create("Default CoS", &cos_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ ret = odp_pktio_default_cos_set(pktio, cos);
+ CU_ASSERT_FATAL(ret == 0);
+
+ return cos;
+}
+
+static odp_cos_t create_pfc_cos(odp_cos_t default_cos, odp_queue_t queue, odp_pmr_t *pmr_out)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_cos_t cos;
+ odp_pmr_param_t pmr_param;
+ odp_pmr_t pmr;
+ uint8_t pcp = 1;
+ uint8_t mask = 0x7;
+
+	/* Set up a CoS to control PFC frame generation. A PFC frame for the VLAN
+	 * priority level is generated when queue/pool resource usage exceeds 80%. */
+ odp_cls_cos_param_init(&cos_param);
+ cos_param.queue = queue;
+ cos_param.pool = pool[0];
+ cos_param.bp.enable = 1;
+ cos_param.bp.threshold.type = ODP_THRESHOLD_PERCENT;
+ cos_param.bp.threshold.percent.max = 80;
+ cos_param.bp.pfc_level = pcp;
+
+ cos = odp_cls_cos_create("PFC CoS", &cos_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_PCP_0;
+ pmr_param.match.value = &pcp;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = 1;
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
+
+ *pmr_out = pmr;
+
+ return cos;
+}
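/*
 * Aside: the 80% limit above is one of the odp_threshold_t watermark types.
 * As a hedged variant, the same back-pressure setup could use an absolute
 * packet-count watermark, assuming the implementation accepts
 * ODP_THRESHOLD_PACKET here; the helper and the 1000-packet limit are
 * illustrative only.
 */
static void set_bp_packet_threshold(odp_cls_cos_param_t *cos_param, uint8_t pcp)
{
	cos_param->bp.enable = 1;
	cos_param->bp.threshold.type = ODP_THRESHOLD_PACKET;
	cos_param->bp.threshold.packet.max = 1000;
	cos_param->bp.pfc_level = pcp;
}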
+
+static void pktio_config_flow_control(int pfc, int rx, int tx)
+{
+ odp_pktio_t pktio;
+ odp_pktio_config_t config;
+ int ret;
+ odp_cos_t default_cos = ODP_COS_INVALID;
+ odp_cos_t cos = ODP_COS_INVALID;
+ odp_pmr_t pmr = ODP_PMR_INVALID;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+ odp_pktio_link_pause_t mode = ODP_PKTIO_LINK_PAUSE_ON;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ odp_pktio_config_init(&config);
+
+ if (pfc)
+ mode = ODP_PKTIO_LINK_PFC_ON;
+
+ if (rx)
+ config.flow_control.pause_rx = mode;
+
+ if (tx)
+ config.flow_control.pause_tx = mode;
+
+ ret = odp_pktio_config(pktio, &config);
+ CU_ASSERT_FATAL(ret == 0);
+
+ if (pfc && tx) {
+		/* Enable the classifier for PFC back-pressure configuration. This
+		 * overrides the previous pktin queue config. */
+ odp_pktin_queue_param_t pktin_param;
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ pktin_param.classifier_enable = 1;
+
+ ret = odp_pktin_queue_config(pktio, &pktin_param);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT(ret == 0);
+
+ if (pfc && tx) {
+ odp_queue_param_t qparam;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue = odp_queue_create("CoS queue", &qparam);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ default_cos = set_default_cos(pktio, queue);
+
+ cos = create_pfc_cos(default_cos, queue, &pmr);
+ }
+
+ if (pmr != ODP_PMR_INVALID)
+ odp_cls_pmr_destroy(pmr);
+
+ if (cos != ODP_COS_INVALID)
+ odp_cos_destroy(cos);
+
+ if (default_cos != ODP_COS_INVALID)
+ odp_cos_destroy(default_cos);
+
+ if (queue != ODP_QUEUE_INVALID)
+ odp_queue_destroy(queue);
+
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
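/*
 * Aside: outside the test harness, enabling plain pause frames is a single
 * odp_pktio_config() call before start. A minimal sketch follows; the
 * function name is illustrative and error handling is reduced to return
 * codes. Note that odp_pktio_link_info() may still report
 * ODP_PKTIO_LINK_PAUSE_UNKNOWN on some implementations.
 */
static int enable_pause_both(odp_pktio_t pktio)
{
	odp_pktio_config_t config;
	odp_pktio_link_info_t link_info;

	odp_pktio_config_init(&config);
	config.flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
	config.flow_control.pause_tx = ODP_PKTIO_LINK_PAUSE_ON;

	if (odp_pktio_config(pktio, &config) || odp_pktio_start(pktio))
		return -1;

	/* Configured modes are visible through link info (when known) */
	return odp_pktio_link_info(pktio, &link_info);
}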
+
+static void pktio_test_enable_pause_rx(void)
+{
+ pktio_config_flow_control(0, 1, 0);
+}
+
+static void pktio_test_enable_pause_tx(void)
+{
+ pktio_config_flow_control(0, 0, 1);
+}
+
+static void pktio_test_enable_pause_both(void)
+{
+ pktio_config_flow_control(0, 1, 1);
+}
+
+static void pktio_test_enable_pfc_rx(void)
+{
+ pktio_config_flow_control(1, 1, 0);
+}
+
+static void pktio_test_enable_pfc_tx(void)
+{
+ pktio_config_flow_control(1, 0, 1);
+}
+
+static void pktio_test_enable_pfc_both(void)
+{
+ pktio_config_flow_control(1, 1, 1);
+}
+
static void pktio_test_pktin_queue_config_direct(void)
{
odp_pktio_t pktio;
@@ -1959,28 +2200,33 @@ static void pktio_test_pktin_queue_config_queue(void)
odp_pktio_capability_t capa;
odp_pktin_queue_param_t queue_param;
odp_pktin_queue_t pktin_queues[MAX_QUEUES];
- odp_queue_t in_queues[MAX_QUEUES];
int num_queues;
- pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT);
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
capa.max_input_queues > 0);
num_queues = capa.max_input_queues;
+ CU_ASSERT_FATAL(num_queues <= ODP_PKTIN_MAX_QUEUES);
+
+ CU_ASSERT(capa.min_input_queue_size <= capa.max_input_queue_size);
odp_pktin_queue_param_init(&queue_param);
queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
queue_param.hash_proto.proto.ipv4_udp = 1;
queue_param.num_queues = num_queues;
+ for (int i = 0; i < num_queues; i++)
+ queue_param.queue_size[i] = capa.max_input_queue_size;
+
CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
- CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
- == num_queues);
- CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) == num_queues);
queue_param.num_queues = 1;
+ queue_param.queue_size[0] = capa.min_input_queue_size;
+
CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
queue_param.num_queues = capa.max_input_queues + 1;
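/*
 * Aside: min_input_queue_size and max_input_queue_size are the capability
 * bounds exercised above. A sketch of clamping a requested size into range;
 * the helper is illustrative and treats a zero max as "no advertised limit"
 * (an assumption).
 */
static uint32_t clamp_pktin_queue_size(const odp_pktio_capability_t *capa, uint32_t req)
{
	uint32_t size = req;

	if (capa->max_input_queue_size && size > capa->max_input_queue_size)
		size = capa->max_input_queue_size;

	if (size < capa->min_input_queue_size)
		size = capa->min_input_queue_size;

	return size;
}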
@@ -4927,6 +5173,12 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
pktio_check_pktout_compl_plain_queue),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_compl_sched_queue,
pktio_check_pktout_compl_sched_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_rx, pktio_check_pause_rx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_tx, pktio_check_pause_tx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_both, pktio_check_pause_both),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_rx, pktio_check_pfc_rx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_tx, pktio_check_pfc_tx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_both, pktio_check_pfc_both),
ODP_TEST_INFO_NULL
};
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index a10e7d1d9..86d070a4a 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -261,6 +261,118 @@ static void shmem_test_reserve(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static void shmem_test_info(void)
+{
+ odp_shm_t shm;
+ void *addr;
+ int ret;
+ uint32_t i;
+ uint64_t sum_len;
+ uintptr_t next;
+ odp_shm_info_t info;
+ const char *name = "info_test";
+ uint32_t num_seg = 32;
+ uint64_t size = 4 * 1024 * 1024;
+ uint64_t align = 64;
+ int support_pa = 0;
+ int support_iova = 0;
+
+ if (_global_shm_capa.max_size && _global_shm_capa.max_size < size)
+ size = _global_shm_capa.max_size;
+
+ if (_global_shm_capa.max_align < align)
+ align = _global_shm_capa.max_align;
+
+ shm = odp_shm_reserve(name, size, align, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, size);
+
+ memset(&info, 0, sizeof(odp_shm_info_t));
+ ret = odp_shm_info(shm, &info);
+
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT(strcmp(name, info.name) == 0);
+ CU_ASSERT(info.addr == addr);
+ CU_ASSERT(info.size == size);
+ CU_ASSERT(info.page_size > 0);
+ CU_ASSERT(info.flags == 0);
+ CU_ASSERT(info.num_seg > 0);
+
+	/* Limit the number of segments, since the count may get large with small page sizes */
+ if (info.num_seg < num_seg)
+ num_seg = info.num_seg;
+
+ /* all segments */
+ odp_shm_segment_info_t seginfo_a[num_seg];
+
+ memset(seginfo_a, 0, num_seg * sizeof(odp_shm_segment_info_t));
+
+ ret = odp_shm_segment_info(shm, 0, num_seg, seginfo_a);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT(seginfo_a[0].addr == (uintptr_t)addr);
+
+ sum_len = 0;
+ next = 0;
+
+ printf("\n\n");
+ printf("SHM segment info\n");
+ printf("%3s %16s %16s %16s %16s\n", "idx", "addr", "iova", "pa", "len");
+
+ for (i = 0; i < num_seg; i++) {
+ printf("%3u %16" PRIxPTR " %16" PRIx64 " %16" PRIx64 " %16" PRIu64 "\n",
+ i, seginfo_a[i].addr, seginfo_a[i].iova, seginfo_a[i].pa, seginfo_a[i].len);
+
+ CU_ASSERT(seginfo_a[i].addr != 0);
+ CU_ASSERT(seginfo_a[i].len > 0);
+
+ if (next) {
+ CU_ASSERT(seginfo_a[i].addr == next);
+ next += seginfo_a[i].len;
+ } else {
+ next = seginfo_a[i].addr + seginfo_a[i].len;
+ }
+
+ if (seginfo_a[i].iova != ODP_SHM_IOVA_INVALID)
+ support_iova = 1;
+
+ if (seginfo_a[i].pa != ODP_SHM_PA_INVALID)
+ support_pa = 1;
+
+ sum_len += seginfo_a[i].len;
+ }
+
+ printf("\n");
+ printf("IOVA: %s, PA: %s\n\n", support_iova ? "supported" : "not supported",
+ support_pa ? "supported" : "not supported");
+
+ CU_ASSERT(sum_len == size);
+
+ if (num_seg > 1) {
+ /* all, except the first one */
+ odp_shm_segment_info_t seginfo_b[num_seg];
+
+ memset(seginfo_b, 0xff, num_seg * sizeof(odp_shm_segment_info_t));
+
+ ret = odp_shm_segment_info(shm, 1, num_seg - 1, &seginfo_b[1]);
+ CU_ASSERT_FATAL(ret == 0);
+
+ for (i = 1; i < num_seg; i++) {
+ CU_ASSERT(seginfo_a[i].addr == seginfo_b[i].addr);
+ CU_ASSERT(seginfo_a[i].iova == seginfo_b[i].iova);
+ CU_ASSERT(seginfo_a[i].pa == seginfo_b[i].pa);
+ CU_ASSERT(seginfo_a[i].len == seginfo_b[i].len);
+ }
+ }
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
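/*
 * Aside: segment info makes virtual-to-physical translation possible for
 * driver use. A minimal sketch over a segment table fetched as above; the
 * helper name is illustrative and assumes PA output is supported.
 */
static uint64_t shm_virt_to_pa(const odp_shm_segment_info_t seg[], uint32_t num, uintptr_t addr)
{
	uint32_t i;

	for (i = 0; i < num; i++) {
		if (addr < seg[i].addr || addr >= seg[i].addr + seg[i].len)
			continue;

		if (seg[i].pa == ODP_SHM_PA_INVALID)
			break;

		/* The offset within the segment is the same in both spaces */
		return seg[i].pa + (addr - seg[i].addr);
	}

	return ODP_SHM_PA_INVALID;
}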
+
static int shmem_check_flag_hp(void)
{
if (_global_shm_capa.flags & ODP_SHM_HP)
@@ -1027,6 +1139,7 @@ static int shm_suite_init(void)
odp_testinfo_t shmem_suite[] = {
ODP_TEST_INFO(shmem_test_capability),
ODP_TEST_INFO(shmem_test_reserve),
+ ODP_TEST_INFO(shmem_test_info),
ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hp, shmem_check_flag_hp),
ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_no_hp, shmem_check_flag_no_hp),
ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_proc, shmem_check_flag_proc),
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
index b1fcbe3c3..f0cf48738 100644
--- a/test/validation/api/system/system.c
+++ b/test/validation/api/system/system.c
@@ -579,6 +579,51 @@ static void system_test_info(void)
}
}
+static void system_test_meminfo(void)
+{
+ const int32_t max_num = 128;
+ odp_system_meminfo_t info, info_0;
+ int32_t ret, ret_0, num, i;
+ odp_system_memblock_t block[max_num];
+
+ /* Meminfo without blocks */
+ ret_0 = odp_system_meminfo(&info_0, NULL, 0);
+ CU_ASSERT_FATAL(ret_0 >= 0);
+
+ ret = odp_system_meminfo(&info, block, max_num);
+ CU_ASSERT_FATAL(ret >= 0);
+
+	/* Totals should match regardless of per-block output */
+ CU_ASSERT(ret == ret_0);
+ CU_ASSERT(info_0.total_mapped == info.total_mapped);
+ CU_ASSERT(info_0.total_used == info.total_used);
+ CU_ASSERT(info_0.total_overhead == info.total_overhead);
+
+ CU_ASSERT(info.total_mapped >= info.total_used);
+ CU_ASSERT(info.total_used >= info.total_overhead);
+
+ num = ret;
+ if (ret > max_num)
+ num = max_num;
+
+ printf("\n\n");
+ printf("System meminfo contain %i blocks, printing %i blocks:\n", ret, num);
+
+ printf(" %s %-32s %16s %14s %14s %12s\n", "index", "name", "addr",
+ "used", "overhead", "page_size");
+
+ for (i = 0; i < num; i++) {
+ printf(" [%3i] %-32s %16" PRIxPTR " %14" PRIu64 " %14" PRIu64 " %12" PRIu64 "\n",
+ i, block[i].name, block[i].addr, block[i].used, block[i].overhead,
+ block[i].page_size);
+ }
+
+ printf("\n");
+ printf("Total mapped: %" PRIu64 "\n", info.total_mapped);
+ printf("Total used: %" PRIu64 "\n", info.total_used);
+ printf("Total overhead: %" PRIu64 "\n\n", info.total_overhead);
+}
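/*
 * Aside: passing a NULL block table (as in the first call above) is enough
 * when only the totals are needed. A sketch of a one-line utilization log;
 * the helper name is illustrative.
 */
static void log_mem_totals(void)
{
	odp_system_meminfo_t info;

	if (odp_system_meminfo(&info, NULL, 0) < 0)
		return;

	printf("mem: used %" PRIu64 " of %" PRIu64 " mapped bytes, overhead %" PRIu64 "\n",
	       info.total_used, info.total_mapped, info.total_overhead);
}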
+
odp_testinfo_t system_suite[] = {
ODP_TEST_INFO(test_version_api_str),
ODP_TEST_INFO(test_version_str),
@@ -609,6 +654,7 @@ odp_testinfo_t system_suite[] = {
ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_long_period,
system_check_cycle_counter),
ODP_TEST_INFO(system_test_info),
+ ODP_TEST_INFO(system_test_meminfo),
ODP_TEST_INFO(system_test_info_print),
ODP_TEST_INFO(system_test_config_print),
ODP_TEST_INFO_NULL,