aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/Makefile.am1
-rw-r--r--test/Makefile.inc44
-rw-r--r--test/README9
-rw-r--r--test/common/Makefile.am28
-rw-r--r--test/common/mask_common.c (renamed from test/common_plat/common/mask_common.c)5
-rw-r--r--test/common/mask_common.h (renamed from test/common_plat/common/mask_common.h)2
-rw-r--r--test/common/odp_cunit_common.c753
-rw-r--r--test/common/odp_cunit_common.h201
-rw-r--r--test/common/packet_common.c133
-rw-r--r--test/common/packet_common.h67
-rw-r--r--test/common/test_common_macros.h17
-rw-r--r--test/common/test_packet_custom.h124
-rw-r--r--test/common/test_packet_ipsec.h188
-rw-r--r--test/common/test_packet_ipv4.h459
-rw-r--r--test/common/test_packet_ipv4_with_crc.h234
-rw-r--r--test/common/test_packet_ipv6.h123
-rw-r--r--test/common_plat/Makefile.am7
-rw-r--r--test/common_plat/common/Makefile.am13
-rw-r--r--test/common_plat/common/odp_cunit_common.c373
-rw-r--r--test/common_plat/common/odp_cunit_common.h106
-rw-r--r--test/common_plat/m4/configure.m433
-rw-r--r--test/common_plat/m4/miscellaneous.m49
-rw-r--r--test/common_plat/m4/performance.m49
-rw-r--r--test/common_plat/m4/validation.m458
-rw-r--r--test/common_plat/miscellaneous/Makefile.am12
-rw-r--r--test/common_plat/miscellaneous/odp_api_from_cpp.cpp12
-rw-r--r--test/common_plat/performance/.gitignore10
-rw-r--r--test/common_plat/performance/Makefile.am53
-rw-r--r--test/common_plat/performance/odp_crypto.c984
-rw-r--r--test/common_plat/performance/odp_l2fwd.c1531
-rwxr-xr-xtest/common_plat/performance/odp_pktio_ordered_run.sh42
-rwxr-xr-xtest/common_plat/performance/odp_scheduling_run.sh29
-rw-r--r--test/common_plat/validation/api/.gitignore2
-rw-r--r--test/common_plat/validation/api/Makefile.am28
-rw-r--r--test/common_plat/validation/api/Makefile.inc16
-rw-r--r--test/common_plat/validation/api/atomic/Makefile.am10
-rw-r--r--test/common_plat/validation/api/atomic/atomic.c909
-rw-r--r--test/common_plat/validation/api/atomic/atomic.h39
-rw-r--r--test/common_plat/validation/api/atomic/atomic_main.c12
-rw-r--r--test/common_plat/validation/api/barrier/Makefile.am10
-rw-r--r--test/common_plat/validation/api/barrier/barrier.h30
-rw-r--r--test/common_plat/validation/api/barrier/barrier_main.c12
-rw-r--r--test/common_plat/validation/api/buffer/Makefile.am10
-rw-r--r--test/common_plat/validation/api/buffer/buffer.c287
-rw-r--r--test/common_plat/validation/api/buffer/buffer.h32
-rw-r--r--test/common_plat/validation/api/buffer/buffer_main.c11
-rw-r--r--test/common_plat/validation/api/classification/Makefile.am14
-rw-r--r--test/common_plat/validation/api/classification/classification.c43
-rw-r--r--test/common_plat/validation/api/classification/classification_main.c12
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_basic.c330
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_test_pmr.c1864
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_testsuites.h62
-rw-r--r--test/common_plat/validation/api/cpumask/Makefile.am11
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask.c116
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask.h28
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask_main.c11
-rw-r--r--test/common_plat/validation/api/crypto/Makefile.am11
-rw-r--r--test/common_plat/validation/api/crypto/crypto.c129
-rw-r--r--test/common_plat/validation/api/crypto/crypto.h45
-rw-r--r--test/common_plat/validation/api/crypto/crypto_main.c12
-rw-r--r--test/common_plat/validation/api/crypto/odp_crypto_test_inp.c1076
-rw-r--r--test/common_plat/validation/api/crypto/odp_crypto_test_inp.h22
-rw-r--r--test/common_plat/validation/api/crypto/test_vectors.h353
-rw-r--r--test/common_plat/validation/api/crypto/test_vectors_len.h38
-rw-r--r--test/common_plat/validation/api/errno/Makefile.am10
-rw-r--r--test/common_plat/validation/api/errno/errno.h24
-rw-r--r--test/common_plat/validation/api/errno/errno_main.c12
-rw-r--r--test/common_plat/validation/api/hash/Makefile.am10
-rw-r--r--test/common_plat/validation/api/hash/hash.c54
-rw-r--r--test/common_plat/validation/api/hash/hash.h24
-rw-r--r--test/common_plat/validation/api/hash/hash_main.c12
-rw-r--r--test/common_plat/validation/api/init/.gitignore3
-rw-r--r--test/common_plat/validation/api/init/Makefile.am16
-rw-r--r--test/common_plat/validation/api/init/init.c188
-rw-r--r--test/common_plat/validation/api/init/init.h32
-rw-r--r--test/common_plat/validation/api/init/init_main_abort.c11
-rw-r--r--test/common_plat/validation/api/init/init_main_log.c11
-rw-r--r--test/common_plat/validation/api/init/init_main_ok.c11
-rw-r--r--test/common_plat/validation/api/lock/Makefile.am10
-rw-r--r--test/common_plat/validation/api/lock/lock.h46
-rw-r--r--test/common_plat/validation/api/lock/lock_main.c12
-rw-r--r--test/common_plat/validation/api/packet/Makefile.am10
-rw-r--r--test/common_plat/validation/api/packet/packet.c2451
-rw-r--r--test/common_plat/validation/api/packet/packet.h56
-rw-r--r--test/common_plat/validation/api/packet/packet_main.c12
-rw-r--r--test/common_plat/validation/api/pktio/Makefile.am10
-rw-r--r--test/common_plat/validation/api/pktio/pktio.c2205
-rw-r--r--test/common_plat/validation/api/pktio/pktio.h64
-rw-r--r--test/common_plat/validation/api/pktio/pktio_main.c12
-rw-r--r--test/common_plat/validation/api/pool/Makefile.am10
-rw-r--r--test/common_plat/validation/api/pool/pool.c126
-rw-r--r--test/common_plat/validation/api/pool/pool.h28
-rw-r--r--test/common_plat/validation/api/pool/pool_main.c12
-rw-r--r--test/common_plat/validation/api/queue/Makefile.am10
-rw-r--r--test/common_plat/validation/api/queue/queue.c329
-rw-r--r--test/common_plat/validation/api/queue/queue.h31
-rw-r--r--test/common_plat/validation/api/queue/queue_main.c12
-rw-r--r--test/common_plat/validation/api/random/Makefile.am10
-rw-r--r--test/common_plat/validation/api/random/random.c90
-rw-r--r--test/common_plat/validation/api/random/random.h26
-rw-r--r--test/common_plat/validation/api/random/random_main.c12
-rw-r--r--test/common_plat/validation/api/scheduler/.gitignore1
-rw-r--r--test/common_plat/validation/api/scheduler/Makefile.am10
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler.c1669
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler.h62
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler_main.c12
-rw-r--r--test/common_plat/validation/api/shmem/Makefile.am10
-rw-r--r--test/common_plat/validation/api/shmem/shmem.h27
-rw-r--r--test/common_plat/validation/api/shmem/shmem_main.c12
-rw-r--r--test/common_plat/validation/api/std_clib/.gitignore1
-rw-r--r--test/common_plat/validation/api/std_clib/Makefile.am10
-rw-r--r--test/common_plat/validation/api/std_clib/std_clib.h21
-rw-r--r--test/common_plat/validation/api/std_clib/std_clib_main.c12
-rw-r--r--test/common_plat/validation/api/system/Makefile.am10
-rw-r--r--test/common_plat/validation/api/system/system.c344
-rw-r--r--test/common_plat/validation/api/system/system.h43
-rw-r--r--test/common_plat/validation/api/system/system_main.c12
-rw-r--r--test/common_plat/validation/api/thread/Makefile.am12
-rw-r--r--test/common_plat/validation/api/thread/thread.c140
-rw-r--r--test/common_plat/validation/api/thread/thread.h33
-rw-r--r--test/common_plat/validation/api/thread/thread_main.c12
-rw-r--r--test/common_plat/validation/api/time/Makefile.am10
-rw-r--r--test/common_plat/validation/api/time/time.c476
-rw-r--r--test/common_plat/validation/api/time/time.h40
-rw-r--r--test/common_plat/validation/api/time/time_main.c12
-rw-r--r--test/common_plat/validation/api/timer/Makefile.am10
-rw-r--r--test/common_plat/validation/api/timer/timer.c605
-rw-r--r--test/common_plat/validation/api/timer/timer.h27
-rw-r--r--test/common_plat/validation/api/timer/timer_main.c12
-rw-r--r--test/common_plat/validation/api/traffic_mngr/Makefile.am10
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr.h45
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c12
-rw-r--r--test/linux-dpdk/Makefile.am56
-rw-r--r--test/linux-dpdk/Makefile.inc19
-rw-r--r--test/linux-dpdk/m4/configure.m42
l---------test/linux-dpdk/run-test1
l---------test/linux-dpdk/validation/api/pktio/.gitignore1
-rw-r--r--test/linux-dpdk/validation/api/pktio/Makefile.am4
l---------test/linux-dpdk/validation/api/pktio/pktio_env1
-rwxr-xr-xtest/linux-dpdk/validation/api/pktio/pktio_run.sh111
-rwxr-xr-xtest/linux-dpdk/wrapper-script.sh76
-rw-r--r--test/linux-generic/.gitignore3
-rw-r--r--test/linux-generic/Makefile.am83
-rw-r--r--test/linux-generic/Makefile.inc20
-rw-r--r--test/linux-generic/m4/configure.m49
-rw-r--r--test/linux-generic/m4/performance.m49
-rw-r--r--test/linux-generic/mmap_vlan_ins/.gitignore2
-rw-r--r--test/linux-generic/mmap_vlan_ins/Makefile.am15
-rw-r--r--test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.c226
-rwxr-xr-xtest/linux-generic/mmap_vlan_ins/mmap_vlan_ins.sh75
-rw-r--r--test/linux-generic/mmap_vlan_ins/pktio_env120
-rw-r--r--test/linux-generic/mmap_vlan_ins/vlan.pcapbin9728 -> 0 bytes
-rw-r--r--test/linux-generic/performance/.gitignore2
-rw-r--r--test/linux-generic/performance/Makefile.am13
-rwxr-xr-xtest/linux-generic/performance/odp_scheduling_run_proc.sh30
-rw-r--r--test/linux-generic/pktio_ipc/.gitignore2
-rw-r--r--test/linux-generic/pktio_ipc/Makefile.am20
-rw-r--r--test/linux-generic/pktio_ipc/ipc_common.c174
-rw-r--r--test/linux-generic/pktio_ipc/ipc_common.h96
-rw-r--r--test/linux-generic/pktio_ipc/pktio_ipc1.c355
-rw-r--r--test/linux-generic/pktio_ipc/pktio_ipc2.c239
-rwxr-xr-xtest/linux-generic/pktio_ipc/pktio_ipc_run.sh89
-rw-r--r--test/linux-generic/ring/.gitignore1
-rw-r--r--test/linux-generic/ring/Makefile.am14
-rw-r--r--test/linux-generic/ring/ring_basic.c361
-rw-r--r--test/linux-generic/ring/ring_main.c12
-rw-r--r--test/linux-generic/ring/ring_stress.c244
-rw-r--r--test/linux-generic/ring/ring_suites.c74
-rw-r--r--test/linux-generic/ring/ring_suites.h34
-rwxr-xr-xtest/linux-generic/run-test67
-rw-r--r--test/linux-generic/validation/Makefile.inc1
-rw-r--r--test/linux-generic/validation/api/Makefile.inc1
-rw-r--r--test/linux-generic/validation/api/pktio/.gitignore2
-rw-r--r--test/linux-generic/validation/api/pktio/Makefile.am15
-rw-r--r--test/linux-generic/validation/api/pktio/pktio_env120
-rwxr-xr-xtest/linux-generic/validation/api/pktio/pktio_run.sh125
-rwxr-xr-xtest/linux-generic/validation/api/pktio/pktio_run_dpdk.sh95
-rwxr-xr-xtest/linux-generic/validation/api/pktio/pktio_run_netmap.sh123
-rwxr-xr-xtest/linux-generic/validation/api/pktio/pktio_run_pcap.sh36
-rwxr-xr-xtest/linux-generic/validation/api/pktio/pktio_run_tap.sh119
-rw-r--r--test/linux-generic/validation/api/shmem/.gitignore3
-rw-r--r--test/linux-generic/validation/api/shmem/Makefile.am28
-rw-r--r--test/linux-generic/validation/api/shmem/shmem.h21
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_common.h23
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_linux.c299
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_linux.h9
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp1.c79
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp1.h7
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp2.c103
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp2.h7
-rw-r--r--test/m4/configure.m454
-rw-r--r--test/m4/miscellaneous.m423
-rw-r--r--test/m4/performance.m49
-rw-r--r--test/m4/validation.m434
-rw-r--r--test/miscellaneous/.gitignore (renamed from test/common_plat/miscellaneous/.gitignore)1
-rw-r--r--test/miscellaneous/Makefile.am45
-rw-r--r--test/miscellaneous/odp_api_from_cpp.cpp26
-rw-r--r--test/miscellaneous/odp_api_headers.c39
-rw-r--r--test/performance/.gitignore33
-rw-r--r--test/performance/Makefile.am125
-rw-r--r--test/performance/bench_common.c247
-rw-r--r--test/performance/bench_common.h239
-rw-r--r--test/performance/dummy_crc.h (renamed from test/common_plat/performance/dummy_crc.h)4
-rw-r--r--test/performance/odp_atomic_perf.c1406
-rw-r--r--test/performance/odp_bench_buffer.c896
-rw-r--r--test/performance/odp_bench_misc.c1063
-rw-r--r--test/performance/odp_bench_packet.c (renamed from test/common_plat/performance/odp_bench_packet.c)1066
-rw-r--r--test/performance/odp_bench_pktio_sp.c1140
-rw-r--r--test/performance/odp_bench_timer.c742
-rw-r--r--test/performance/odp_cpu_bench.c837
-rwxr-xr-xtest/performance/odp_cpu_bench_run.sh19
-rw-r--r--test/performance/odp_crc.c308
-rw-r--r--test/performance/odp_crypto.c1526
-rwxr-xr-xtest/performance/odp_crypto_run.sh19
-rw-r--r--test/performance/odp_dma_perf.c1951
-rwxr-xr-xtest/performance/odp_dma_perf_run.sh74
-rw-r--r--test/performance/odp_dmafwd.c1475
-rwxr-xr-xtest/performance/odp_dmafwd_run.sh72
-rw-r--r--test/performance/odp_ipsec.c1422
-rwxr-xr-xtest/performance/odp_ipsec_run.sh19
-rw-r--r--test/performance/odp_ipsecfwd.c2074
-rw-r--r--test/performance/odp_ipsecfwd.conf41
-rw-r--r--test/performance/odp_l2fwd.c2643
-rwxr-xr-xtest/performance/odp_l2fwd_run.sh (renamed from test/common_plat/performance/odp_l2fwd_run.sh)34
-rw-r--r--test/performance/odp_lock_perf.c699
-rw-r--r--test/performance/odp_mem_perf.c487
-rw-r--r--test/performance/odp_packet_gen.c2242
-rwxr-xr-xtest/performance/odp_packet_gen_run.sh88
-rw-r--r--test/performance/odp_pktio_ordered.c (renamed from test/common_plat/performance/odp_pktio_ordered.c)290
-rwxr-xr-xtest/performance/odp_pktio_ordered_run.sh61
-rw-r--r--test/performance/odp_pktio_perf.c (renamed from test/common_plat/performance/odp_pktio_perf.c)310
-rw-r--r--test/performance/odp_pool_latency.c1382
-rw-r--r--test/performance/odp_pool_perf.c750
-rw-r--r--test/performance/odp_queue_perf.c651
-rw-r--r--test/performance/odp_random.c552
-rw-r--r--test/performance/odp_sched_latency.c (renamed from test/common_plat/performance/odp_sched_latency.c)633
-rwxr-xr-xtest/performance/odp_sched_latency_run.sh (renamed from test/common_plat/performance/odp_sched_latency_run.sh)10
-rw-r--r--test/performance/odp_sched_perf.c1518
-rwxr-xr-xtest/performance/odp_sched_perf_run.sh33
-rw-r--r--test/performance/odp_sched_pktio.c1600
-rwxr-xr-xtest/performance/odp_sched_pktio_run.sh109
-rw-r--r--test/performance/odp_scheduling.c (renamed from test/common_plat/performance/odp_scheduling.c)271
-rwxr-xr-xtest/performance/odp_scheduling_run.sh37
-rw-r--r--test/performance/odp_stash_perf.c523
-rw-r--r--test/performance/odp_stress.c876
-rw-r--r--test/performance/odp_timer_perf.c1402
-rwxr-xr-xtest/performance/odp_timer_perf_run.sh33
-rw-r--r--test/performance/udp64.pcap (renamed from test/common_plat/performance/udp64.pcap)bin7624 -> 7624 bytes
-rw-r--r--test/test_debug.h93
-rw-r--r--test/validation/Makefile.am (renamed from test/common_plat/validation/Makefile.am)2
-rw-r--r--test/validation/api/.gitignore (renamed from test/linux-dpdk/.gitignore)0
-rw-r--r--test/validation/api/Makefile.am100
-rw-r--r--test/validation/api/Makefile.inc3
-rw-r--r--test/validation/api/README (renamed from test/common_plat/validation/api/README)2
-rw-r--r--test/validation/api/align/.gitignore1
-rw-r--r--test/validation/api/align/Makefile.am4
-rw-r--r--test/validation/api/align/align.c156
-rw-r--r--test/validation/api/atomic/.gitignore (renamed from test/common_plat/validation/api/atomic/.gitignore)0
-rw-r--r--test/validation/api/atomic/Makefile.am4
-rw-r--r--test/validation/api/atomic/atomic.c1717
-rw-r--r--test/validation/api/barrier/.gitignore (renamed from test/common_plat/validation/api/barrier/.gitignore)0
-rw-r--r--test/validation/api/barrier/Makefile.am4
-rw-r--r--test/validation/api/barrier/barrier.c (renamed from test/common_plat/validation/api/barrier/barrier.c)132
-rw-r--r--test/validation/api/buffer/.gitignore (renamed from test/common_plat/validation/api/buffer/.gitignore)0
-rw-r--r--test/validation/api/buffer/Makefile.am4
-rw-r--r--test/validation/api/buffer/buffer.c610
-rw-r--r--test/validation/api/byteorder/.gitignore1
-rw-r--r--test/validation/api/byteorder/Makefile.am4
-rw-r--r--test/validation/api/byteorder/byteorder.c107
-rw-r--r--test/validation/api/chksum/.gitignore1
-rw-r--r--test/validation/api/chksum/Makefile.am4
-rw-r--r--test/validation/api/chksum/chksum.c454
-rw-r--r--test/validation/api/classification/.gitignore (renamed from test/common_plat/validation/api/classification/.gitignore)0
-rw-r--r--test/validation/api/classification/Makefile.am11
-rw-r--r--test/validation/api/classification/classification.c48
-rw-r--r--test/validation/api/classification/classification.h (renamed from test/common_plat/validation/api/classification/classification.h)55
-rw-r--r--test/validation/api/classification/odp_classification_basic.c753
-rw-r--r--test/validation/api/classification/odp_classification_common.c (renamed from test/common_plat/validation/api/classification/odp_classification_common.c)321
-rw-r--r--test/validation/api/classification/odp_classification_test_pmr.c2186
-rw-r--r--test/validation/api/classification/odp_classification_tests.c (renamed from test/common_plat/validation/api/classification/odp_classification_tests.c)645
-rw-r--r--test/validation/api/classification/odp_classification_testsuites.h94
-rw-r--r--test/validation/api/comp/.gitignore1
-rw-r--r--test/validation/api/comp/Makefile.am7
-rw-r--r--test/validation/api/comp/comp.c573
-rw-r--r--test/validation/api/comp/test_vectors.h1997
-rw-r--r--test/validation/api/cpumask/.gitignore (renamed from test/common_plat/validation/api/cpumask/.gitignore)0
-rw-r--r--test/validation/api/cpumask/Makefile.am5
-rw-r--r--test/validation/api/cpumask/cpumask.c200
-rw-r--r--test/validation/api/crypto/.gitignore (renamed from test/common_plat/validation/api/crypto/.gitignore)0
-rw-r--r--test/validation/api/crypto/Makefile.am14
-rw-r--r--test/validation/api/crypto/crypto_op_test.c614
-rw-r--r--test/validation/api/crypto/crypto_op_test.h49
-rw-r--r--test/validation/api/crypto/odp_crypto_test_inp.c2414
-rw-r--r--test/validation/api/crypto/test_vector_defs.h3167
-rw-r--r--test/validation/api/crypto/test_vectors.h72
-rw-r--r--test/validation/api/crypto/test_vectors_len.h150
-rw-r--r--test/validation/api/crypto/util.c310
-rw-r--r--test/validation/api/crypto/util.h49
-rw-r--r--test/validation/api/dma/.gitignore1
-rw-r--r--test/validation/api/dma/Makefile.am4
-rw-r--r--test/validation/api/dma/dma.c1705
-rw-r--r--test/validation/api/errno/.gitignore (renamed from test/common_plat/validation/api/errno/.gitignore)0
-rw-r--r--test/validation/api/errno/Makefile.am4
-rw-r--r--test/validation/api/errno/errno.c (renamed from test/common_plat/validation/api/errno/errno.c)9
-rw-r--r--test/validation/api/event/.gitignore1
-rw-r--r--test/validation/api/event/Makefile.am4
-rw-r--r--test/validation/api/event/event.c473
-rw-r--r--test/validation/api/hash/.gitignore (renamed from test/common_plat/validation/api/hash/.gitignore)0
-rw-r--r--test/validation/api/hash/Makefile.am4
-rw-r--r--test/validation/api/hash/hash.c765
-rw-r--r--test/validation/api/hints/.gitignore1
-rw-r--r--test/validation/api/hints/Makefile.am4
-rw-r--r--test/validation/api/hints/hints.c92
-rw-r--r--test/validation/api/init/.gitignore1
-rw-r--r--test/validation/api/init/Makefile.am15
-rwxr-xr-xtest/validation/api/init/init_abort.sh3
-rwxr-xr-xtest/validation/api/init/init_defaults.sh3
-rwxr-xr-xtest/validation/api/init/init_feature_disabled.sh3
-rwxr-xr-xtest/validation/api/init/init_feature_enabled.sh3
-rwxr-xr-xtest/validation/api/init/init_log.sh3
-rwxr-xr-xtest/validation/api/init/init_log_thread.sh3
-rw-r--r--test/validation/api/init/init_main.c325
-rwxr-xr-xtest/validation/api/init/init_num_thr.sh3
-rwxr-xr-xtest/validation/api/init/init_test_param_init.sh3
-rwxr-xr-xtest/validation/api/init/init_test_term_abnormal.sh3
-rw-r--r--test/validation/api/ipsec/.gitignore1
-rw-r--r--test/validation/api/ipsec/Makefile.am25
-rw-r--r--test/validation/api/ipsec/ipsec.c1589
-rw-r--r--test/validation/api/ipsec/ipsec.h164
-rwxr-xr-xtest/validation/api/ipsec/ipsec_async.sh3
-rwxr-xr-xtest/validation/api/ipsec/ipsec_inline_in.sh3
-rwxr-xr-xtest/validation/api/ipsec/ipsec_inline_out.sh3
-rw-r--r--test/validation/api/ipsec/ipsec_main.c83
-rwxr-xr-xtest/validation/api/ipsec/ipsec_sync.sh3
-rw-r--r--test/validation/api/ipsec/ipsec_test_in.c2369
-rw-r--r--test/validation/api/ipsec/ipsec_test_out.c2077
-rw-r--r--test/validation/api/ipsec/reass_test_vectors.c353
-rw-r--r--test/validation/api/ipsec/reass_test_vectors.h67
-rw-r--r--test/validation/api/ipsec/test_vectors.h2168
-rw-r--r--test/validation/api/lock/.gitignore (renamed from test/common_plat/validation/api/lock/.gitignore)0
-rw-r--r--test/validation/api/lock/Makefile.am4
-rw-r--r--test/validation/api/lock/lock.c (renamed from test/common_plat/validation/api/lock/lock.c)235
-rw-r--r--test/validation/api/ml/.gitignore1
-rw-r--r--test/validation/api/ml/Makefile.am4
-rw-r--r--test/validation/api/ml/ml.c572
-rw-r--r--test/validation/api/packet/.gitignore (renamed from test/common_plat/validation/api/packet/.gitignore)0
-rw-r--r--test/validation/api/packet/Makefile.am4
-rw-r--r--test/validation/api/packet/packet.c4597
-rw-r--r--test/validation/api/pktio/.gitignore (renamed from test/common_plat/validation/api/pktio/.gitignore)0
-rw-r--r--test/validation/api/pktio/Makefile.am4
-rw-r--r--test/validation/api/pktio/lso.c938
-rw-r--r--test/validation/api/pktio/lso.h19
-rw-r--r--test/validation/api/pktio/parser.c609
-rw-r--r--test/validation/api/pktio/parser.h19
-rw-r--r--test/validation/api/pktio/pktio.c5517
-rw-r--r--test/validation/api/pool/.gitignore (renamed from test/common_plat/validation/api/pool/.gitignore)0
-rw-r--r--test/validation/api/pool/Makefile.am4
-rw-r--r--test/validation/api/pool/pool.c2386
-rw-r--r--test/validation/api/queue/.gitignore (renamed from test/common_plat/validation/api/queue/.gitignore)0
-rw-r--r--test/validation/api/queue/Makefile.am4
-rw-r--r--test/validation/api/queue/queue.c1176
-rw-r--r--test/validation/api/random/.gitignore (renamed from test/common_plat/validation/api/random/.gitignore)0
-rw-r--r--test/validation/api/random/Makefile.am5
-rw-r--r--test/validation/api/random/random.c538
-rw-r--r--test/validation/api/scheduler/.gitignore2
-rw-r--r--test/validation/api/scheduler/Makefile.am5
-rw-r--r--test/validation/api/scheduler/scheduler.c3770
-rw-r--r--test/validation/api/scheduler/scheduler_no_predef_groups.c225
-rw-r--r--test/validation/api/shmem/.gitignore (renamed from test/common_plat/validation/api/shmem/.gitignore)0
-rw-r--r--test/validation/api/shmem/Makefile.am4
-rw-r--r--test/validation/api/shmem/shmem.c (renamed from test/common_plat/validation/api/shmem/shmem.c)599
-rw-r--r--test/validation/api/stash/.gitignore1
-rw-r--r--test/validation/api/stash/Makefile.am4
-rw-r--r--test/validation/api/stash/stash.c1397
-rw-r--r--test/validation/api/std/.gitignore1
-rw-r--r--test/validation/api/std/Makefile.am4
-rw-r--r--test/validation/api/std/std.c (renamed from test/common_plat/validation/api/std_clib/std_clib.c)27
-rw-r--r--test/validation/api/system/.gitignore (renamed from test/common_plat/validation/api/system/.gitignore)0
-rw-r--r--test/validation/api/system/Makefile.am4
-rw-r--r--test/validation/api/system/system.c699
-rw-r--r--test/validation/api/thread/.gitignore (renamed from test/common_plat/validation/api/thread/.gitignore)0
-rw-r--r--test/validation/api/thread/Makefile.am6
-rw-r--r--test/validation/api/thread/thread.c270
-rw-r--r--test/validation/api/time/.gitignore (renamed from test/common_plat/validation/api/time/.gitignore)0
-rw-r--r--test/validation/api/time/Makefile.am4
-rw-r--r--test/validation/api/time/time.c1031
-rw-r--r--test/validation/api/timer/.gitignore (renamed from test/common_plat/validation/api/timer/.gitignore)0
-rw-r--r--test/validation/api/timer/Makefile.am4
-rw-r--r--test/validation/api/timer/timer.c3309
-rw-r--r--test/validation/api/traffic_mngr/.gitignore (renamed from test/common_plat/validation/api/traffic_mngr/.gitignore)0
-rw-r--r--test/validation/api/traffic_mngr/Makefile.am5
-rw-r--r--test/validation/api/traffic_mngr/traffic_mngr.c (renamed from test/common_plat/validation/api/traffic_mngr/traffic_mngr.c)1947
392 files changed, 93937 insertions, 24605 deletions
diff --git a/test/Makefile.am b/test/Makefile.am
deleted file mode 100644
index 3b5917329..000000000
--- a/test/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = common_plat @with_platform@
diff --git a/test/Makefile.inc b/test/Makefile.inc
index 063b04b01..6d14f9c17 100644
--- a/test/Makefile.inc
+++ b/test/Makefile.inc
@@ -1,26 +1,38 @@
+if PLATFORM_IS_LINUX_DPDK
include $(top_srcdir)/platform/@with_platform@/Makefile.inc
-LIB = $(top_builddir)/lib
+endif
+
+include $(top_srcdir)/Makefile.inc
+
+COMMON_DIR = $(top_builddir)/test/common
+
+LIBODP = $(LIB)/libodphelper.la $(LIB)/lib$(ODP_LIB_NAME).la
+
+LIBCUNIT_COMMON = $(COMMON_DIR)/libcunit_common.la
+LIBCPUMASK_COMMON = $(COMMON_DIR)/libcpumask_common.la
+LIBPACKET_COMMON = $(COMMON_DIR)/libpacket_common.la
+LIBTHRMASK_COMMON = $(COMMON_DIR)/libthrmask_common.la
#in the following line, the libs using the symbols should come before
#the libs containing them! The includer is given a chance to add things
-#before libodp by setting PRE_LDADD before the inclusion.
-LDADD = $(PRE_LDADD) $(LIB)/libodphelper.la $(LIB)/libodp-dpdk.la
+#before libodp by setting PRELDADD before the inclusion.
+LDADD = $(PRELDADD) $(LIBODP)
+PRELDADD =
+
+AM_CPPFLAGS = \
+ $(ODP_INCLUDES) \
+ $(HELPER_INCLUDES) \
+ -I$(top_srcdir)/test/common
-INCFLAGS = \
- -I$(top_builddir)/platform/@with_platform@/include \
- -I$(top_srcdir)/helper/include \
- -I$(top_srcdir)/include \
- -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@ \
- -I$(top_srcdir)/platform/@with_platform@/include \
- -I$(top_srcdir)/test \
- -I$(top_builddir)/include
+AM_CFLAGS += $(CUNIT_CFLAGS)
-AM_CFLAGS += $(INCFLAGS)
-AM_CXXFLAGS = $(INCFLAGS)
+if STATIC_APPS
+AM_LDFLAGS += -static
+endif
-AM_LDFLAGS += -L$(LIB)
+AM_LDFLAGS += $(PLAT_DEP_LIBS)
@VALGRIND_CHECK_RULES@
-valgrind_tools = memcheck
-TESTS_ENVIRONMENT= ODP_PLATFORM=${with_platform} EXEEXT=${EXEEXT}
+TESTS_ENVIRONMENT = ODP_PLATFORM=${with_platform} \
+ EXEEXT=${EXEEXT}
diff --git a/test/README b/test/README
index f4886d35b..4ef634d53 100644
--- a/test/README
+++ b/test/README
@@ -1,9 +1,9 @@
-Copyright (c) 2014, Linaro Limited
+Copyright (c) 2014-2018, Linaro Limited
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-Files in test/common_plat/validation directory are intended to be terse
+Files in test/validation directory are intended to be terse
checks that help ensure that the ODP implementations all perform identically
and to specification. Tests inside the validation directory shall be platform
independent.
@@ -15,3 +15,8 @@ $ make
$ cd test/<platform_name>
$ make check-valgrind
+To run these tests with gdb, use the following libtool command:
+$ libtool --mode=execute gdb ./<test_name>
+
+Refer to the prerequisites section of the DEPENDENCIES file for how to
+install the libtool package.
diff --git a/test/common/Makefile.am b/test/common/Makefile.am
new file mode 100644
index 000000000..72658df73
--- /dev/null
+++ b/test/common/Makefile.am
@@ -0,0 +1,28 @@
+include $(top_srcdir)/test/Makefile.inc
+
+if cunit_support
+
+noinst_LTLIBRARIES = \
+ libcunit_common.la \
+ libcpumask_common.la \
+ libpacket_common.la \
+ libthrmask_common.la
+
+libcunit_common_la_SOURCES = odp_cunit_common.c odp_cunit_common.h
+libcunit_common_la_LIBADD = $(CUNIT_LIBS)
+
+libcpumask_common_la_SOURCES = mask_common.c mask_common.h
+
+libpacket_common_la_SOURCES = packet_common.c packet_common.h
+
+libthrmask_common_la_SOURCES = mask_common.c mask_common.h
+libthrmask_common_la_CFLAGS = $(AM_CFLAGS) -DTEST_THRMASK
+
+endif
+
+noinst_HEADERS = test_common_macros.h \
+ test_packet_custom.h \
+ test_packet_ipsec.h \
+ test_packet_ipv4.h \
+ test_packet_ipv4_with_crc.h \
+ test_packet_ipv6.h
diff --git a/test/common_plat/common/mask_common.c b/test/common/mask_common.c
index b31534c64..65c9c6629 100644
--- a/test/common_plat/common/mask_common.c
+++ b/test/common/mask_common.c
@@ -1,10 +1,11 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include "odp_cunit_common.h"
#include "mask_common.h"
@@ -457,7 +458,7 @@ MASK_TESTFUNC(next)
_odp_mask_from_str(&mask1, TEST_MASK_1_3);
- for (i = 0; i < sizeof(expected) / sizeof(int); i++)
+ for (i = 0; i < ODPH_ARRAY_SIZE(expected); i++)
CU_ASSERT(_odp_mask_next(&mask1, i) == expected[i]);
}
diff --git a/test/common_plat/common/mask_common.h b/test/common/mask_common.h
index e7a38a7c7..60c2390b8 100644
--- a/test/common_plat/common/mask_common.h
+++ b/test/common/mask_common.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
new file mode 100644
index 000000000..651ae791e
--- /dev/null
+++ b/test/common/odp_cunit_common.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include <odp/helper/odph_api.h>
+
+#include <CUnit/TestDB.h>
+
+#if defined __GNUC__ && (((__GNUC__ == 4) && \
+ (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#endif
+#include <CUnit/Automated.h>
+#if defined __GNUC__ && (((__GNUC__ == 4) && \
+ (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic pop
+#endif
+
+/* Globals */
+static int running_in_ci;
+static odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+static int threads_running;
+static odp_instance_t instance;
+static bool control_thread;
+static char *progname;
+static int (*thread_func)(void *);
+
+/*
+ * global init/term functions which may be registered
+ * defaults to functions performing odp init/term.
+ */
+static int tests_global_init(odp_instance_t *inst);
+static int tests_global_term(odp_instance_t inst);
+static struct {
+ int (*global_init_ptr)(odp_instance_t *inst);
+ int (*global_term_ptr)(odp_instance_t inst);
+} global_init_term = {tests_global_init, tests_global_term};
+
+static odp_suiteinfo_t *global_testsuites;
+
+#define MAX_STR_LEN 256
+#define MAX_FAILURES 10
+
+/* Recorded assertion failure for later CUnit call in the initial thread */
+typedef struct assertion_failure_t {
+ char cond[MAX_STR_LEN];
+ char file[MAX_STR_LEN];
+ unsigned int line;
+ int fatal;
+} assertion_failure_t;
+
+typedef struct thr_global_t {
+ assertion_failure_t failure[MAX_FAILURES];
+ unsigned long num_failures;
+} thr_global_t;
+
+static thr_global_t *thr_global;
+
+static __thread int initial_thread = 1; /* Are we the initial thread? */
+static __thread jmp_buf longjmp_env;
+
+void odp_cu_assert(CU_BOOL value, unsigned int line,
+ const char *condition, const char *file, CU_BOOL fatal)
+{
+ unsigned long idx;
+
+ if (initial_thread) {
+ CU_assertImplementation(value, line, condition, file, "", fatal);
+ return;
+ }
+
+ /* Assertion ok, just return */
+ if (value)
+ return;
+
+ /*
+ * Non-initial thread/process cannot call CUnit assert because:
+ *
+ * - CU_assertImplementation() is not thread-safe
+ * - In process mode an assertion failure would be lost because it
+ * would not be recorded in the memory of the initial process.
+ * - Fatal asserts in CUnit perform longjmp which cannot be done in
+ * an other thread or process that did the setjmp.
+ *
+ * --> Record the assertion failure in shared memory so that it can be
+ * processed later in the context of the initial thread/process.
+ * --> In fatal assert, longjmp within the current thread.
+ */
+
+ idx = __atomic_fetch_add(&thr_global->num_failures, 1, __ATOMIC_RELAXED);
+
+ if (idx < MAX_FAILURES) {
+ assertion_failure_t *a = &thr_global->failure[idx];
+
+ strncpy(a->cond, condition, sizeof(a->cond));
+ strncpy(a->file, file, sizeof(a->file));
+ a->cond[sizeof(a->cond) - 1] = 0;
+ a->file[sizeof(a->file) - 1] = 0;
+ a->line = line;
+ a->fatal = fatal;
+ }
+
+ if (fatal)
+ longjmp(longjmp_env, 1);
+}
+
+static void handle_postponed_asserts(void)
+{
+ unsigned long num = thr_global->num_failures;
+
+ if (num > MAX_FAILURES)
+ num = MAX_FAILURES;
+
+ for (unsigned long n = 0; n < num; n++) {
+ assertion_failure_t *a = &thr_global->failure[n];
+
+ /*
+ * Turn fatal failures into non-fatal failures as we are just
+ * reporting them. Threads that saw fatal failures which
+ * prevented them from continuing have already been stopped.
+ */
+ CU_assertImplementation(0, a->line, a->cond, a->file, "", CU_FALSE);
+ }
+ thr_global->num_failures = 0;
+}
+
+static int threads_init(void)
+{
+ static int initialized;
+
+ if (initialized)
+ return 0;
+
+ /*
+ * Use shared memory mapping for the global structure to make it
+ * visible in the child processes if running in process mode.
+ */
+ thr_global = mmap(NULL, sizeof(thr_global_t),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ if (thr_global == MAP_FAILED)
+ return -1;
+
+ initialized = 1;
+ return 0;
+}
+
+static int run_thread(void *arg)
+{
+ int rc;
+
+ /* Make sure this is zero also in process mode "threads" */
+ initial_thread = 0;
+
+ if (setjmp(longjmp_env) == 0) {
+ /* Normal return, proceed to the thread function. */
+ rc = (*thread_func)(arg);
+ } else {
+ /*
+ * Return from longjmp done by the thread function.
+ * We return 0 here since odph_thread_join() does not like
+ * nonzero exit statuses.
+ */
+ rc = 0;
+ }
+
+ return rc;
+}
+
+int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], int priv, int sync)
+{
+ int i, ret;
+ odp_cpumask_t cpumask;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[num];
+
+ if (num > ODP_THREAD_COUNT_MAX) {
+ fprintf(stderr, "error: %s: too many threads: num=%d max=%d\n", __func__,
+ num, ODP_THREAD_COUNT_MAX);
+ return -1;
+ }
+
+ if (threads_running) {
+ /* thread_tbl is already in use */
+ fprintf(stderr, "error: %s: threads already running\n", __func__);
+ return -1;
+ }
+
+ thread_func = func_ptr;
+
+ odph_thread_common_param_init(&thr_common);
+
+ if (arg == NULL)
+ priv = 0;
+
+ for (i = 0; i < num; i++) {
+ odph_thread_param_init(&thr_param[i]);
+
+ thr_param[i].start = run_thread;
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+
+ if (arg)
+ thr_param[i].arg = arg[i];
+ else
+ thr_param[i].arg = NULL;
+
+ if (priv == 0)
+ break;
+ }
+
+ odp_cpumask_default_worker(&cpumask, num);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = !priv;
+ thr_common.sync = sync;
+
+ /* Create and start additional threads */
+ ret = odph_thread_create(thread_tbl, &thr_common, thr_param, num);
+
+ if (ret != num)
+ fprintf(stderr, "error: odph_thread_create() failed.\n");
+
+ threads_running = (ret > 0);
+
+ return ret;
+}
+
+int odp_cunit_thread_join(int num)
+{
+ /* Wait for threads to exit */
+ if (odph_thread_join(thread_tbl, num) != num) {
+ fprintf(stderr, "error: odph_thread_join() failed.\n");
+ return -1;
+ }
+ threads_running = 0;
+ thread_func = 0;
+
+ handle_postponed_asserts();
+
+ return 0;
+}
+
+static int tests_global_init(odp_instance_t *inst)
+{
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+ odp_thread_type_t thr_type;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+
+ thr_type = control_thread ? ODP_THREAD_CONTROL : ODP_THREAD_WORKER;
+ if (0 != odp_init_local(*inst, thr_type)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+ if (0 != odp_schedule_config(NULL)) {
+ fprintf(stderr, "error: odp_schedule_config(NULL) failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tests_global_term(odp_instance_t inst)
+{
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * register tests_global_init and tests_global_term functions.
+ * If some of these functions are not registered, the defaults functions
+ * (tests_global_init() and tests_global_term()) defined above are used.
+ * One should use these register functions when defining these hooks.
+ * Note that passing NULL as function pointer is valid and will simply
+ * prevent the default (odp init/term) to be done.
+ */
+void odp_cunit_register_global_init(int (*func_init_ptr)(odp_instance_t *inst))
+{
+ global_init_term.global_init_ptr = func_init_ptr;
+}
+
+void odp_cunit_register_global_term(int (*func_term_ptr)(odp_instance_t inst))
+{
+ global_init_term.global_term_ptr = func_term_ptr;
+}
+
+static odp_suiteinfo_t *cunit_get_suite_info(const char *suite_name)
+{
+ odp_suiteinfo_t *sinfo;
+
+ for (sinfo = global_testsuites; sinfo->name; sinfo++)
+ if (strcmp(sinfo->name, suite_name) == 0)
+ return sinfo;
+
+ return NULL;
+}
+
+static odp_testinfo_t *cunit_get_test_info(odp_suiteinfo_t *sinfo,
+ const char *test_name)
+{
+ odp_testinfo_t *tinfo;
+
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++)
+ if (strcmp(tinfo->name, test_name) == 0)
+ return tinfo;
+
+ return NULL;
+}
+
+/* A wrapper for the suite's init function. This is done to allow for a
+ * potential runtime check to determine whether each test in the suite
+ * is active (enabled by using ODP_TEST_INFO_CONDITIONAL()). If present,
+ * the conditional check is run after the suite's init function.
+ */
+static int _cunit_suite_init(void)
+{
+ int ret = 0;
+ CU_pSuite cur_suite = CU_get_current_suite();
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+
+ /* find the suite currently being run */
+ cur_suite = CU_get_current_suite();
+ if (!cur_suite)
+ return -1;
+
+ sinfo = cunit_get_suite_info(cur_suite->pName);
+ if (!sinfo)
+ return -1;
+
+ /* execute its init function */
+ if (sinfo->init_func) {
+ ret = sinfo->init_func();
+ if (ret)
+ return ret;
+ }
+
+ /* run any configured conditional checks and mark inactive tests */
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ CU_pTest ptest;
+ CU_ErrorCode err;
+
+ if (!tinfo->check_active || tinfo->check_active())
+ continue;
+
+ /* test is inactive, mark it as such */
+ ptest = CU_get_test_by_name(tinfo->name, cur_suite);
+ if (ptest)
+ err = CU_set_test_active(ptest, CU_FALSE);
+ else
+ err = CUE_NOTEST;
+
+ if (err != CUE_SUCCESS) {
+ fprintf(stderr, "%s: failed to set test %s inactive\n",
+ __func__, tinfo->name);
+ return -1;
+ }
+ }
+
+ return ret;
+}
+
+/* Print names of all inactive tests of the suite. This should be called by
+ * every suite terminate function. Otherwise, inactive tests are not listed in
+ * test suite results. */
+int odp_cunit_print_inactive(void)
+{
+ CU_pSuite cur_suite;
+ CU_pTest ptest;
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+ int first = 1;
+
+ cur_suite = CU_get_current_suite();
+ if (cur_suite == NULL)
+ return -1;
+
+ sinfo = cunit_get_suite_info(cur_suite->pName);
+ if (sinfo == NULL)
+ return -1;
+
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ ptest = CU_get_test_by_name(tinfo->name, cur_suite);
+ if (ptest == NULL) {
+ fprintf(stderr, "%s: test not found: %s\n",
+ __func__, tinfo->name);
+ return -1;
+ }
+
+ if (ptest->fActive)
+ continue;
+
+ if (first) {
+ printf("\n\nSuite: %s\n", sinfo->name);
+ printf(" Inactive tests:\n");
+ first = 0;
+ }
+
+ printf(" %s\n", tinfo->name);
+ }
+
+ return 0;
+}
+
+int odp_cunit_set_inactive(void)
+{
+ CU_pSuite cur_suite;
+ CU_pTest ptest;
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+
+ cur_suite = CU_get_current_suite();
+ if (cur_suite == NULL)
+ return -1;
+
+ sinfo = cunit_get_suite_info(cur_suite->pName);
+ if (sinfo == NULL)
+ return -1;
+
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ ptest = CU_get_test_by_name(tinfo->name, cur_suite);
+ if (ptest == NULL) {
+ fprintf(stderr, "%s: test not found: %s\n",
+ __func__, tinfo->name);
+ return -1;
+ }
+ CU_set_test_active(ptest, false);
+ }
+
+ return 0;
+}
+
+static int default_term_func(void)
+{
+ return odp_cunit_print_inactive();
+}
+
+static void _cunit_test_setup_func(void)
+{
+ CU_AllTestsCompleteMessageHandler all_test_comp_handler;
+ CU_SuiteCompleteMessageHandler suite_comp_handler;
+ CU_pFailureRecord failrec;
+ CU_pSuite suite;
+
+ if (!getenv("ODP_CUNIT_FAIL_IMMEDIATE"))
+ return;
+
+ if (CU_get_number_of_failure_records() == 0)
+ return;
+
+ /* User wants the suite to fail immediately once a test hits an error */
+ suite = CU_get_current_suite();
+ failrec = CU_get_failure_list();
+
+ printf("Force aborting as a previous test failed\n");
+
+ /* Call the Cleanup functions before aborting */
+ suite->pCleanupFunc();
+
+ suite_comp_handler = CU_get_suite_complete_handler();
+ if (suite_comp_handler)
+ suite_comp_handler(suite, failrec);
+
+ all_test_comp_handler = CU_get_all_test_complete_handler();
+ if (all_test_comp_handler)
+ all_test_comp_handler(failrec);
+
+ exit(EXIT_FAILURE);
+}
+
+/*
+ * Register suites and tests with CUnit.
+ *
+ * Similar to CU_register_suites() but using locally defined wrapper
+ * types.
+ */
+static int cunit_register_suites(odp_suiteinfo_t testsuites[])
+{
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+ CU_pSuite suite;
+ CU_pTest test;
+ CU_CleanupFunc term_func;
+
+ for (sinfo = testsuites; sinfo->name; sinfo++) {
+ term_func = default_term_func;
+ if (sinfo->term_func)
+ term_func = sinfo->term_func;
+
+ suite = CU_add_suite_with_setup_and_teardown(sinfo->name, _cunit_suite_init,
+ term_func, _cunit_test_setup_func,
+ NULL);
+ if (!suite)
+ return CU_get_error();
+
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ test = CU_add_test(suite, tinfo->name,
+ tinfo->test_func);
+ if (!test)
+ return CU_get_error();
+ }
+ }
+
+ return 0;
+}
+
+static int cunit_update_test(CU_pSuite suite,
+ odp_suiteinfo_t *sinfo,
+ odp_testinfo_t *updated_tinfo)
+{
+ CU_pTest test = NULL;
+ CU_ErrorCode err;
+ odp_testinfo_t *tinfo;
+ const char *test_name = updated_tinfo->name;
+
+ tinfo = cunit_get_test_info(sinfo, test_name);
+ if (tinfo)
+ test = CU_get_test(suite, test_name);
+
+ if (!tinfo || !test) {
+ fprintf(stderr, "%s: unable to find existing test named %s\n",
+ __func__, test_name);
+ return -1;
+ }
+
+ err = CU_set_test_func(test, updated_tinfo->test_func);
+ if (err != CUE_SUCCESS) {
+ fprintf(stderr, "%s: failed to update test func for %s\n",
+ __func__, test_name);
+ return -1;
+ }
+
+ tinfo->check_active = updated_tinfo->check_active;
+
+ return 0;
+}
+
+static int cunit_update_suite(odp_suiteinfo_t *updated_sinfo)
+{
+ CU_pSuite suite = NULL;
+ CU_ErrorCode err;
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+
+ /* find previously registered suite with matching name */
+ sinfo = cunit_get_suite_info(updated_sinfo->name);
+
+ if (sinfo) {
+ /* lookup the associated CUnit suite */
+ suite = CU_get_suite_by_name(updated_sinfo->name,
+ CU_get_registry());
+ }
+
+ if (!sinfo || !suite) {
+ fprintf(stderr, "%s: unable to find existing suite named %s\n",
+ __func__, updated_sinfo->name);
+ return -1;
+ }
+
+ sinfo->init_func = updated_sinfo->init_func;
+ sinfo->term_func = updated_sinfo->term_func;
+
+ err = CU_set_suite_cleanupfunc(suite, updated_sinfo->term_func);
+ if (err != CUE_SUCCESS) {
+ fprintf(stderr, "%s: failed to update cleanup func for %s\n",
+ __func__, updated_sinfo->name);
+ return -1;
+ }
+
+ for (tinfo = updated_sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ int ret;
+
+ ret = cunit_update_test(suite, sinfo, tinfo);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Run tests previously registered via odp_cunit_register()
+ */
+int odp_cunit_run(void)
+{
+ int ret;
+
+ printf("\tODP API version: %s\n", odp_version_api_str());
+ printf("\tODP implementation name: %s\n", odp_version_impl_name());
+ printf("\tODP implementation version: %s\n", odp_version_impl_str());
+
+ if (getenv("ODP_TEST_OUT_XML")) {
+ CU_set_output_filename(progname);
+ CU_automated_run_tests();
+ } else {
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ }
+
+ ret = CU_get_number_of_failure_records();
+
+ CU_cleanup_registry();
+
+ /* call test executable termination hook, if any */
+ if (global_init_term.global_term_ptr &&
+ ((*global_init_term.global_term_ptr)(instance) != 0))
+ return -1;
+
+ return (ret) ? -1 : 0;
+}
+
+/*
+ * Update suites/tests previously registered via odp_cunit_register().
+ *
+ * Note that this is intended for modifying the properties of already
+ * registered suites/tests. New suites/tests can only be registered via
+ * odp_cunit_register().
+ */
+int odp_cunit_update(odp_suiteinfo_t testsuites[])
+{
+ int ret = 0;
+ odp_suiteinfo_t *sinfo;
+
+ for (sinfo = testsuites; sinfo->name && ret == 0; sinfo++)
+ ret = cunit_update_suite(sinfo);
+
+ return ret;
+}
+
+/*
+ * Register test suites to be run via odp_cunit_run()
+ */
+int odp_cunit_register(odp_suiteinfo_t testsuites[])
+{
+ if (threads_init())
+ return -1;
+
+ /* call test executable init hook, if any */
+ if (global_init_term.global_init_ptr) {
+ if ((*global_init_term.global_init_ptr)(&instance) == 0) {
+ /* After ODP initialization, set main thread's
+ * CPU affinity to the 1st available control CPU core
+ */
+ int cpu = 0;
+ odp_cpumask_t cpuset;
+
+ odp_cpumask_zero(&cpuset);
+ if (odp_cpumask_default_control(&cpuset, 1) == 1) {
+ cpu = odp_cpumask_first(&cpuset);
+ odph_odpthread_setaffinity(cpu);
+ }
+ } else {
+ /* ODP initialization failed */
+ return -1;
+ }
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+
+ CU_initialize_registry();
+ global_testsuites = testsuites;
+ cunit_register_suites(testsuites);
+ CU_set_fail_on_inactive(CU_FALSE);
+
+ return 0;
+}
+
+/*
+ * Parse command line options to extract options affecting cunit_common.
+ * (hence also helpers options as cunit_common uses the helpers)
+ * Options private to the test calling cunit_common are not parsed here.
+ */
+int odp_cunit_parse_options(int *argc, char *argv[])
+{
+ const char *ctrl_thread_env = getenv("CI_THREAD_TYPE_CONTROL");
+ const char *env = getenv("CI");
+
+ progname = argv[0];
+ *argc = odph_parse_options(*argc, argv);
+ /* Check if we need to use control thread */
+ if (ctrl_thread_env && !strcmp(ctrl_thread_env, "true"))
+ control_thread = true;
+
+ if (env && !strcmp(env, "true")) {
+ running_in_ci = 1;
+ ODPH_DBG("\nWARNING: test result can be used for code coverage only.\n"
+ "CI=true env variable is set!\n");
+ }
+
+ return 0;
+}
+
+int odp_cunit_ret(int val)
+{
+ return running_in_ci ? 0 : val;
+}
+
+int odp_cunit_ci(void)
+{
+ return running_in_ci;
+}
+
+int odp_cunit_ci_skip(const char *test_name)
+{
+ const char *ci_skip;
+ const char *found;
+
+ ci_skip = getenv("CI_SKIP");
+ if (ci_skip == NULL)
+ return 0;
+
+ found = strstr(ci_skip, test_name);
+
+ return found != NULL;
+}
diff --git a/test/common/odp_cunit_common.h b/test/common/odp_cunit_common.h
new file mode 100644
index 000000000..63e95d5fb
--- /dev/null
+++ b/test/common/odp_cunit_common.h
@@ -0,0 +1,201 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP test application common headers
+ */
+
+#ifndef ODP_CUNIT_COMMON_H
+#define ODP_CUNIT_COMMON_H
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <CUnit/Basic.h>
+#include <odp_api.h>
+
+typedef int (*cunit_test_check_active)(void);
+
+typedef struct {
+ const char *name;
+ CU_TestFunc test_func;
+ cunit_test_check_active check_active;
+} odp_testinfo_t;
+
+typedef struct {
+ const char *name;
+ CU_InitializeFunc init_func;
+ CU_CleanupFunc term_func;
+ odp_testinfo_t *testinfo_tbl;
+} odp_suiteinfo_t;
+
+static inline int odp_cunit_test_inactive(void) { return 0; }
+static inline void odp_cunit_test_missing(void) { }
+
+/* An active test case, with the test name matching the test function name */
+#define ODP_TEST_INFO(test_func) \
+ {#test_func, test_func, NULL}
+
+/* A test case that is unconditionally inactive. Its name will be registered
+ * with CUnit but it won't be executed and will be reported as inactive in
+ * the result summary. */
+#define ODP_TEST_INFO_INACTIVE(test_func, ...) \
+ {#test_func, odp_cunit_test_missing, odp_cunit_test_inactive}
+
+#define ODP_TEST_INACTIVE 0
+#define ODP_TEST_ACTIVE 1
+
+/* A test case that may be marked as inactive at runtime based on the
+ * return value of the cond_func function. A return value of ODP_TEST_INACTIVE
+ * means inactive, ODP_TEST_ACTIVE means active. */
+#define ODP_TEST_INFO_CONDITIONAL(test_func, cond_func) \
+ {#test_func, test_func, cond_func}
+
+#define ODP_TEST_INFO_NULL {NULL, NULL, NULL}
+#define ODP_SUITE_INFO_NULL {NULL, NULL, NULL, NULL}
+
+typedef struct {
+ uint32_t foo;
+ uint32_t bar;
+} test_shared_data_t;
+
+/* parse parameters that affect the behaviour of odp_cunit_common */
+int odp_cunit_parse_options(int *argc, char *argv[]);
+/* register suites to be run via odp_cunit_run() */
+int odp_cunit_register(odp_suiteinfo_t testsuites[]);
+/* update tests previously registered via odp_cunit_register() */
+int odp_cunit_update(odp_suiteinfo_t testsuites[]);
+/* the function, called by module main(), to run the testsuites: */
+int odp_cunit_run(void);
+
+/* Create threads for a validation test
+ *
+ * Thread arguments table (arg[]) can be set to NULL, when there are no arguments.
+ * When 'priv' is 0, the same argument pointer (arg[0]) is passed to all threads. Otherwise,
+ * a pointer is passed (from arg[]) to each thread. When 'sync' is 1, thread
+ * creation is synchronized (odph_thread_common_param_t.sync). Returns 0 on success.
+ */
+int odp_cunit_thread_create(int num, int func_ptr(void *arg), void *const arg[],
+ int priv, int sync);
+
+/* Wait for previously created threads to exit */
+int odp_cunit_thread_join(int num);
+
+/**
+ * Global tests initialization/termination.
+ *
+ * Initialize global resources needed by the test executable. Default
+ * definition does ODP init / term (both global and local).
+ * Test executables can override it by calling one of the register function
+ * below.
+ * The functions are called at the very beginning and very end of the test
+ * execution. Passing NULL to odp_cunit_register_global_init() and/or
+ * odp_cunit_register_global_term() is legal and will simply prevent the
+ * default (ODP init/term) to be done.
+ */
+void odp_cunit_register_global_init(int (*func_init_ptr)(odp_instance_t *inst));
+
+void odp_cunit_register_global_term(int (*func_term_ptr)(odp_instance_t inst));
+
+int odp_cunit_ret(int val);
+int odp_cunit_ci(void);
+int odp_cunit_print_inactive(void);
+int odp_cunit_set_inactive(void);
+
+/* Check from CI_SKIP environment variable if the test case should be skipped by CI */
+int odp_cunit_ci_skip(const char *test_name);
+
+void odp_cu_assert(CU_BOOL value, unsigned int line,
+ const char *condition, const char *file, CU_BOOL fatal);
+
+/*
+ * Wrapper for CU_assertImplementation for the fatal asserts to show the
+ * compiler and static analyzers that the function does not return if the
+ * assertion fails. This reduces bogus warnings generated from the code
+ * after the fatal assert.
+ */
+static inline void odp_cu_assert_fatal(CU_BOOL value, unsigned int line,
+ const char *condition, const char *file)
+{
+ odp_cu_assert(value, line, condition, file, CU_TRUE);
+
+ if (!value) {
+ /* not reached */
+ abort(); /* this has noreturn function attribute */
+ for (;;) /* this also shows that return is not possible */
+ ;
+ }
+}
+
+/*
+ * Redefine the macros used in ODP. Do it without the do-while idiom for
+ * compatibility with CU and existing code that assumes this kind of macros.
+ */
+
+#undef CU_ASSERT
+#define CU_ASSERT(value) \
+ { odp_cu_assert((value), __LINE__, #value, __FILE__, CU_FALSE); }
+
+#undef CU_ASSERT_FATAL
+#define CU_ASSERT_FATAL(value) \
+ { odp_cu_assert_fatal((value), __LINE__, #value, __FILE__); }
+
+#undef CU_FAIL
+#define CU_FAIL(msg) \
+ { odp_cu_assert(CU_FALSE, __LINE__, ("CU_FAIL(" #msg ")"), __FILE__, CU_FALSE); }
+
+#undef CU_FAIL_FATAL
+#define CU_FAIL_FATAL(msg) \
+ { odp_cu_assert_fatal(CU_FALSE, __LINE__, ("CU_FAIL_FATAL(" #msg ")"), __FILE__); }
+
+#undef CU_ASSERT_EQUAL
+#define CU_ASSERT_EQUAL(actual, expected) \
+ { odp_cu_assert(((actual) == (expected)), __LINE__, \
+ ("CU_ASSERT_EQUAL(" #actual "," #expected ")"), \
+ __FILE__, CU_FALSE); }
+
+#undef CU_ASSERT_EQUAL_FATAL
+#define CU_ASSERT_EQUAL_FATAL(actual, expected) \
+ { odp_cu_assert_fatal(((actual) == (expected)), __LINE__, \
+ ("CU_ASSERT_EQUAL_FATAL(" #actual "," #expected ")"), \
+ __FILE__); }
+
+#undef CU_ASSERT_NOT_EQUAL
+#define CU_ASSERT_NOT_EQUAL(actual, expected) \
+ { odp_cu_assert(((actual) != (expected)), __LINE__, \
+ ("CU_ASSERT_NOT_EQUAL(" #actual "," #expected ")"), \
+ __FILE__, CU_FALSE); }
+
+#undef CU_ASSERT_NOT_EQUAL_FATAL
+#define CU_ASSERT_NOT_EQUAL_FATAL(actual, expected) \
+ { odp_cu_assert_fatal(((actual) != (expected)), __LINE__, \
+ ("CU_ASSERT_NOT_EQUAL_FATAL(" #actual "," #expected ")"), \
+ __FILE__); }
+
+#undef CU_ASSERT_PTR_NULL
+#define CU_ASSERT_PTR_NULL(value) \
+ { odp_cu_assert((NULL == (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NULL(" #value ")"), __FILE__, CU_FALSE); }
+
+#undef CU_ASSERT_PTR_NULL_FATAL
+#define CU_ASSERT_PTR_NULL_FATAL(value) \
+ { odp_cu_assert_fatal((NULL == (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NULL_FATAL(" #value ")"), __FILE__); }
+
+#undef CU_ASSERT_PTR_NOT_NULL
+#define CU_ASSERT_PTR_NOT_NULL(value) \
+ { odp_cu_assert((NULL != (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NOT_NULL(" #value ")"), __FILE__, CU_FALSE); }
+
+#undef CU_ASSERT_PTR_NOT_NULL_FATAL
+#define CU_ASSERT_PTR_NOT_NULL_FATAL(value) \
+ { odp_cu_assert_fatal((NULL != (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NOT_NULL_FATAL(" #value ")"), __FILE__); }
+
+#endif /* ODP_CUNIT_COMMON_H */
diff --git a/test/common/packet_common.c b/test/common/packet_common.c
new file mode 100644
index 000000000..e0bca3147
--- /dev/null
+++ b/test/common/packet_common.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <packet_common.h>
+#include <string.h>
+
+void test_packet_set_md(odp_packet_t pkt)
+{
+ const int binary_flag = 1;
+ const uint32_t offset = 1;
+ uint8_t *uarea = odp_packet_user_area(pkt);
+ uint32_t uarea_size = odp_packet_user_area_size(pkt);
+
+ for (uint32_t n = 0; n < uarea_size; n++)
+ uarea[n] = n;
+
+ /*
+ * Some of these flags cannot be set simultaneously, but we do not
+ * care here if some flags get cleared.
+ */
+ odp_packet_has_l2_set(pkt, binary_flag);
+ odp_packet_has_l3_set(pkt, binary_flag);
+ odp_packet_has_l4_set(pkt, binary_flag);
+ odp_packet_has_eth_set(pkt, binary_flag);
+ odp_packet_has_eth_bcast_set(pkt, binary_flag);
+ odp_packet_has_eth_mcast_set(pkt, !binary_flag);
+ odp_packet_has_jumbo_set(pkt, binary_flag);
+ odp_packet_has_vlan_set(pkt, binary_flag);
+ odp_packet_has_vlan_qinq_set(pkt, binary_flag);
+ odp_packet_has_arp_set(pkt, binary_flag);
+ odp_packet_has_ipv4_set(pkt, binary_flag);
+ odp_packet_has_ipv6_set(pkt, !binary_flag);
+ odp_packet_has_ip_bcast_set(pkt, binary_flag);
+ odp_packet_has_ip_mcast_set(pkt, binary_flag);
+ odp_packet_has_ipfrag_set(pkt, binary_flag);
+ odp_packet_has_ipopt_set(pkt, binary_flag);
+ odp_packet_has_ipsec_set(pkt, binary_flag);
+ odp_packet_has_udp_set(pkt, binary_flag);
+ odp_packet_has_tcp_set(pkt, !binary_flag);
+ odp_packet_has_sctp_set(pkt, binary_flag);
+ odp_packet_has_icmp_set(pkt, binary_flag);
+
+ odp_packet_user_ptr_set(pkt, &pkt);
+ odp_packet_user_flag_set(pkt, binary_flag);
+ (void)odp_packet_l2_offset_set(pkt, offset);
+ (void)odp_packet_l3_offset_set(pkt, offset);
+ (void)odp_packet_l4_offset_set(pkt, offset);
+ odp_packet_flow_hash_set(pkt, 0x12345678);
+ odp_packet_ts_set(pkt, odp_time_local_from_ns(ODP_TIME_SEC_IN_NS));
+ odp_packet_color_set(pkt, ODP_PACKET_YELLOW);
+ odp_packet_drop_eligible_set(pkt, binary_flag);
+ odp_packet_shaper_len_adjust_set(pkt, -42);
+ (void)odp_packet_payload_offset_set(pkt, offset);
+}
+
+void test_packet_get_md(odp_packet_t pkt, test_packet_md_t *md)
+{
+ uint8_t *uarea = odp_packet_user_area(pkt);
+ uint32_t uarea_size = odp_packet_user_area_size(pkt);
+
+ memset(md, 0, sizeof(*md));
+
+ if (uarea)
+ md->user_area_chksum = odp_chksum_ones_comp16(uarea, uarea_size);
+
+ md->has_error = !!odp_packet_has_error(pkt);
+ md->has_l2_error = !!odp_packet_has_l2_error(pkt);
+ md->has_l3_error = !!odp_packet_has_l3_error(pkt);
+ md->has_l4_error = !!odp_packet_has_l4_error(pkt);
+ md->has_l2 = !!odp_packet_has_l2(pkt);
+ md->has_l3 = !!odp_packet_has_l3(pkt);
+ md->has_l4 = !!odp_packet_has_l4(pkt);
+ md->has_eth = !!odp_packet_has_eth(pkt);
+ md->has_eth_bcast = !!odp_packet_has_eth_bcast(pkt);
+ md->has_eth_mcast = !!odp_packet_has_eth_mcast(pkt);
+ md->has_jumbo = !!odp_packet_has_jumbo(pkt);
+ md->has_vlan = !!odp_packet_has_vlan(pkt);
+ md->has_vlan_qinq = !!odp_packet_has_vlan_qinq(pkt);
+ md->has_arp = !!odp_packet_has_arp(pkt);
+ md->has_ipv4 = !!odp_packet_has_ipv4(pkt);
+ md->has_ipv6 = !!odp_packet_has_ipv6(pkt);
+ md->has_ip_bcast = !!odp_packet_has_ip_bcast(pkt);
+ md->has_ip_mcast = !!odp_packet_has_ip_mcast(pkt);
+ md->has_ipfrag = !!odp_packet_has_ipfrag(pkt);
+ md->has_ipopt = !!odp_packet_has_ipopt(pkt);
+ md->has_ipsec = !!odp_packet_has_ipsec(pkt);
+ md->has_udp = !!odp_packet_has_udp(pkt);
+ md->has_tcp = !!odp_packet_has_tcp(pkt);
+ md->has_sctp = !!odp_packet_has_sctp(pkt);
+ md->has_icmp = !!odp_packet_has_icmp(pkt);
+ md->has_flow_hash = !!odp_packet_has_flow_hash(pkt);
+ md->has_ts = !!odp_packet_has_ts(pkt);
+
+ md->len = odp_packet_len(pkt);
+ md->packet_pool = odp_packet_pool(pkt);
+ md->packet_input = odp_packet_input(pkt);
+ md->user_ptr = odp_packet_user_ptr(pkt);
+ md->user_flag = odp_packet_user_flag(pkt);
+ md->l2_offset = odp_packet_l2_offset(pkt);
+ md->l3_offset = odp_packet_l3_offset(pkt);
+ md->l4_offset = odp_packet_l4_offset(pkt);
+ md->l2_type = odp_packet_l2_type(pkt);
+ md->l3_type = odp_packet_l3_type(pkt);
+ md->l4_type = odp_packet_l4_type(pkt);
+ md->l3_chksum_status = odp_packet_l3_chksum_status(pkt);
+ md->l4_chksum_status = odp_packet_l4_chksum_status(pkt);
+ md->flow_hash = md->has_flow_hash ? odp_packet_flow_hash(pkt) : 0;
+ md->ts = md->has_ts ? odp_packet_ts(pkt) : odp_time_global_from_ns(0);
+ md->color = odp_packet_color(pkt);
+ md->drop_eligible = !!odp_packet_drop_eligible(pkt);
+ md->shaper_len_adjust = odp_packet_shaper_len_adjust(pkt);
+ md->cls_mark = odp_packet_cls_mark(pkt);
+ md->has_lso_request = !!odp_packet_has_lso_request(pkt);
+ md->payload_offset = odp_packet_payload_offset(pkt);
+ md->aging_tmo = odp_packet_aging_tmo(pkt);
+ md->has_tx_compl_request = !!odp_packet_has_tx_compl_request(pkt);
+ md->proto_stats = odp_packet_proto_stats(pkt);
+}
+
+int test_packet_is_md_equal(const test_packet_md_t *md_1,
+ const test_packet_md_t *md_2)
+{
+ /*
+ * With certain assumptions this should typically work. If it does
+ * not, we would get false negatives which we should spot as test
+ * failures.
+ */
+
+ return (!memcmp(md_1, md_2, sizeof(*md_1)));
+}
diff --git a/test/common/packet_common.h b/test/common/packet_common.h
new file mode 100644
index 000000000..c7cd5e27f
--- /dev/null
+++ b/test/common/packet_common.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+
+typedef struct test_packet_md_t {
+ int has_error;
+ int has_l2_error;
+ int has_l3_error;
+ int has_l4_error;
+ int has_l2;
+ int has_l3;
+ int has_l4;
+ int has_eth;
+ int has_eth_bcast;
+ int has_eth_mcast;
+ int has_jumbo;
+ int has_vlan;
+ int has_vlan_qinq;
+ int has_arp;
+ int has_ipv4;
+ int has_ipv6;
+ int has_ip_bcast;
+ int has_ip_mcast;
+ int has_ipfrag;
+ int has_ipopt;
+ int has_ipsec;
+ int has_udp;
+ int has_tcp;
+ int has_sctp;
+ int has_icmp;
+ int has_flow_hash;
+ int has_ts;
+ uint32_t len;
+ odp_pool_t packet_pool;
+ odp_pktio_t packet_input;
+ void *user_ptr;
+ uint16_t user_area_chksum;
+ int user_flag;
+ uint32_t l2_offset;
+ uint32_t l3_offset;
+ uint32_t l4_offset;
+ odp_proto_l2_type_t l2_type;
+ odp_proto_l3_type_t l3_type;
+ odp_proto_l4_type_t l4_type;
+ odp_packet_chksum_status_t l3_chksum_status;
+ odp_packet_chksum_status_t l4_chksum_status;
+ uint32_t flow_hash;
+ odp_time_t ts;
+ odp_packet_color_t color;
+ odp_bool_t drop_eligible;
+ int8_t shaper_len_adjust;
+ uint64_t cls_mark;
+ int has_lso_request;
+ uint32_t payload_offset;
+ uint64_t aging_tmo;
+ int has_tx_compl_request;
+ odp_proto_stats_t proto_stats;
+} test_packet_md_t;
+
+void test_packet_set_md(odp_packet_t pkt);
+void test_packet_get_md(odp_packet_t pkt, test_packet_md_t *md);
+int test_packet_is_md_equal(const test_packet_md_t *md_1,
+ const test_packet_md_t *md_2);
diff --git a/test/common/test_common_macros.h b/test/common/test_common_macros.h
new file mode 100644
index 000000000..344ac8159
--- /dev/null
+++ b/test/common/test_common_macros.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_COMMON_MACROS_H_
+#define TEST_COMMON_MACROS_H_
+
+/*
+ * Common macros for validation tests
+ */
+
+/* Check if 'x' is a power of two value */
+#define TEST_CHECK_POW2(x) ((((x) - 1) & (x)) == 0)
+
+#endif
diff --git a/test/common/test_packet_custom.h b/test/common/test_packet_custom.h
new file mode 100644
index 000000000..7ff652bd8
--- /dev/null
+++ b/test/common/test_packet_custom.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2020-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_PACKET_CUSTOM_H_
+#define TEST_PACKET_CUSTOM_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Test packets without CRC */
+
+/* Ethernet type 0x88B5: IEEE Std 802 - Local Experimental Ethertype 1
+ *
+ * Imaginary, custom protocol on top of Ethernet.
+ */
+static const uint8_t test_packet_custom_eth_1[] = {
+ /* Ethernet */
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00,
+ 0x00, 0x00, 0x09, 0x00, 0x04, 0x00, 0x88, 0xB5,
+ /* Header fields (16 bit):
+ * packet length, segment number, segment offset, port number */
+ 0x00, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x05, 0x67,
+ /* Payload 701 bytes */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
+ 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+ 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+ 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,
+ 0x00, 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07,
+ 0x00, 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B,
+ 0x00, 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F,
+ 0x00, 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13,
+ 0x00, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17,
+ 0x00, 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B,
+ 0x00, 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F,
+ 0x00, 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23,
+ 0x00, 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27,
+ 0x00, 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B,
+ 0x00, 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F,
+ 0x00, 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33,
+ 0x00, 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37,
+ 0x00, 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B,
+ 0x00, 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F,
+ 0x00, 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43,
+ 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47,
+ 0x00, 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B,
+ 0x00, 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F,
+ 0x00, 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53,
+ 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57,
+ 0x00, 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B,
+ 0x00, 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F,
+ 0x00, 0x60, 0x00, 0x61, 0x00, 0x62, 0x00, 0x63,
+ 0x00, 0x64, 0x00, 0x65, 0x00, 0x66, 0x00, 0x67,
+ 0x00, 0x68, 0x00, 0x69, 0x00, 0x6A, 0x00, 0x6B,
+ 0x00, 0x6C, 0x00, 0x6D, 0x00, 0x6E, 0x00, 0x6F,
+ 0x00, 0x70, 0x00, 0x71, 0x00, 0x72, 0x00, 0x73,
+ 0x00, 0x74, 0x00, 0x75, 0x00, 0x76, 0x00, 0x77,
+ 0x00, 0x78, 0x00, 0x79, 0x00, 0x7A, 0x00, 0x7B,
+ 0x00, 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F,
+ 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00,
+ 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00,
+ 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00,
+ 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00,
+ 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00,
+ 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00,
+ 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00,
+ 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00,
+ 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00,
+ 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00,
+ 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00,
+ 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00,
+ 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00,
+ 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00,
+ 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00,
+ 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
+ 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
+ 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
+ 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
+ 0xDC, 0x00, 0xDD, 0x00, 0xDE
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common/test_packet_ipsec.h b/test/common/test_packet_ipsec.h
new file mode 100644
index 000000000..918870c99
--- /dev/null
+++ b/test/common/test_packet_ipsec.h
@@ -0,0 +1,188 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_PACKET_IPSEC_H_
+#define TEST_PACKET_IPSEC_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Test packets without CRC */
+
+static const uint8_t test_packet_ipv4_ipsec_ah[] = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IPv4 */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xd9, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x6c, 0x2e, 0xf7, 0x1f, 0x7c, 0x70, 0x39, 0xa3,
+ 0x4a, 0x77, 0x01, 0x47, 0x9e, 0x45, 0x73, 0x51,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b
+};
+
+static const uint8_t test_packet_ipv4_ipsec_esp[] = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IPv4 */
+ 0x45, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0x19, 0x18, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IPv4 */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x04,
+
+ /* ICV */
+ 0x73, 0x8d, 0xf6, 0x9a, 0x26, 0x06, 0x4d, 0xa1,
+ 0x88, 0x37, 0x65, 0xab, 0x0d, 0xe9, 0x95, 0x3b
+};
+
+static const uint8_t test_packet_ipv6_ipsec_ah[] = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IPv6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x33, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* AH */
+ 0x29, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x62, 0x96, 0x2b, 0x40, 0x3e, 0x53, 0x76, 0x4a,
+ 0x4d, 0x7f, 0xf6, 0x22, 0x35, 0x3c, 0x74, 0xe2,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* IPv6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b
+};
+
+static const uint8_t test_packet_ipv6_ipsec_esp[] = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IPv6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x32, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IPv4 */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x04,
+
+ /* ICV */
+ 0x73, 0x8d, 0xf6, 0x9a, 0x26, 0x06, 0x4d, 0xa1,
+ 0x88, 0x37, 0x65, 0xab, 0x0d, 0xe9, 0x95, 0x3b
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common/test_packet_ipv4.h b/test/common/test_packet_ipv4.h
new file mode 100644
index 000000000..8dd98d60d
--- /dev/null
+++ b/test/common/test_packet_ipv4.h
@@ -0,0 +1,459 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_PACKET_IPV4_H_
+#define TEST_PACKET_IPV4_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Test packets without CRC */
+
+/* ARP request */
+static const uint8_t test_packet_arp[] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x06, 0x00, 0x01,
+ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0xC0, 0xA8, 0x01, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0xA8,
+ 0x01, 0x02, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11
+};
+
+/* ICMPv4 echo reply */
+static const uint8_t test_packet_ipv4_icmp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x01,
+ 0xF3, 0x7B, 0xC0, 0xA8, 0x01, 0x01, 0xC4, 0xA8,
+ 0x01, 0x02, 0x00, 0x00, 0xB7, 0xAB, 0x00, 0x01,
+ 0x00, 0x02, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11
+};
+
+/* IPv4 TCP */
+static const uint8_t test_packet_ipv4_tcp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x06,
+ 0xF3, 0x76, 0xC0, 0xA8, 0x01, 0x02, 0xC4, 0xA8,
+ 0x01, 0x01, 0x04, 0xD2, 0x10, 0xE1, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x50, 0x02,
+ 0x00, 0x00, 0x0C, 0xCA, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05
+};
+
+/* IPv4 UDP */
+static const uint8_t test_packet_ipv4_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF3, 0x6B, 0xC0, 0xA8, 0x01, 0x02, 0xC4, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1A,
+ 0x2F, 0x97, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11
+};
+
+/* ETH SNAP IPv4 UDP */
+static const uint8_t test_packet_snap_ipv4_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x00, 0x36, 0xAA, 0xAA,
+ 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF7, 0x6B, 0xC0, 0xA8, 0x01, 0x02, 0xC0, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1A,
+ 0x33, 0x97, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11
+};
+
+/* VLAN IPv4 UDP
+ * - type 0x8100, tag 23
+ */
+static const uint8_t test_packet_vlan_ipv4_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x81, 0x00, 0x00, 0x17,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x2A, 0x00, 0x00,
+ 0x00, 0x00, 0x40, 0x11, 0xF3, 0x6F, 0xC0, 0xA8,
+ 0x01, 0x02, 0xC4, 0xA8, 0x01, 0x01, 0x00, 0x3F,
+ 0x00, 0x3F, 0x00, 0x16, 0x4D, 0xBF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0A, 0x0B, 0x0C, 0x0D
+};
+
+/* VLAN Q-in-Q IPv4 UDP
+ * - Outer: type 0x88a8, tag 1
+ * - Inner: type 0x8100, tag 2
+ */
+static const uint8_t test_packet_vlan_qinq_ipv4_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x88, 0xA8, 0x00, 0x01,
+ 0x81, 0x00, 0x00, 0x02, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF3, 0x73, 0xC0, 0xA8, 0x01, 0x02, 0xC4, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x12,
+ 0x63, 0xDF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09
+};
+
+/* IPv4 SCTP
+ * - chunk type: payload data
+ */
+static const uint8_t test_packet_ipv4_sctp[] = {
+ 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x77, 0x00, 0x01, 0x00, 0x00, 0x40, 0x84,
+ 0xF8, 0xAE, 0xC0, 0xA8, 0x00, 0x01, 0xC0, 0xA8,
+ 0x00, 0x02, 0x04, 0xD2, 0x16, 0x2E, 0xDE, 0xAD,
+ 0xBE, 0xEF, 0x31, 0x44, 0xE3, 0xFE, 0x00, 0x00,
+ 0x00, 0x57, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x68,
+ 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79,
+ 0x20, 0x64, 0x75, 0x6D, 0x6D, 0x79, 0x20, 0x70,
+ 0x61, 0x79, 0x6C, 0x6F, 0x61, 0x64, 0x20, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x2E, 0x20, 0x54,
+ 0x68, 0x65, 0x20, 0x6C, 0x65, 0x6E, 0x67, 0x74,
+ 0x68, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69,
+ 0x73, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67,
+ 0x20, 0x69, 0x73, 0x20, 0x37, 0x31, 0x20, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x2E
+};
+
+static const uint8_t test_packet_mcast_eth_ipv4_udp[] = {
+ 0x03, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x02, 0x00,
+ 0x00, 0x03, 0x04, 0x05, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x63, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0xC8, 0xDB, 0xC0, 0xA8, 0x00, 0x01, 0xEF, 0x01,
+ 0x02, 0x03, 0x04, 0xD2, 0x16, 0x2E, 0x00, 0x4F,
+ 0x25, 0x61, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
+static const uint8_t test_packet_bcast_eth_ipv4_udp[] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x02, 0x00,
+ 0x00, 0x03, 0x04, 0x05, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x63, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0xB9, 0xE0, 0xC0, 0xA8, 0x00, 0x01, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x04, 0xD2, 0x16, 0x2E, 0x00, 0x4F,
+ 0x16, 0x66, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
+static const uint8_t test_packet_ipv4_udp_first_frag[] = {
+ 0x02, 0x00, 0x00, 0x04, 0x05, 0x06, 0x02, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x63, 0x00, 0x01, 0x20, 0x00, 0x40, 0x11,
+ 0xD9, 0x35, 0xC0, 0xA8, 0x00, 0x01, 0xC0, 0xA8,
+ 0x00, 0x02, 0x04, 0xD2, 0x16, 0x2E, 0x01, 0x17,
+ 0x54, 0xF3, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
+static const uint8_t test_packet_ipv4_udp_last_frag[] = {
+ 0x02, 0x00, 0x00, 0x04, 0x05, 0x06, 0x02, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5B, 0x00, 0x01, 0x00, 0x0A, 0x40, 0x11,
+ 0xF9, 0x33, 0xC0, 0xA8, 0x00, 0x01, 0xC0, 0xA8,
+ 0x00, 0x02, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
+/* IPv4 / Record Route + NOP options / ICMP */
+static const uint8_t test_packet_ipv4_rr_nop_icmp[] = {
+ 0x02, 0x00, 0x00, 0x04, 0x05, 0x06, 0x02, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x08, 0x00, 0x49, 0x00,
+ 0x00, 0x2C, 0x00, 0x01, 0x00, 0x00, 0x40, 0x01,
+ 0x8E, 0xE2, 0xC0, 0xA8, 0x00, 0x01, 0xC0, 0xA8,
+ 0x00, 0x02, 0x07, 0x0F, 0x0C, 0xC0, 0xA8, 0x04,
+ 0x01, 0xC0, 0xA8, 0x05, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x08, 0x00, 0xF7, 0xFF, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+/* Ethernet/IPv4/UDP packet. Ethernet frame length 325 bytes (+ CRC).
+ * - source IP addr: 192.168.1.2
+ * - destination IP addr: 192.168.1.1
+ */
+static const uint8_t test_packet_ipv4_udp_325[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x01, 0x37, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF6, 0x62, 0xC0, 0xA8, 0x01, 0x02, 0xC0, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x01, 0x23,
+ 0x02, 0xED, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A
+};
+
+/* Ethernet/IPv4/UDP packet. Ethernet frame length 1500 bytes (+ CRC). */
+static const uint8_t test_packet_ipv4_udp_1500[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x05, 0xCE, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF1, 0xCB, 0xC0, 0xA8, 0x01, 0x02, 0xC0, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x05, 0xBA,
+ 0xF8, 0x59, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5,
+ 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD,
+ 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5,
+ 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD,
+ 0xDE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED,
+ 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5,
+ 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD,
+ 0xFE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
+ 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
+ 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D,
+ 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
+ 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D,
+ 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D,
+ 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75,
+ 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D,
+ 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+ 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D,
+ 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
+ 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D,
+ 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD,
+ 0xAE, 0xAF, 0xB0, 0xB1
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common/test_packet_ipv4_with_crc.h b/test/common/test_packet_ipv4_with_crc.h
new file mode 100644
index 000000000..f10c405e1
--- /dev/null
+++ b/test/common/test_packet_ipv4_with_crc.h
@@ -0,0 +1,234 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_PACKET_IPV4_WITH_CRC_H_
+#define TEST_PACKET_IPV4_WITH_CRC_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Ethernet/IPv4/UDP test packets with CRC. Last 4 bytes are the Ethernet FCS. */
+
+/* Frame length is 64 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_64_crc[] = {
+ 0x12, 0xA8, 0x87, 0x93, 0x25, 0x39, 0x1A, 0x29,
+ 0x92, 0x49, 0x00, 0x32, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAB, 0x8D, 0xC0, 0xA8, 0xDE, 0xC7, 0xC0, 0xA8,
+ 0x6F, 0x19, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1A,
+ 0xA8, 0x69, 0xD3, 0xFA, 0x53, 0xCD, 0xFF, 0xF7,
+ 0x11, 0xB0, 0x3B, 0xD1, 0x1F, 0xF4, 0x64, 0xBB,
+ 0x70, 0x11, 0x1D, 0x9E, 0x00, 0xA8, 0xC6, 0xBA
+};
+
+/* Frame length is 68 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_68_crc[] = {
+ 0x12, 0x69, 0x5C, 0xDF, 0xB0, 0xDB, 0x1A, 0x38,
+ 0xC8, 0x96, 0x1E, 0x5A, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAA, 0xBA, 0xC0, 0xA8, 0xDE, 0xF8, 0xC0, 0xA8,
+ 0x6F, 0xB7, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1E,
+ 0x06, 0xFC, 0x73, 0x90, 0xFC, 0x24, 0x58, 0xE6,
+ 0x2F, 0x29, 0xA7, 0x18, 0x77, 0xF8, 0x8D, 0xB6,
+ 0xF5, 0xB6, 0xE4, 0x3A, 0x09, 0xD8, 0x9F, 0xE0,
+ 0x88, 0xEB, 0x77, 0x75
+};
+
+/* Frame length is 70 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_70_crc[] = {
+ 0x12, 0x7F, 0x60, 0x61, 0x5F, 0x98, 0x1A, 0x18,
+ 0x74, 0x06, 0x52, 0x94, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAB, 0xDD, 0xC0, 0xA8, 0xDE, 0x89, 0xC0, 0xA8,
+ 0x6F, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x20,
+ 0xDC, 0xBC, 0xC2, 0x76, 0xB8, 0x05, 0x50, 0x86,
+ 0x90, 0xA3, 0x46, 0x76, 0x89, 0x9B, 0xF8, 0xD9,
+ 0x3A, 0x36, 0x58, 0x64, 0xD5, 0xF2, 0xBE, 0xA2,
+ 0x07, 0xD5, 0x80, 0x25, 0x82, 0x94
+};
+
+/* Frame length is 71 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_71_crc[] = {
+ 0x12, 0x37, 0x9F, 0x5A, 0x81, 0x77, 0x1A, 0xB2,
+ 0x2F, 0x2F, 0xF8, 0xAE, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAB, 0x6B, 0xC0, 0xA8, 0xDE, 0xA2, 0xC0, 0xA8,
+ 0x6F, 0x59, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x21,
+ 0xAA, 0x78, 0x09, 0xE6, 0xC7, 0x2B, 0x99, 0x6D,
+ 0xC7, 0xA9, 0xE2, 0x8E, 0xB7, 0x21, 0xE0, 0x9C,
+ 0xAC, 0x23, 0x77, 0x44, 0xB0, 0x61, 0x1C, 0x70,
+ 0x15, 0xB7, 0xD3, 0x4D, 0x8E, 0xB3, 0xA4
+};
+
+/* Frame length is 287 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_287_crc[] = {
+ 0x12, 0x16, 0xB7, 0x09, 0x63, 0x96, 0x1A, 0xDE,
+ 0xBE, 0x52, 0xEC, 0x53, 0x08, 0x00, 0x45, 0x00,
+ 0x01, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAA, 0x5F, 0xC0, 0xA8, 0xDE, 0xB6, 0xC0, 0xA8,
+ 0x6F, 0x79, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0xF9,
+ 0x99, 0x92, 0xA3, 0x3E, 0x1A, 0xAA, 0xFF, 0xA6,
+ 0xAB, 0xDD, 0xF5, 0x55, 0x28, 0x08, 0xCA, 0x0E,
+ 0x99, 0x27, 0x61, 0xB5, 0x9D, 0x65, 0x01, 0x07,
+ 0x55, 0x79, 0x57, 0x89, 0x7D, 0x35, 0xBF, 0xBE,
+ 0xDC, 0x5C, 0xBF, 0x9D, 0xDF, 0xB5, 0xD4, 0x42,
+ 0x81, 0x5D, 0x72, 0xE4, 0x6D, 0x61, 0x46, 0x07,
+ 0x67, 0x8C, 0xBE, 0xF3, 0xE8, 0x7E, 0xCB, 0x64,
+ 0x08, 0xFE, 0xE6, 0x83, 0xA1, 0x92, 0x51, 0x15,
+ 0xB2, 0x4C, 0x9F, 0xF3, 0x01, 0xE6, 0x76, 0xBA,
+ 0x05, 0x12, 0x94, 0xC3, 0x02, 0x0B, 0x10, 0x56,
+ 0x76, 0x70, 0xE2, 0x1A, 0xB4, 0x52, 0xA6, 0xD0,
+ 0xCF, 0x8C, 0x9D, 0x41, 0xB9, 0x52, 0xF5, 0x75,
+ 0xAC, 0x0D, 0x4A, 0x26, 0xC9, 0x66, 0x6C, 0x74,
+ 0x00, 0xA1, 0x63, 0xDB, 0x2F, 0x2D, 0xB0, 0x61,
+ 0x8E, 0x79, 0xD6, 0x14, 0x4A, 0x09, 0x19, 0xB3,
+ 0x70, 0xC8, 0x86, 0xAC, 0x0D, 0xA0, 0x33, 0x46,
+ 0x94, 0x48, 0xC8, 0x20, 0x7F, 0x5D, 0x3E, 0xDA,
+ 0x39, 0xB5, 0xE7, 0x12, 0x3C, 0xF0, 0xAF, 0x92,
+ 0x76, 0x4F, 0xA1, 0xC7, 0xF2, 0xCA, 0xAD, 0x76,
+ 0xB4, 0x5C, 0xA8, 0xAA, 0xE5, 0xA2, 0x94, 0xF1,
+ 0x30, 0xA4, 0x22, 0xC7, 0x6B, 0xF3, 0x75, 0x53,
+ 0x7A, 0xF4, 0x29, 0x51, 0x70, 0x7B, 0x94, 0x50,
+ 0xF8, 0x9B, 0x4B, 0x1D, 0xF4, 0xBD, 0xE8, 0x7F,
+ 0x63, 0xF0, 0x0B, 0x24, 0x88, 0x80, 0x9F, 0xDC,
+ 0x49, 0xCA, 0x5F, 0x05, 0xD5, 0x4E, 0x98, 0x46,
+ 0x89, 0x06, 0x30, 0x81, 0x15, 0xF7, 0xE7, 0x02,
+ 0xDA, 0x05, 0xDD, 0xFD, 0x97, 0x0B, 0x55, 0x37,
+ 0x45, 0x2B, 0xB8, 0x03, 0x3F, 0x63, 0xDD, 0x70,
+ 0xA6, 0x61, 0x87, 0xC1, 0x04, 0x99, 0x2F, 0x1D,
+ 0x2F, 0x94, 0x04, 0x88, 0x71, 0x8B, 0x31, 0x12,
+ 0xE5, 0x34, 0x5E, 0x01, 0x24, 0x62, 0x28
+};
+
+/* Frame length is 400 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_400_crc[] = {
+ 0x12, 0x61, 0x73, 0x60, 0x9D, 0x86, 0x1A, 0x85,
+ 0x0E, 0xF2, 0x3B, 0x2E, 0x08, 0x00, 0x45, 0x00,
+ 0x01, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAA, 0xB3, 0xC0, 0xA8, 0xDE, 0x0B, 0xC0, 0xA8,
+ 0x6F, 0x5F, 0x00, 0x3F, 0x00, 0x3F, 0x01, 0x6A,
+ 0x16, 0x66, 0xE1, 0xC0, 0xC1, 0x8E, 0xA5, 0x1A,
+ 0x01, 0x2A, 0x00, 0xBB, 0x41, 0x3B, 0x9C, 0xB8,
+ 0x18, 0x8A, 0x70, 0x3C, 0xD6, 0xBA, 0xE3, 0x1C,
+ 0xC6, 0x7B, 0x34, 0xB1, 0xB0, 0x00, 0x19, 0xE6,
+ 0xA2, 0xB2, 0xA6, 0x90, 0xF0, 0x26, 0x71, 0xFE,
+ 0x7A, 0x4C, 0xF4, 0xD1, 0x2D, 0xEA, 0x32, 0x0E,
+ 0xCD, 0x49, 0x9E, 0x72, 0xF1, 0x2F, 0x38, 0x06,
+ 0x4F, 0x0C, 0xF9, 0xF3, 0x39, 0x78, 0x71, 0x96,
+ 0x68, 0xDD, 0xAF, 0xD7, 0xF9, 0x71, 0x61, 0xB7,
+ 0xB5, 0x68, 0x3C, 0x29, 0x95, 0x67, 0x9E, 0x23,
+ 0x85, 0x3B, 0x72, 0xF4, 0x69, 0xCB, 0x55, 0xD8,
+ 0x5E, 0x4B, 0xF6, 0xCA, 0x42, 0xB3, 0xC3, 0x99,
+ 0x76, 0x70, 0xC2, 0x3E, 0xE2, 0x59, 0xBC, 0x6D,
+ 0x3A, 0xE4, 0xA1, 0x6A, 0x80, 0x9A, 0x28, 0x1E,
+ 0xCB, 0xC8, 0xB6, 0x6A, 0x46, 0x78, 0x81, 0xBB,
+ 0x7B, 0x9F, 0xF5, 0xDF, 0xD2, 0x98, 0x57, 0x17,
+ 0x54, 0xD1, 0xA8, 0x6D, 0xB5, 0xC5, 0xCC, 0x47,
+ 0x92, 0x2A, 0xEB, 0x3D, 0xF7, 0x6B, 0x18, 0x28,
+ 0x24, 0x58, 0x30, 0x7B, 0x91, 0x1D, 0x05, 0xD7,
+ 0x2F, 0x70, 0xBC, 0xD9, 0xF1, 0x0F, 0x74, 0x37,
+ 0x8B, 0x6A, 0x29, 0x0B, 0x7A, 0x9C, 0xD7, 0x6E,
+ 0x44, 0xA0, 0xE2, 0x49, 0x01, 0xC2, 0xB5, 0x68,
+ 0x1A, 0x53, 0xA9, 0xD0, 0x51, 0xA1, 0x29, 0x53,
+ 0x01, 0x27, 0x15, 0x61, 0xA7, 0x00, 0x63, 0x21,
+ 0xA2, 0xA2, 0x0C, 0xC0, 0x37, 0xC8, 0x26, 0x0A,
+ 0xD8, 0xB0, 0x4D, 0x37, 0xA6, 0x87, 0x48, 0x07,
+ 0x34, 0x22, 0xEA, 0x11, 0x8E, 0xEE, 0x35, 0x57,
+ 0x7A, 0x2A, 0xC6, 0x1F, 0xFD, 0x53, 0x6D, 0xFE,
+ 0x21, 0xE0, 0x1B, 0x36, 0xF6, 0x30, 0x01, 0x42,
+ 0xD7, 0xC1, 0xF6, 0xAE, 0xEE, 0xA2, 0x19, 0x2C,
+ 0xFB, 0x2B, 0xB8, 0xE5, 0x50, 0xEB, 0x71, 0x0D,
+ 0x20, 0xE2, 0x97, 0xBA, 0xFA, 0xF0, 0xD8, 0xF6,
+ 0x91, 0x8E, 0x1C, 0x12, 0xBE, 0xBC, 0xAF, 0x3E,
+ 0xC8, 0x3A, 0xA3, 0x57, 0xE2, 0xFB, 0x70, 0x00,
+ 0xF5, 0xD7, 0xDE, 0xF4, 0xA0, 0x80, 0x25, 0x9B,
+ 0x7E, 0xB7, 0x52, 0xDB, 0xA7, 0xC0, 0xEC, 0x30,
+ 0x79, 0x13, 0xD8, 0xFF, 0x98, 0x54, 0x7A, 0x27,
+ 0x33, 0x85, 0x1D, 0xDA, 0x89, 0x7B, 0x95, 0xAB,
+ 0xAC, 0x8E, 0x22, 0xE7, 0x85, 0x95, 0x98, 0x29,
+ 0x19, 0x12, 0xBD, 0x29, 0x0A, 0xA9, 0xF3, 0xD5,
+ 0x61, 0xD7, 0x17, 0xA3, 0x8A, 0xE0, 0xA8, 0x25,
+ 0xA0, 0x09, 0x2C, 0xDE, 0xEC, 0x08, 0xCF, 0x54,
+ 0xA8, 0xB9, 0x4E, 0x66, 0x08, 0x12, 0x13, 0xE0,
+ 0x7B, 0x59, 0xA2, 0x4D, 0x2E, 0x94, 0x33, 0x0C,
+ 0xD1, 0x42, 0xA0, 0xA6, 0xCC, 0x80, 0x7F, 0xA8
+};
+
+/* Frame length is 503 bytes with CRC. */
+static const uint8_t test_packet_ipv4_udp_503_crc[] = {
+ 0x12, 0xA5, 0xD4, 0xC0, 0xC6, 0xC4, 0x1A, 0xCA,
+ 0xB9, 0x97, 0x2B, 0x57, 0x08, 0x00, 0x45, 0x00,
+ 0x01, 0xE5, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xAA, 0x01, 0xC0, 0xA8, 0xDE, 0x34, 0xC0, 0xA8,
+ 0x6F, 0x81, 0x00, 0x3F, 0x00, 0x3F, 0x01, 0xD1,
+ 0xD3, 0xC9, 0x46, 0x40, 0x04, 0x0F, 0x98, 0xEF,
+ 0x43, 0xB0, 0xDC, 0xEC, 0x33, 0x67, 0x66, 0x85,
+ 0xFD, 0x87, 0xC9, 0x58, 0xD7, 0x41, 0x02, 0xB8,
+ 0xD7, 0x24, 0xF2, 0x53, 0x04, 0xE5, 0x12, 0x11,
+ 0xB3, 0x5C, 0x62, 0x3F, 0x11, 0xD0, 0xA2, 0xEC,
+ 0xFC, 0x74, 0xC5, 0x93, 0x17, 0x31, 0xA3, 0xFB,
+ 0x1D, 0xA6, 0xEE, 0x5C, 0x75, 0x6E, 0x67, 0xC3,
+ 0x07, 0xA1, 0x51, 0xB7, 0x4F, 0x9D, 0x26, 0x1D,
+ 0xAF, 0x06, 0x8A, 0x59, 0x0A, 0x0B, 0x7D, 0xB6,
+ 0x8E, 0xEB, 0xD4, 0x08, 0xD1, 0xB8, 0xEA, 0x90,
+ 0x20, 0x5C, 0x93, 0x1F, 0x14, 0xD9, 0x51, 0x7E,
+ 0x65, 0xD5, 0xCB, 0x0E, 0x03, 0x55, 0x7A, 0xAC,
+ 0x63, 0xC9, 0xA6, 0xD7, 0x17, 0x49, 0x91, 0x15,
+ 0xA2, 0x1E, 0xF2, 0x92, 0x8A, 0x84, 0xA4, 0x0B,
+ 0xAF, 0xAE, 0xA0, 0xEA, 0xDA, 0x0B, 0x29, 0xB3,
+ 0x99, 0xC8, 0x46, 0x9E, 0x4B, 0x96, 0x75, 0x86,
+ 0x77, 0xAD, 0x9E, 0x01, 0x62, 0x10, 0x46, 0xD1,
+ 0xE0, 0x13, 0x05, 0x7B, 0x6A, 0x1B, 0x3A, 0x35,
+ 0x71, 0xA6, 0xFD, 0x05, 0xF2, 0x8B, 0x55, 0x28,
+ 0x4B, 0x82, 0xAB, 0xB1, 0x4D, 0xE6, 0x7F, 0x72,
+ 0x92, 0xBA, 0x5A, 0x1F, 0x10, 0xEB, 0x04, 0xB1,
+ 0xEF, 0xD4, 0xF6, 0x09, 0x98, 0x07, 0x12, 0xD6,
+ 0x0F, 0x4B, 0x92, 0xB8, 0x82, 0xE1, 0x3F, 0xA6,
+ 0x22, 0x0C, 0xE3, 0x8D, 0x31, 0xD0, 0x00, 0x39,
+ 0x5C, 0xF9, 0xC2, 0x79, 0x4C, 0x5F, 0x33, 0x7E,
+ 0x78, 0x69, 0xAE, 0x85, 0x3D, 0xD0, 0x96, 0xB6,
+ 0x30, 0xA5, 0x47, 0x4B, 0xB3, 0x96, 0x4D, 0xF4,
+ 0xC6, 0x6D, 0xD5, 0x7A, 0x20, 0xD9, 0x60, 0xA3,
+ 0x7F, 0x71, 0xBF, 0x57, 0x3C, 0xF6, 0x3B, 0x00,
+ 0x22, 0xD8, 0x14, 0x37, 0x80, 0xFC, 0x2D, 0x9C,
+ 0x7D, 0xBD, 0x05, 0x05, 0xAC, 0x31, 0xE9, 0xDC,
+ 0xE0, 0xAD, 0x68, 0xC2, 0x42, 0x8B, 0x08, 0x78,
+ 0xA0, 0x2A, 0x37, 0x00, 0x08, 0x37, 0x84, 0xFF,
+ 0x96, 0x2B, 0x0F, 0x66, 0x8A, 0x15, 0x3E, 0x51,
+ 0x9D, 0x9A, 0xB2, 0x30, 0x96, 0x3A, 0x7A, 0x24,
+ 0x18, 0xD5, 0x86, 0xAC, 0xBE, 0x6D, 0x5E, 0x80,
+ 0x6A, 0x2D, 0x14, 0xBD, 0xD9, 0xAA, 0x76, 0x43,
+ 0x7B, 0x6A, 0x89, 0x5B, 0x82, 0xA3, 0x33, 0x9E,
+ 0x39, 0x44, 0x38, 0x12, 0x98, 0x39, 0x68, 0x95,
+ 0x15, 0xEB, 0x16, 0x7F, 0xBC, 0x07, 0xCB, 0x83,
+ 0x82, 0x81, 0x3C, 0xD6, 0xD7, 0xD8, 0x7A, 0x93,
+ 0x7A, 0x9B, 0x69, 0x5F, 0x91, 0x2C, 0x73, 0x49,
+ 0xF9, 0xC4, 0x7D, 0xF3, 0xDB, 0xB7, 0x1A, 0xF7,
+ 0x80, 0xFA, 0xFF, 0x84, 0x66, 0xE2, 0xB8, 0x48,
+ 0x93, 0x2E, 0x99, 0x93, 0x29, 0x48, 0xF6, 0xB9,
+ 0x3B, 0xC8, 0x97, 0xB8, 0xDF, 0x3A, 0x66, 0x1A,
+ 0x84, 0x21, 0x6B, 0x1D, 0x86, 0x3C, 0xFA, 0x12,
+ 0x00, 0x07, 0x2B, 0x03, 0xE2, 0x85, 0x8B, 0x98,
+ 0x43, 0x3D, 0x11, 0x3B, 0xF8, 0x82, 0x54, 0x7B,
+ 0x65, 0xF8, 0xFA, 0xAE, 0x93, 0x54, 0x74, 0xDB,
+ 0x83, 0x63, 0xE9, 0xD7, 0xC2, 0x4E, 0x6F, 0xAD,
+ 0x3E, 0x1C, 0x81, 0x43, 0x58, 0x78, 0xAE, 0x3B,
+ 0x3A, 0xB5, 0x8E, 0x18, 0x6B, 0x0F, 0xFA, 0xA2,
+ 0xA0, 0x34, 0x7C, 0x8B, 0xD6, 0x03, 0x05, 0x52,
+ 0x9D, 0x93, 0xDE, 0x68, 0xB7, 0x77, 0xE1, 0x92,
+ 0xE1, 0x40, 0xE9, 0x8E, 0xF1, 0x44, 0x87, 0xF9,
+ 0x21, 0x9E, 0xF7, 0x70, 0xAB, 0x76, 0x52, 0xF6,
+ 0x96, 0x83, 0x04, 0x4C, 0x80, 0xEF, 0x86
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common/test_packet_ipv6.h b/test/common/test_packet_ipv6.h
new file mode 100644
index 000000000..8703aab34
--- /dev/null
+++ b/test/common/test_packet_ipv6.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_PACKET_IPV6_H_
+#define TEST_PACKET_IPV6_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Test packets without CRC */
+
+/* ICMPv6 echo request */
+static const uint8_t test_packet_ipv6_icmp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x86, 0xDD, 0x60, 0x30,
+ 0x00, 0x00, 0x00, 0x08, 0x3A, 0xFF, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x09, 0xFF, 0xFE, 0x00, 0x04, 0x00, 0x35, 0x55,
+ 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x77, 0x77,
+ 0x77, 0x77, 0x88, 0x88, 0x88, 0x88, 0x80, 0x00,
+ 0x1B, 0xC2, 0x00, 0x01, 0x00, 0x02
+};
+
+/* IPv6 TCP */
+static const uint8_t test_packet_ipv6_tcp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x86, 0xDD, 0x60, 0x30,
+ 0x00, 0x00, 0x00, 0x14, 0x06, 0xFF, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x09, 0xFF, 0xFE, 0x00, 0x04, 0x00, 0x35, 0x55,
+ 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x77, 0x77,
+ 0x77, 0x77, 0x88, 0x88, 0x88, 0x88, 0x04, 0xD2,
+ 0x10, 0xE1, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x02, 0x50, 0x02, 0x00, 0x00, 0x36, 0x35,
+ 0x00, 0x00
+};
+
+/* IPv6 UDP */
+static const uint8_t test_packet_ipv6_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x86, 0xDD, 0x60, 0x30,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0xFF, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x09, 0xFF, 0xFE, 0x00, 0x04, 0x00, 0x35, 0x55,
+ 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x77, 0x77,
+ 0x77, 0x77, 0x88, 0x88, 0x88, 0x88, 0x00, 0x3F,
+ 0x00, 0x3F, 0x00, 0x08, 0x9B, 0x68
+};
+
+/* VLAN IPv6
+ * - type 0x8100, tag 23
+ */
+static const uint8_t test_packet_vlan_ipv6_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x81, 0x00, 0x00, 0x17,
+ 0x86, 0xDD, 0x60, 0x30, 0x00, 0x00, 0x00, 0x08,
+ 0x11, 0xFF, 0xFE, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x09, 0xFF, 0xFE, 0x00,
+ 0x04, 0x00, 0x35, 0x55, 0x55, 0x55, 0x66, 0x66,
+ 0x66, 0x66, 0x77, 0x77, 0x77, 0x77, 0x88, 0x88,
+ 0x88, 0x88, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x08,
+ 0x9B, 0x68
+};
+
+/* IPv6 SCTP
+ * - chunk type: payload data
+ */
+static const uint8_t test_packet_ipv6_sctp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x86, 0xDD, 0x60, 0x30,
+ 0x00, 0x00, 0x00, 0x63, 0x84, 0xFF, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x09, 0xFF, 0xFE, 0x00, 0x04, 0x00, 0x35, 0x55,
+ 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x77, 0x77,
+ 0x77, 0x77, 0x88, 0x88, 0x88, 0x88, 0x04, 0xD2,
+ 0x16, 0x2E, 0xDE, 0xAD, 0xBE, 0xEF, 0x31, 0x44,
+ 0xE3, 0xFE, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00,
+ 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
+/* Multi-cast Ethernet, IPv6 UDP */
+static const uint8_t test_packet_mcast_eth_ipv6_udp[] = {
+ 0x33, 0x33, 0x01, 0x02, 0x03, 0x04, 0x02, 0x00,
+ 0x00, 0x03, 0x04, 0x05, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x4F, 0x11, 0x40, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xFF, 0xFE, 0x03, 0x04, 0x05, 0xFF, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x02, 0x00, 0x03, 0x00, 0x04, 0x04, 0xD2,
+ 0x16, 0x2E, 0x00, 0x4F, 0xD6, 0x79, 0x54, 0x68,
+ 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79,
+ 0x20, 0x64, 0x75, 0x6D, 0x6D, 0x79, 0x20, 0x70,
+ 0x61, 0x79, 0x6C, 0x6F, 0x61, 0x64, 0x20, 0x73,
+ 0x74, 0x72, 0x69, 0x6E, 0x67, 0x2E, 0x20, 0x54,
+ 0x68, 0x65, 0x20, 0x6C, 0x65, 0x6E, 0x67, 0x74,
+ 0x68, 0x20, 0x6F, 0x66, 0x20, 0x74, 0x68, 0x69,
+ 0x73, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E, 0x67,
+ 0x20, 0x69, 0x73, 0x20, 0x37, 0x31, 0x20, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x2E
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common_plat/Makefile.am b/test/common_plat/Makefile.am
deleted file mode 100644
index af78bb653..000000000
--- a/test/common_plat/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-SUBDIRS =
-
-if cunit_support
-SUBDIRS += common
-endif
-
-SUBDIRS += performance miscellaneous validation
diff --git a/test/common_plat/common/Makefile.am b/test/common_plat/common/Makefile.am
deleted file mode 100644
index fd41fb428..000000000
--- a/test/common_plat/common/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-AUTOMAKE_OPTIONS = foreign
-include $(top_srcdir)/test/Makefile.inc
-
-noinst_LTLIBRARIES = libcunit_common.la libcpumask_common.la libthrmask_common.la
-
-libcunit_common_la_SOURCES = odp_cunit_common.c
-
-libcpumask_common_la_SOURCES = mask_common.c
-
-libthrmask_common_la_SOURCES = mask_common.c
-libthrmask_common_la_CFLAGS = $(AM_CFLAGS) -DTEST_THRMASK
-
-EXTRA_DIST = mask_common.h odp_cunit_common.h
diff --git a/test/common_plat/common/odp_cunit_common.c b/test/common_plat/common/odp_cunit_common.c
deleted file mode 100644
index d3328af6c..000000000
--- a/test/common_plat/common/odp_cunit_common.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <string.h>
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include <odp/helper/odph_api.h>
-/* Globals */
-static odph_odpthread_t thread_tbl[MAX_WORKERS];
-static odp_instance_t instance;
-
-/*
- * global init/term functions which may be registered
- * defaults to functions performing odp init/term.
- */
-static int tests_global_init(odp_instance_t *inst);
-static int tests_global_term(odp_instance_t inst);
-static struct {
- int (*global_init_ptr)(odp_instance_t *inst);
- int (*global_term_ptr)(odp_instance_t inst);
-} global_init_term = {tests_global_init, tests_global_term};
-
-static odp_suiteinfo_t *global_testsuites;
-
-/** create test thread */
-int odp_cunit_thread_create(int func_ptr(void *), pthrd_arg *arg)
-{
- odp_cpumask_t cpumask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = func_ptr;
- thr_params.arg = arg;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
-
- /* Create and init additional threads */
- odp_cpumask_default_worker(&cpumask, arg->numthrds);
-
- return odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
-}
-
-/** exit from test thread */
-int odp_cunit_thread_exit(pthrd_arg *arg)
-{
- /* Wait for other threads to exit */
- if (odph_odpthreads_join(thread_tbl) != arg->numthrds) {
- fprintf(stderr,
- "error: odph_odpthreads_join() failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int tests_global_init(odp_instance_t *inst)
-{
- if (0 != odp_init_global(inst, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
- return -1;
- }
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int tests_global_term(odp_instance_t inst)
-{
- if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
- return -1;
- }
-
- if (0 != odp_term_global(inst)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-/*
- * register tests_global_init and tests_global_term functions.
- * If some of these functions are not registered, the defaults functions
- * (tests_global_init() and tests_global_term()) defined above are used.
- * One should use these register functions when defining these hooks.
- * Note that passing NULL as function pointer is valid and will simply
- * prevent the default (odp init/term) to be done.
- */
-void odp_cunit_register_global_init(int (*func_init_ptr)(odp_instance_t *inst))
-{
- global_init_term.global_init_ptr = func_init_ptr;
-}
-
-void odp_cunit_register_global_term(int (*func_term_ptr)(odp_instance_t inst))
-{
- global_init_term.global_term_ptr = func_term_ptr;
-}
-
-static odp_suiteinfo_t *cunit_get_suite_info(const char *suite_name)
-{
- odp_suiteinfo_t *sinfo;
-
- for (sinfo = global_testsuites; sinfo->pName; sinfo++)
- if (strcmp(sinfo->pName, suite_name) == 0)
- return sinfo;
-
- return NULL;
-}
-
-static odp_testinfo_t *cunit_get_test_info(odp_suiteinfo_t *sinfo,
- const char *test_name)
-{
- odp_testinfo_t *tinfo;
-
- for (tinfo = sinfo->pTests; tinfo->pName; tinfo++)
- if (strcmp(tinfo->pName, test_name) == 0)
- return tinfo;
-
- return NULL;
-}
-
-/* A wrapper for the suite's init function. This is done to allow for a
- * potential runtime check to determine whether each test in the suite
- * is active (enabled by using ODP_TEST_INFO_CONDITIONAL()). If present,
- * the conditional check is run after the suite's init function.
- */
-static int _cunit_suite_init(void)
-{
- int ret = 0;
- CU_pSuite cur_suite = CU_get_current_suite();
- odp_suiteinfo_t *sinfo;
- odp_testinfo_t *tinfo;
-
- /* find the suite currently being run */
- cur_suite = CU_get_current_suite();
- if (!cur_suite)
- return -1;
-
- sinfo = cunit_get_suite_info(cur_suite->pName);
- if (!sinfo)
- return -1;
-
- /* execute its init function */
- if (sinfo->pInitFunc) {
- ret = sinfo->pInitFunc();
- if (ret)
- return ret;
- }
-
- /* run any configured conditional checks and mark inactive tests */
- for (tinfo = sinfo->pTests; tinfo->pName; tinfo++) {
- CU_pTest ptest;
- CU_ErrorCode err;
-
- if (!tinfo->check_active || tinfo->check_active())
- continue;
-
- /* test is inactive, mark it as such */
- ptest = CU_get_test_by_name(tinfo->pName, cur_suite);
- if (ptest)
- err = CU_set_test_active(ptest, CU_FALSE);
- else
- err = CUE_NOTEST;
-
- if (err != CUE_SUCCESS) {
- fprintf(stderr, "%s: failed to set test %s inactive\n",
- __func__, tinfo->pName);
- return -1;
- }
- }
-
- return ret;
-}
-
-/*
- * Register suites and tests with CUnit.
- *
- * Similar to CU_register_suites() but using locally defined wrapper
- * types.
- */
-static int cunit_register_suites(odp_suiteinfo_t testsuites[])
-{
- odp_suiteinfo_t *sinfo;
- odp_testinfo_t *tinfo;
- CU_pSuite suite;
- CU_pTest test;
-
- for (sinfo = testsuites; sinfo->pName; sinfo++) {
- suite = CU_add_suite(sinfo->pName,
- _cunit_suite_init, sinfo->pCleanupFunc);
- if (!suite)
- return CU_get_error();
-
- for (tinfo = sinfo->pTests; tinfo->pName; tinfo++) {
- test = CU_add_test(suite, tinfo->pName,
- tinfo->pTestFunc);
- if (!test)
- return CU_get_error();
- }
- }
-
- return 0;
-}
-
-static int cunit_update_test(CU_pSuite suite,
- odp_suiteinfo_t *sinfo,
- odp_testinfo_t *updated_tinfo)
-{
- CU_pTest test = NULL;
- CU_ErrorCode err;
- odp_testinfo_t *tinfo;
- const char *test_name = updated_tinfo->pName;
-
- tinfo = cunit_get_test_info(sinfo, test_name);
- if (tinfo)
- test = CU_get_test(suite, test_name);
-
- if (!tinfo || !test) {
- fprintf(stderr, "%s: unable to find existing test named %s\n",
- __func__, test_name);
- return -1;
- }
-
- err = CU_set_test_func(test, updated_tinfo->pTestFunc);
- if (err != CUE_SUCCESS) {
- fprintf(stderr, "%s: failed to update test func for %s\n",
- __func__, test_name);
- return -1;
- }
-
- tinfo->check_active = updated_tinfo->check_active;
-
- return 0;
-}
-
-static int cunit_update_suite(odp_suiteinfo_t *updated_sinfo)
-{
- CU_pSuite suite = NULL;
- CU_ErrorCode err;
- odp_suiteinfo_t *sinfo;
- odp_testinfo_t *tinfo;
-
- /* find previously registered suite with matching name */
- sinfo = cunit_get_suite_info(updated_sinfo->pName);
-
- if (sinfo) {
- /* lookup the associated CUnit suite */
- suite = CU_get_suite_by_name(updated_sinfo->pName,
- CU_get_registry());
- }
-
- if (!sinfo || !suite) {
- fprintf(stderr, "%s: unable to find existing suite named %s\n",
- __func__, updated_sinfo->pName);
- return -1;
- }
-
- sinfo->pInitFunc = updated_sinfo->pInitFunc;
- sinfo->pCleanupFunc = updated_sinfo->pCleanupFunc;
-
- err = CU_set_suite_cleanupfunc(suite, updated_sinfo->pCleanupFunc);
- if (err != CUE_SUCCESS) {
- fprintf(stderr, "%s: failed to update cleanup func for %s\n",
- __func__, updated_sinfo->pName);
- return -1;
- }
-
- for (tinfo = updated_sinfo->pTests; tinfo->pName; tinfo++) {
- int ret;
-
- ret = cunit_update_test(suite, sinfo, tinfo);
- if (ret != 0)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Run tests previously registered via odp_cunit_register()
- */
-int odp_cunit_run(void)
-{
- int ret;
-
- printf("\tODP API version: %s\n", odp_version_api_str());
- printf("\tODP implementation name: %s\n", odp_version_impl_name());
- printf("\tODP implementation version: %s\n", odp_version_impl_str());
-
- CU_basic_set_mode(CU_BRM_VERBOSE);
- CU_basic_run_tests();
-
- ret = CU_get_number_of_failure_records();
-
- CU_cleanup_registry();
-
- /* call test executable terminason hook, if any */
- if (global_init_term.global_term_ptr &&
- ((*global_init_term.global_term_ptr)(instance) != 0))
- return -1;
-
- return (ret) ? -1 : 0;
-}
-
-/*
- * Update suites/tests previously registered via odp_cunit_register().
- *
- * Note that this is intended for modifying the properties of already
- * registered suites/tests. New suites/tests can only be registered via
- * odp_cunit_register().
- */
-int odp_cunit_update(odp_suiteinfo_t testsuites[])
-{
- int ret = 0;
- odp_suiteinfo_t *sinfo;
-
- for (sinfo = testsuites; sinfo->pName && ret == 0; sinfo++)
- ret = cunit_update_suite(sinfo);
-
- return ret;
-}
-
-/*
- * Register test suites to be run via odp_cunit_run()
- */
-int odp_cunit_register(odp_suiteinfo_t testsuites[])
-{
- /* call test executable init hook, if any */
- if (global_init_term.global_init_ptr) {
- if ((*global_init_term.global_init_ptr)(&instance) == 0) {
- /* After ODP initialization, set main thread's
- * CPU affinity to the 1st available control CPU core
- */
- int cpu = 0;
- odp_cpumask_t cpuset;
-
- odp_cpumask_zero(&cpuset);
- if (odp_cpumask_default_control(&cpuset, 1) == 1) {
- cpu = odp_cpumask_first(&cpuset);
- odph_odpthread_setaffinity(cpu);
- }
- } else {
- /* ODP initialization failed */
- return -1;
- }
- }
-
- CU_set_error_action(CUEA_ABORT);
-
- CU_initialize_registry();
- global_testsuites = testsuites;
- cunit_register_suites(testsuites);
- CU_set_fail_on_inactive(CU_FALSE);
-
- return 0;
-}
-
-/*
- * Parse command line options to extract options affectiong cunit_common.
- * (hence also helpers options as cunit_common uses the helpers)
- * Options private to the test calling cunit_common are not parsed here.
- */
-int odp_cunit_parse_options(int argc, char *argv[])
-{
- return odph_parse_options(argc, argv, NULL, NULL);
-}
diff --git a/test/common_plat/common/odp_cunit_common.h b/test/common_plat/common/odp_cunit_common.h
deleted file mode 100644
index 486a5ec51..000000000
--- a/test/common_plat/common/odp_cunit_common.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP test application common headers
- */
-
-#ifndef ODP_CUNICT_COMMON_H
-#define ODP_CUNICT_COMMON_H
-
-#include <stdint.h>
-#include <inttypes.h>
-#include "CUnit/Basic.h"
-#include "CUnit/TestDB.h"
-#include <odp_api.h>
-
-#define MAX_WORKERS 32 /**< Maximum number of work threads */
-
-typedef int (*cunit_test_check_active)(void);
-
-typedef struct {
- const char *pName;
- CU_TestFunc pTestFunc;
- cunit_test_check_active check_active;
-} odp_testinfo_t;
-
-typedef struct {
- const char *pName;
- CU_InitializeFunc pInitFunc;
- CU_CleanupFunc pCleanupFunc;
- odp_testinfo_t *pTests;
-} odp_suiteinfo_t;
-
-static inline int odp_cunit_test_inactive(void) { return 0; }
-static inline void odp_cunit_test_missing(void) { }
-
-/* An active test case, with the test name matching the test function name */
-#define ODP_TEST_INFO(test_func) \
- {#test_func, test_func, NULL}
-
-/* A test case that is unconditionally inactive. Its name will be registered
- * with CUnit but it won't be executed and will be reported as inactive in
- * the result summary. */
-#define ODP_TEST_INFO_INACTIVE(test_func, args...) \
- {#test_func, odp_cunit_test_missing, odp_cunit_test_inactive}
-
-#define ODP_TEST_INACTIVE 0
-#define ODP_TEST_ACTIVE 1
-
-/* A test case that may be marked as inactive at runtime based on the
- * return value of the cond_func function. A return value of ODP_TEST_INACTIVE
- * means inactive, ODP_TEST_ACTIVE means active. */
-#define ODP_TEST_INFO_CONDITIONAL(test_func, cond_func) \
- {#test_func, test_func, cond_func}
-
-#define ODP_TEST_INFO_NULL {NULL, NULL, NULL}
-#define ODP_SUITE_INFO_NULL {NULL, NULL, NULL, NULL}
-
-typedef struct {
- uint32_t foo;
- uint32_t bar;
-} test_shared_data_t;
-
-/**
- * Thread argument
- */
-typedef struct {
- int testcase; /**< specifies which set of API's to exercise */
- int numthrds; /**< no of pthreads to create */
-} pthrd_arg;
-
-/* parse parameters that affect the behaviour of odp_cunit_common */
-int odp_cunit_parse_options(int argc, char *argv[]);
-/* register suites to be run via odp_cunit_run() */
-int odp_cunit_register(odp_suiteinfo_t testsuites[]);
-/* update tests previously registered via odp_cunit_register() */
-int odp_cunit_update(odp_suiteinfo_t testsuites[]);
-/* the function, called by module main(), to run the testsuites: */
-int odp_cunit_run(void);
-
-/** create thread for start_routine function (which returns 0 on success) */
-int odp_cunit_thread_create(int func_ptr(void *), pthrd_arg *arg);
-int odp_cunit_thread_exit(pthrd_arg *);
-
-/**
- * Global tests initialization/termination.
- *
- * Initialize global resources needed by the test executable. Default
- * definition does ODP init / term (both global and local).
- * Test executables can override it by calling one of the register function
- * below.
- * The functions are called at the very beginning and very end of the test
- * execution. Passing NULL to odp_cunit_register_global_init() and/or
- * odp_cunit_register_global_term() is legal and will simply prevent the
- * default (ODP init/term) to be done.
- */
-void odp_cunit_register_global_init(int (*func_init_ptr)(odp_instance_t *inst));
-
-void odp_cunit_register_global_term(int (*func_term_ptr)(odp_instance_t inst));
-
-#endif /* ODP_CUNICT_COMMON_H */
diff --git a/test/common_plat/m4/configure.m4 b/test/common_plat/m4/configure.m4
deleted file mode 100644
index be878bd7d..000000000
--- a/test/common_plat/m4/configure.m4
+++ /dev/null
@@ -1,33 +0,0 @@
-m4_include([test/common_plat/m4/miscellaneous.m4])
-m4_include([test/common_plat/m4/performance.m4])
-m4_include([test/common_plat/m4/validation.m4])
-
-AC_CONFIG_FILES([test/common_plat/Makefile
- test/common_plat/common/Makefile
- test/common_plat/miscellaneous/Makefile
- test/common_plat/performance/Makefile
- test/common_plat/validation/Makefile
- test/common_plat/validation/api/atomic/Makefile
- test/common_plat/validation/api/barrier/Makefile
- test/common_plat/validation/api/buffer/Makefile
- test/common_plat/validation/api/classification/Makefile
- test/common_plat/validation/api/cpumask/Makefile
- test/common_plat/validation/api/crypto/Makefile
- test/common_plat/validation/api/errno/Makefile
- test/common_plat/validation/api/hash/Makefile
- test/common_plat/validation/api/init/Makefile
- test/common_plat/validation/api/lock/Makefile
- test/common_plat/validation/api/Makefile
- test/common_plat/validation/api/packet/Makefile
- test/common_plat/validation/api/pktio/Makefile
- test/common_plat/validation/api/pool/Makefile
- test/common_plat/validation/api/queue/Makefile
- test/common_plat/validation/api/random/Makefile
- test/common_plat/validation/api/scheduler/Makefile
- test/common_plat/validation/api/shmem/Makefile
- test/common_plat/validation/api/std_clib/Makefile
- test/common_plat/validation/api/system/Makefile
- test/common_plat/validation/api/thread/Makefile
- test/common_plat/validation/api/time/Makefile
- test/common_plat/validation/api/timer/Makefile
- test/common_plat/validation/api/traffic_mngr/Makefile])
diff --git a/test/common_plat/m4/miscellaneous.m4 b/test/common_plat/m4/miscellaneous.m4
deleted file mode 100644
index cc881edb7..000000000
--- a/test/common_plat/m4/miscellaneous.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-##########################################################################
-# Enable/disable test-cpp
-##########################################################################
-test_cpp=no
-AC_ARG_ENABLE([test-cpp],
- [ --enable-test-cpp run basic test aginast cpp],
- [if test "x$enableval" = "xyes"; then
- test_cpp=yes
- fi])
diff --git a/test/common_plat/m4/performance.m4 b/test/common_plat/m4/performance.m4
deleted file mode 100644
index 1e2000d97..000000000
--- a/test/common_plat/m4/performance.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-##########################################################################
-# Enable/disable test-perf
-##########################################################################
-test_perf=no
-AC_ARG_ENABLE([test-perf],
- [ --enable-test-perf run test in test/performance],
- [if test "x$enableval" = "xyes"; then
- test_perf=yes
- fi])
diff --git a/test/common_plat/m4/validation.m4 b/test/common_plat/m4/validation.m4
deleted file mode 100644
index d32f675ae..000000000
--- a/test/common_plat/m4/validation.m4
+++ /dev/null
@@ -1,58 +0,0 @@
-##########################################################################
-# Enable/disable Unit tests
-##########################################################################
-cunit_support=no
-test_vald=no
-AC_ARG_ENABLE([test_vald],
- [ --enable-test-vald run test in test/validation],
- [if test x$enableval = xyes; then
- test_vald=yes
- cunit_support=yes
- fi])
-
-##########################################################################
-# Enable/disable Unit tests
-##########################################################################
-AC_ARG_ENABLE([cunit_support],
- [ --enable-cunit-support include cunit infrastructure],
- [if test x$enableval = xyes; then
- cunit_support=yes
- fi])
-
-##########################################################################
-# Set optional CUnit path
-##########################################################################
-AC_ARG_WITH([cunit-path],
-AC_HELP_STRING([--with-cunit-path=DIR path to CUnit libs and headers],
- [(or in the default path if not specified).]),
- [CUNIT_PATH=$withval
- AM_CPPFLAGS="$AM_CPPFLAGS -I$CUNIT_PATH/include"
- AM_LDFLAGS="$AM_LDFLAGS -L$CUNIT_PATH/lib"
- cunit_support=yes],[])
-
-##########################################################################
-# Save and set temporary compilation flags
-##########################################################################
-OLD_LDFLAGS=$LDFLAGS
-OLD_CPPFLAGS=$CPPFLAGS
-LDFLAGS="$AM_LDFLAGS $LDFLAGS"
-CPPFLAGS="$AM_CPPFLAGS $CPPFLAGS"
-
-##########################################################################
-# Check for CUnit availability
-##########################################################################
-if test x$cunit_support = xyes
-then
- AC_CHECK_LIB([cunit],[CU_get_error], [],
- [AC_MSG_ERROR([CUnit libraries required])])
- AC_CHECK_HEADERS([CUnit/Basic.h], [],
- [AC_MSG_FAILURE(["can't find cunit headers"])])
-else
- cunit_support=no
-fi
-
-##########################################################################
-# Restore old saved variables
-##########################################################################
-LDFLAGS=$OLD_LDFLAGS
-CPPFLAGS=$OLD_CPPFLAGS
diff --git a/test/common_plat/miscellaneous/Makefile.am b/test/common_plat/miscellaneous/Makefile.am
deleted file mode 100644
index 7d8cf3531..000000000
--- a/test/common_plat/miscellaneous/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-
-if test_cpp
-bin_PROGRAMS = odp_api_from_cpp$(EXEEXT)
-TESTS = odp_api_from_cpp$(EXEEXT)
-endif
-
-odp_api_from_cpp_CXXFLAGS = $(AM_CXXFLAGS)
-
-odp_api_from_cpp_LDFLAGS = $(AM_LDFLAGS) -static
-
-dist_odp_api_from_cpp_SOURCES = odp_api_from_cpp.cpp
diff --git a/test/common_plat/miscellaneous/odp_api_from_cpp.cpp b/test/common_plat/miscellaneous/odp_api_from_cpp.cpp
deleted file mode 100644
index 2b3078642..000000000
--- a/test/common_plat/miscellaneous/odp_api_from_cpp.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <cstdio>
-#include <odp_api.h>
-#include <odp/helper/threads.h>
-
-int main(int argc ODP_UNUSED, const char *argv[] ODP_UNUSED)
-{
-
- printf("\tODP API version: %s\n", odp_version_api_str());
- printf("\tODP implementation version: %s\n", odp_version_impl_str());
-
- return 0;
-}
diff --git a/test/common_plat/performance/.gitignore b/test/common_plat/performance/.gitignore
deleted file mode 100644
index 72035e002..000000000
--- a/test/common_plat/performance/.gitignore
+++ /dev/null
@@ -1,10 +0,0 @@
-*.log
-*.trs
-odp_atomic
-odp_bench_packet
-odp_crypto
-odp_l2fwd
-odp_pktio_ordered
-odp_pktio_perf
-odp_sched_latency
-odp_scheduling
diff --git a/test/common_plat/performance/Makefile.am b/test/common_plat/performance/Makefile.am
deleted file mode 100644
index 9111c0c2d..000000000
--- a/test/common_plat/performance/Makefile.am
+++ /dev/null
@@ -1,53 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-
-TESTS_ENVIRONMENT += TEST_DIR=${builddir}
-
-EXECUTABLES = odp_bench_packet$(EXEEXT) \
- odp_crypto$(EXEEXT) \
- odp_pktio_perf$(EXEEXT)
-
-COMPILE_ONLY = odp_l2fwd$(EXEEXT) \
- odp_pktio_ordered$(EXEEXT) \
- odp_sched_latency$(EXEEXT) \
- odp_scheduling$(EXEEXT)
-
-TESTSCRIPTS = odp_l2fwd_run.sh \
- odp_pktio_ordered_run.sh \
- odp_sched_latency_run.sh \
- odp_scheduling_run.sh
-
-TEST_EXTENSIONS = .sh
-
-if test_perf
-TESTS = $(EXECUTABLES) $(TESTSCRIPTS)
-endif
-
-bin_PROGRAMS = $(EXECUTABLES) $(COMPILE_ONLY)
-
-odp_bench_packet_LDFLAGS = $(AM_LDFLAGS) -static
-odp_bench_packet_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_crypto_LDFLAGS = $(AM_LDFLAGS) -static
-odp_crypto_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_pktio_perf_LDFLAGS = $(AM_LDFLAGS) -static
-odp_pktio_perf_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_l2fwd_LDFLAGS = $(AM_LDFLAGS) -static
-odp_l2fwd_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_pktio_ordered_LDFLAGS = $(AM_LDFLAGS) -static
-odp_pktio_ordered_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_sched_latency_LDFLAGS = $(AM_LDFLAGS) -static
-odp_sched_latency_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-odp_scheduling_LDFLAGS = $(AM_LDFLAGS) -static
-odp_scheduling_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/test
-
-noinst_HEADERS = \
- $(top_srcdir)/test/test_debug.h \
- dummy_crc.h
-
-dist_odp_bench_packet_SOURCES = odp_bench_packet.c
-dist_odp_crypto_SOURCES = odp_crypto.c
-dist_odp_pktio_ordered_SOURCES = odp_pktio_ordered.c
-dist_odp_sched_latency_SOURCES = odp_sched_latency.c
-dist_odp_scheduling_SOURCES = odp_scheduling.c
-dist_odp_pktio_perf_SOURCES = odp_pktio_perf.c
-
-EXTRA_DIST = $(TESTSCRIPTS)
diff --git a/test/common_plat/performance/odp_crypto.c b/test/common_plat/performance/odp_crypto.c
deleted file mode 100644
index 954bdb794..000000000
--- a/test/common_plat/performance/odp_crypto.c
+++ /dev/null
@@ -1,984 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif /* _GNU_SOURCE */
-
-#include <stdlib.h>
-#include <string.h>
-#include <getopt.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-
-#define app_err(fmt, ...) \
- fprintf(stderr, "%s:%d:%s(): Error: " fmt, __FILE__, \
- __LINE__, __func__, ##__VA_ARGS__)
-
-/** @def POOL_NUM_PKT
- * Number of packets in the pool
- */
-#define POOL_NUM_PKT 64
-
-static uint8_t test_iv[8] = "01234567";
-
-static uint8_t test_key16[16] = { 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a,
- 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10,
-};
-
-static uint8_t test_key24[24] = { 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a,
- 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14,
- 0x15, 0x16, 0x17, 0x18
-};
-
-/**
- * Structure that holds template for session create call
- * for different algorithms supported by test
- */
-typedef struct {
- const char *name; /**< Algorithm name */
- odp_crypto_session_param_t session; /**< Prefilled crypto session params */
- unsigned int hash_adjust; /**< Size of hash */
-} crypto_alg_config_t;
-
-/**
- * Parsed command line crypto arguments. Describes test configuration.
- */
-typedef struct {
- /**
- * If non zero prints content of packets. Enabled by -d or
- * --debug option.
- */
- int debug_packets;
-
- /**
- * If non zero Try to run crypto operation in place. Note some
- * implementation may not support such mode. Enabled by -n or
- * --inplace option.
- */
- int in_place;
-
- /**
- * If non zeor output of previous operation taken as input for
- * next encrypt operations. Enabled by -r or --reuse option.
- */
- int reuse_packet;
-
- /**
- * Maximum number of outstanding encryption requests. Note code
- * poll for results over queue and if nothing is available it can
- * submit more encryption requests up to maximum number specified by
- * this option. Specified through -f or --flight option.
- */
- int in_flight;
-
- /**
- * Number of iteration to repeat crypto operation to get good
- * average number. Specified through -i or --terations option.
- * Default is 10000.
- */
- int iteration_count;
-
- /**
- * Maximum sessions. Currently is not used.
- */
- int max_sessions;
-
- /**
- * Payload size to test. If 0 set of predefined payload sizes
- * is tested. Specified through -p or --payload option.
- */
- int payload_length;
-
- /**
- * Pointer to selected algorithm to test. If NULL all available
- * alogorthims are tested. Name of algorithm is passed through
- * -a or --algorithm option.
- */
- crypto_alg_config_t *alg_config;
-
- /**
- * Use scheduler to get completion events from crypto operation.
- * Specified through -s argument.
- * */
- int schedule;
-
- /*
- * Poll completion queue for crypto completion events.
- * Specified through -p argument.
- */
- int poll;
-} crypto_args_t;
-
-/*
- * Helper structure that holds averages for test of one algorithm
- * for given payload size.
- */
-typedef struct {
- /**
- * Elapsed time for one crypto operation.
- */
- double elapsed;
-
- /**
- * CPU time spent pre one crypto operation by whole process
- * i.e include current and all other threads in process.
- * It is filled with 'getrusage(RUSAGE_SELF, ...)' call.
- */
- double rusage_self;
-
- /**
- * CPU time spent per one crypto operation by current thread
- * only. It is filled with 'getrusage(RUSAGE_THREAD, ...)'
- * call.
- */
- double rusage_thread;
-} crypto_run_result_t;
-
-/**
- * Structure holds one snap to misc times of current process.
- */
-typedef struct {
- struct timeval tv; /**< Elapsed time */
- struct rusage ru_self; /**< Rusage value for whole process */
- struct rusage ru_thread; /**< Rusage value for current thread */
-} time_record_t;
-
-static void parse_args(int argc, char *argv[], crypto_args_t *cargs);
-static void usage(char *progname);
-
-/**
- * Set of predefined payloads.
- */
-static unsigned int payloads[] = {
- 16,
- 64,
- 256,
- 1024,
- 8192,
- 16384
-};
-
-/** Number of payloads used in the test */
-static unsigned num_payloads;
-
-/**
- * Set of known algorithms to test
- */
-static crypto_alg_config_t algs_config[] = {
- {
- .name = "3des-cbc-null",
- .session = {
- .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
- .cipher_key = {
- .data = test_key24,
- .length = sizeof(test_key24)
- },
- .iv = {
- .data = test_iv,
- .length = 8,
- },
- .auth_alg = ODP_AUTH_ALG_NULL
- },
- },
- {
- .name = "3des-cbc-hmac-md5-96",
- .session = {
- .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
- .cipher_key = {
- .data = test_key24,
- .length = sizeof(test_key24)
- },
- .iv = {
- .data = test_iv,
- .length = 8,
- },
- .auth_alg = ODP_AUTH_ALG_MD5_96,
- .auth_key = {
- .data = test_key16,
- .length = sizeof(test_key16)
- }
- },
- .hash_adjust = 12
- },
- {
- .name = "null-hmac-md5-96",
- .session = {
- .cipher_alg = ODP_CIPHER_ALG_NULL,
- .auth_alg = ODP_AUTH_ALG_MD5_96,
- .auth_key = {
- .data = test_key16,
- .length = sizeof(test_key16)
- }
- },
- .hash_adjust = 12
- },
-};
-
-/**
- * Find corresponding config for given name. Returns NULL
- * if config for given name is not found.
- */
-static crypto_alg_config_t *
-find_config_by_name(const char *name) {
- unsigned int i;
- crypto_alg_config_t *ret = NULL;
-
- for (i = 0; i < (sizeof(algs_config) / sizeof(crypto_alg_config_t));
- i++) {
- if (strcmp(algs_config[i].name, name) == 0) {
- ret = algs_config + i;
- break;
- }
- }
- return ret;
-}
-
-/**
- * Helper function that prints list of algorithms that this
- * test understands.
- */
-static void
-print_config_names(const char *prefix) {
- unsigned int i;
-
- for (i = 0; i < (sizeof(algs_config) / sizeof(crypto_alg_config_t));
- i++) {
- printf("%s %s\n", prefix, algs_config[i].name);
- }
-}
-
-/**
- * Snap current time values and put them into 'rec'.
- */
-static void
-fill_time_record(time_record_t *rec)
-{
- gettimeofday(&rec->tv, NULL);
- getrusage(RUSAGE_SELF, &rec->ru_self);
- getrusage(RUSAGE_THREAD, &rec->ru_thread);
-}
-
-/**
- * Calculated CPU time difference for given two rusage structures.
- * Note it adds user space and system time together.
- */
-static unsigned long long
-get_rusage_diff(struct rusage *start, struct rusage *end)
-{
- unsigned long long rusage_diff;
- unsigned long long rusage_start;
- unsigned long long rusage_end;
-
- rusage_start = (start->ru_utime.tv_sec * 1000000) +
- (start->ru_utime.tv_usec);
- rusage_start += (start->ru_stime.tv_sec * 1000000) +
- (start->ru_stime.tv_usec);
-
- rusage_end = (end->ru_utime.tv_sec * 1000000) +
- (end->ru_utime.tv_usec);
- rusage_end += (end->ru_stime.tv_sec * 1000000) +
- (end->ru_stime.tv_usec);
-
- rusage_diff = rusage_end - rusage_start;
-
- return rusage_diff;
-}
-
-/**
- * Get diff for RUSAGE_SELF (whole process) between two time snap
- * records.
- */
-static unsigned long long
-get_rusage_self_diff(time_record_t *start, time_record_t *end)
-{
- return get_rusage_diff(&start->ru_self, &end->ru_self);
-}
-
-/**
- * Get diff for RUSAGE_THREAD (current thread only) between two
- * time snap records.
- */
-static unsigned long long
-get_rusage_thread_diff(time_record_t *start, time_record_t *end)
-{
- return get_rusage_diff(&start->ru_thread, &end->ru_thread);
-}
-
-/**
- * Get diff of elapsed time between two time snap records
- */
-static unsigned long long
-get_elapsed_usec(time_record_t *start, time_record_t *end)
-{
- unsigned long long s;
- unsigned long long e;
-
- s = (start->tv.tv_sec * 1000000) +
- (start->tv.tv_usec);
- e = (end->tv.tv_sec * 1000000) +
- (end->tv.tv_usec);
-
- return e - s;
-}
-
-#define REPORT_HEADER "\n%30.30s %15s %15s %15s %15s %15s %15s\n"
-#define REPORT_LINE "%30.30s %15d %15d %15.3f %15.3f %15.3f %15d\n"
-
-/**
- * Print header line for our report.
- */
-static void
-print_result_header(void)
-{
- printf(REPORT_HEADER,
- "algorithm", "avg over #", "payload (bytes)", "elapsed (us)",
- "rusg self (us)", "rusg thrd (us)", "throughput (Kb)");
-}
-
-/**
- * Print one line of our report.
- */
-static void
-print_result(crypto_args_t *cargs,
- unsigned int payload_length,
- crypto_alg_config_t *config,
- crypto_run_result_t *result)
-{
- unsigned int throughput;
-
- throughput = (1000000.0 / result->elapsed) * payload_length / 1024;
- printf(REPORT_LINE,
- config->name, cargs->iteration_count, payload_length,
- result->elapsed, result->rusage_self, result->rusage_thread,
- throughput);
-}
-
-/**
- * Print piece of memory with given size.
- */
-static void
-print_mem(const char *msg,
- const unsigned char *ptr,
- unsigned int len)
-{
- unsigned i, j;
- char c;
- char line[81];
- char *p;
-
- if (msg)
- printf("\n%s (bytes size = %d)", msg, len);
-
- for (i = 0; i < len; i += 16) {
- p = line;
- sprintf(p, "\n%04x ", i); p += 8;
-
- for (j = 0; j < 16; j++) {
- if (i + j == len)
- break;
-
- sprintf(p, " %02x", (ptr)[i + j]); p += 3;
- }
-
- for (; j < 16; j++) {
- sprintf(p, " "); p += 3;
- }
-
- sprintf(p, " "); p += 3;
-
- for (j = 0; j < 16; j++) {
- if (i + j == len)
- break;
- c = (ptr)[i + j];
- *p++ = (' ' <= c && c <= '~') ? c : '.';
- }
-
- *p = '\0';
- printf("%s", line);
- }
- printf("\n");
-}
-
-/**
- * Create ODP crypto session for given config.
- */
-static int
-create_session_from_config(odp_crypto_session_t *session,
- crypto_alg_config_t *config,
- crypto_args_t *cargs)
-{
- odp_crypto_session_param_t params;
- odp_crypto_ses_create_err_t ses_create_rc;
- odp_pool_t pkt_pool;
- odp_queue_t out_queue;
-
- odp_crypto_session_param_init(&params);
- memcpy(&params, &config->session, sizeof(odp_crypto_session_param_t));
- params.op = ODP_CRYPTO_OP_ENCODE;
- params.pref_mode = ODP_CRYPTO_SYNC;
-
- /* Lookup the packet pool */
- pkt_pool = odp_pool_lookup("packet_pool");
- if (pkt_pool == ODP_POOL_INVALID) {
- app_err("packet_pool pool not found\n");
- return -1;
- }
- params.output_pool = pkt_pool;
-
- if (cargs->schedule || cargs->poll) {
- out_queue = odp_queue_lookup("crypto-out");
- if (out_queue == ODP_QUEUE_INVALID) {
- app_err("crypto-out queue not found\n");
- return -1;
- }
- params.compl_queue = out_queue;
-
- } else {
- params.compl_queue = ODP_QUEUE_INVALID;
- }
- if (odp_crypto_session_create(&params, session,
- &ses_create_rc)) {
- app_err("crypto session create failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Run measurement iterations for given config and payload size.
- * Result of run returned in 'result' out parameter.
- */
-static int
-run_measure_one(crypto_args_t *cargs,
- crypto_alg_config_t *config,
- odp_crypto_session_t *session,
- unsigned int payload_length,
- crypto_run_result_t *result)
-{
- odp_crypto_op_param_t params;
-
- odp_pool_t pkt_pool;
- odp_queue_t out_queue;
- odp_packet_t pkt;
- int rc = 0;
-
- odp_bool_t posted = 0;
-
- pkt_pool = odp_pool_lookup("packet_pool");
- if (pkt_pool == ODP_POOL_INVALID) {
- app_err("pkt_pool not found\n");
- return -1;
- }
-
- out_queue = odp_queue_lookup("crypto-out");
- if (cargs->schedule || cargs->poll) {
- if (out_queue == ODP_QUEUE_INVALID) {
- app_err("crypto-out queue not found\n");
- return -1;
- }
- }
-
- pkt = odp_packet_alloc(pkt_pool, payload_length);
- if (pkt == ODP_PACKET_INVALID) {
- app_err("failed to allocate buffer\n");
- return -1;
- }
-
- void *mem = odp_packet_data(pkt);
-
- memset(mem, 1, payload_length);
-
- time_record_t start, end;
- int packets_sent = 0;
- int packets_received = 0;
-
- /* Initialize parameters block */
- memset(&params, 0, sizeof(params));
- params.session = *session;
-
- params.cipher_range.offset = 0;
- params.cipher_range.length = payload_length;
-
- params.auth_range.offset = 0;
- params.auth_range.length = payload_length;
- params.hash_result_offset = payload_length;
-
- if (cargs->reuse_packet) {
- params.pkt = pkt;
- params.out_pkt = cargs->in_place ? pkt :
- ODP_PACKET_INVALID;
- }
-
- fill_time_record(&start);
-
- while ((packets_sent < cargs->iteration_count) ||
- (packets_received < cargs->iteration_count)) {
- void *mem;
- odp_crypto_op_result_t result;
-
- if ((packets_sent < cargs->iteration_count) &&
- (packets_sent - packets_received <
- cargs->in_flight)) {
- if (!cargs->reuse_packet) {
- /*
- * For in place test we use just one
- * statically allocated buffer.
- * For now in place test we have to
- * allocate and initialize packet
- * every time.
- * Note we leaked one packet here.
- */
- odp_packet_t newpkt;
-
- newpkt = odp_packet_alloc(pkt_pool,
- payload_length);
- if (newpkt == ODP_PACKET_INVALID) {
- app_err("failed to allocate buffer\n");
- return -1;
- }
- mem = odp_packet_data(newpkt);
- memset(mem, 1, payload_length);
- params.pkt = newpkt;
- params.out_pkt = cargs->in_place ? newpkt :
- ODP_PACKET_INVALID;
- }
-
- if (cargs->debug_packets) {
- mem = odp_packet_data(params.pkt);
- print_mem("Packet before encryption:",
- mem, payload_length);
- }
-
- rc = odp_crypto_operation(&params, &posted,
- &result);
- if (rc)
- app_err("failed odp_crypto_operation: rc = %d\n",
- rc);
- else
- packets_sent++;
- }
-
- if (!posted) {
- packets_received++;
- if (cargs->debug_packets) {
- mem = odp_packet_data(params.out_pkt);
- print_mem("Immediately encrypted packet", mem,
- payload_length +
- config->hash_adjust);
- }
- if (!cargs->in_place) {
- if (cargs->reuse_packet) {
- params.pkt = params.out_pkt;
- params.out_pkt = ODP_PACKET_INVALID;
- } else {
- odp_packet_free(params.out_pkt);
- }
- }
- } else {
- odp_event_t ev;
- odp_crypto_compl_t compl;
- odp_crypto_op_result_t result;
- odp_packet_t out_pkt;
-
- if (cargs->schedule)
- ev = odp_schedule(NULL,
- ODP_SCHED_NO_WAIT);
- else
- ev = odp_queue_deq(out_queue);
-
- while (ev != ODP_EVENT_INVALID) {
- compl = odp_crypto_compl_from_event(ev);
- odp_crypto_compl_result(compl, &result);
- odp_crypto_compl_free(compl);
- out_pkt = result.pkt;
-
- if (cargs->debug_packets) {
- mem = odp_packet_data(out_pkt);
- print_mem("Receieved encrypted packet",
- mem,
- payload_length +
- config->hash_adjust);
- }
- if (cargs->reuse_packet) {
- params.pkt = out_pkt;
- params.out_pkt = ODP_PACKET_INVALID;
- } else {
- odp_packet_free(out_pkt);
- }
- packets_received++;
- if (cargs->schedule)
- ev = odp_schedule(NULL,
- ODP_SCHED_NO_WAIT);
- else
- ev = odp_queue_deq(out_queue);
- };
- }
- }
-
- fill_time_record(&end);
-
- {
- double count;
-
- count = get_elapsed_usec(&start, &end);
- result->elapsed = count /
- cargs->iteration_count;
-
- count = get_rusage_self_diff(&start, &end);
- result->rusage_self = count /
- cargs->iteration_count;
-
- count = get_rusage_thread_diff(&start, &end);
- result->rusage_thread = count /
- cargs->iteration_count;
- }
-
- odp_packet_free(pkt);
-
- return rc;
-}
-
-/**
- * Process one algorithm. Note if paload size is specicified it is
- * only one run. Or iterate over set of predefined payloads.
- */
-static int
-run_measure_one_config(crypto_args_t *cargs,
- crypto_alg_config_t *config)
-{
- crypto_run_result_t result;
- odp_crypto_session_t session;
- int rc = 0;
-
- if (create_session_from_config(&session, config, cargs))
- rc = -1;
-
- if (!rc) {
- if (cargs->payload_length) {
- rc = run_measure_one(cargs, config, &session,
- cargs->payload_length, &result);
- if (!rc) {
- print_result_header();
- print_result(cargs, cargs->payload_length,
- config, &result);
- }
- } else {
- unsigned i;
-
- print_result_header();
- for (i = 0; i < num_payloads; i++) {
- rc = run_measure_one(cargs, config, &session,
- payloads[i], &result);
- if (rc)
- break;
- print_result(cargs, payloads[i],
- config, &result);
- }
- }
- }
-
- if (session != ODP_CRYPTO_SESSION_INVALID)
- odp_crypto_session_destroy(session);
- return rc;
-}
-
-typedef struct thr_arg {
- crypto_args_t crypto_args;
- crypto_alg_config_t *crypto_alg_config;
-} thr_arg_t;
-
-static int run_thr_func(void *arg)
-{
- thr_arg_t *thr_args = (thr_arg_t *)arg;
-
- run_measure_one_config(&thr_args->crypto_args,
- thr_args->crypto_alg_config);
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- crypto_args_t cargs;
- odp_pool_t pool;
- odp_queue_param_t qparam;
- odp_pool_param_t params;
- odp_queue_t out_queue = ODP_QUEUE_INVALID;
- thr_arg_t thr_arg;
- odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- int num_workers = 1;
- odph_odpthread_t thr[num_workers];
- odp_instance_t instance;
- odp_pool_capability_t capa;
- uint32_t max_seg_len;
- unsigned i;
-
- memset(&cargs, 0, sizeof(cargs));
-
- /* Parse and store the application arguments */
- parse_args(argc, argv, &cargs);
-
- /* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
- app_err("ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- odp_init_local(instance, ODP_THREAD_WORKER);
-
- if (odp_pool_capability(&capa)) {
- app_err("Pool capability request failed.\n");
- exit(EXIT_FAILURE);
- }
-
- max_seg_len = capa.pkt.max_seg_len;
-
- for (i = 0; i < sizeof(payloads) / sizeof(unsigned int); i++) {
- if (payloads[i] > max_seg_len)
- break;
- }
-
- num_payloads = i;
-
- /* Create packet pool */
- odp_pool_param_init(&params);
- params.pkt.seg_len = max_seg_len;
- params.pkt.len = max_seg_len;
- params.pkt.num = POOL_NUM_PKT;
- params.type = ODP_POOL_PACKET;
- pool = odp_pool_create("packet_pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- app_err("packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
- odp_pool_print(pool);
-
- odp_queue_param_init(&qparam);
- if (cargs.schedule) {
- qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
- out_queue = odp_queue_create("crypto-out", &qparam);
- } else if (cargs.poll) {
- qparam.type = ODP_QUEUE_TYPE_PLAIN;
- out_queue = odp_queue_create("crypto-out", &qparam);
- }
- if (cargs.schedule || cargs.poll) {
- if (out_queue == ODP_QUEUE_INVALID) {
- app_err("crypto-out queue create failed.\n");
- exit(EXIT_FAILURE);
- }
- }
-
- if (cargs.schedule) {
- printf("Run in async scheduled mode\n");
-
- thr_arg.crypto_args = cargs;
- thr_arg.crypto_alg_config = cargs.alg_config;
- num_workers = odp_cpumask_default_worker(&cpumask,
- num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr,
- sizeof(cpumaskstr));
- printf("num worker threads: %i\n",
- num_workers);
- printf("first CPU: %i\n",
- odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n",
- cpumaskstr);
- } else if (cargs.poll) {
- printf("Run in async poll mode\n");
- } else {
- printf("Run in sync mode\n");
- }
-
- memset(thr, 0, sizeof(thr));
-
- if (cargs.alg_config) {
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_thr_func;
- thr_params.arg = &thr_arg;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
-
- if (cargs.schedule) {
- odph_odpthreads_create(&thr[0], &cpumask, &thr_params);
- odph_odpthreads_join(&thr[0]);
- } else {
- run_measure_one_config(&cargs, cargs.alg_config);
- }
- } else {
- unsigned int i;
-
- for (i = 0;
- i < (sizeof(algs_config) / sizeof(crypto_alg_config_t));
- i++) {
- run_measure_one_config(&cargs, algs_config + i);
- }
- }
-
- if (odp_pool_destroy(pool)) {
- app_err("Error: pool destroy\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_local()) {
- app_err("Error: term local\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_global(instance)) {
- app_err("Error: term global\n");
- exit(EXIT_FAILURE);
- }
-
- return 0;
-}
-
-static void parse_args(int argc, char *argv[], crypto_args_t *cargs)
-{
- int opt;
- int long_index;
- static const struct option longopts[] = {
- {"algorithm", optional_argument, NULL, 'a'},
- {"debug", no_argument, NULL, 'd'},
- {"flight", optional_argument, NULL, 'f'},
- {"help", no_argument, NULL, 'h'},
- {"iterations", optional_argument, NULL, 'i'},
- {"inplace", no_argument, NULL, 'n'},
- {"payload", optional_argument, NULL, 'l'},
- {"sessions", optional_argument, NULL, 'm'},
- {"reuse", no_argument, NULL, 'r'},
- {"poll", no_argument, NULL, 'p'},
- {"schedule", no_argument, NULL, 's'},
- {NULL, 0, NULL, 0}
- };
-
- static const char *shortopts = "+a:c:df:hi:m:nl:spr";
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
-
- cargs->in_place = 0;
- cargs->in_flight = 1;
- cargs->debug_packets = 0;
- cargs->iteration_count = 10000;
- cargs->payload_length = 0;
- cargs->alg_config = NULL;
- cargs->reuse_packet = 0;
- cargs->schedule = 0;
-
- opterr = 0; /* do not issue errors on helper options */
-
- while (1) {
- opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
-
- if (opt == -1)
- break; /* No more options */
-
- switch (opt) {
- case 'a':
- cargs->alg_config = find_config_by_name(optarg);
- if (!cargs->alg_config) {
- printf("cannot test crypto '%s' configuration\n",
- optarg);
- usage(argv[0]);
- exit(-1);
- }
- break;
- case 'd':
- cargs->debug_packets = 1;
- break;
- case 'i':
- cargs->iteration_count = atoi(optarg);
- break;
- case 'f':
- cargs->in_flight = atoi(optarg);
- break;
- case 'h':
- usage(argv[0]);
- exit(EXIT_SUCCESS);
- break;
- case 'm':
- cargs->max_sessions = atoi(optarg);
- break;
- case 'n':
- cargs->in_place = 1;
- break;
- case 'l':
- cargs->payload_length = atoi(optarg);
- break;
- case 'r':
- cargs->reuse_packet = 1;
- break;
- case 's':
- cargs->schedule = 1;
- break;
- case 'p':
- cargs->poll = 1;
- break;
- default:
- break;
- }
- }
-
- optind = 1; /* reset 'extern optind' from the getopt lib */
-
- if ((cargs->in_flight > 1) && cargs->reuse_packet) {
- printf("-f (in flight > 1) and -r (reuse packet) options are not compatible\n");
- usage(argv[0]);
- exit(-1);
- }
- if (cargs->schedule && cargs->poll) {
- printf("-s (schedule) and -p (poll) options are not compatible\n");
- usage(argv[0]);
- exit(-1);
- }
-}
-
-/**
- * Prinf usage information
- */
-static void usage(char *progname)
-{
- printf("\n"
- "Usage: %s OPTIONS\n"
- " E.g. %s -i 100000\n"
- "\n"
- "OpenDataPlane crypto speed measure.\n"
- "Optional OPTIONS\n"
- " -a, --algorithm <name> Specify algorithm name (default all)\n"
- " Supported values are:\n",
- progname, progname);
-
- print_config_names(" ");
- printf(" -d, --debug Enable dump of processed packets.\n"
- " -f, --flight <number> Max number of packet processed in parallel (default 1)\n"
- " -i, --iterations <number> Number of iterations.\n"
- " -n, --inplace Encrypt on place.\n"
- " -l, --payload Payload length.\n"
- " -r, --reuse Output encrypted packet is passed as input\n"
- " to next encrypt iteration.\n"
- " -s, --schedule Use scheduler for completion events.\n"
- " -p, --poll Poll completion queue for completion events.\n"
- " -h, --help Display help and exit.\n"
- "\n");
-}
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c
deleted file mode 100644
index 8f5c5e152..000000000
--- a/test/common_plat/performance/odp_l2fwd.c
+++ /dev/null
@@ -1,1531 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * @example odp_l2fwd.c ODP basic forwarding application
- */
-
-/** enable strtok */
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <stdlib.h>
-#include <getopt.h>
-#include <unistd.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <assert.h>
-
-#include <test_debug.h>
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-
-/** @def MAX_WORKERS
- * @brief Maximum number of worker threads
- */
-#define MAX_WORKERS 32
-
-/** @def SHM_PKT_POOL_SIZE
- * @brief Size of the shared memory block
- */
-#define SHM_PKT_POOL_SIZE 8192
-
-/** @def SHM_PKT_POOL_BUF_SIZE
- * @brief Buffer size of the packet pool buffer
- */
-#define SHM_PKT_POOL_BUF_SIZE 1856
-
-/** @def MAX_PKT_BURST
- * @brief Maximum number of packet in a burst
- */
-#define MAX_PKT_BURST 32
-
-/** Maximum number of pktio queues per interface */
-#define MAX_QUEUES 32
-
-/** Maximum number of pktio interfaces */
-#define MAX_PKTIOS 8
-
-/** Maximum pktio index table size */
-#define MAX_PKTIO_INDEXES 1024
-
-/**
- * Packet input mode
- */
-typedef enum pktin_mode_t {
- DIRECT_RECV,
- PLAIN_QUEUE,
- SCHED_PARALLEL,
- SCHED_ATOMIC,
- SCHED_ORDERED,
-} pktin_mode_t;
-
-/**
- * Packet output modes
- */
-typedef enum pktout_mode_t {
- PKTOUT_DIRECT,
- PKTOUT_QUEUE
-} pktout_mode_t;
-
-static inline int sched_mode(pktin_mode_t in_mode)
-{
- return (in_mode == SCHED_PARALLEL) ||
- (in_mode == SCHED_ATOMIC) ||
- (in_mode == SCHED_ORDERED);
-}
-
-/** Get rid of path in filename - only for unix-type paths using '/' */
-#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
- strrchr((file_name), '/') + 1 : (file_name))
-/**
- * Parsed command line application arguments
- */
-typedef struct {
- int cpu_count;
- int if_count; /**< Number of interfaces to be used */
- int addr_count; /**< Number of dst addresses to be used */
- int num_workers; /**< Number of worker threads */
- char **if_names; /**< Array of pointers to interface names */
- odph_ethaddr_t addrs[MAX_PKTIOS]; /**< Array of dst addresses */
- pktin_mode_t in_mode; /**< Packet input mode */
- pktout_mode_t out_mode; /**< Packet output mode */
- int time; /**< Time in seconds to run. */
- int accuracy; /**< Number of seconds to get and print statistics */
- char *if_str; /**< Storage for interface names */
- int dst_change; /**< Change destination eth addresses */
- int src_change; /**< Change source eth addresses */
- int error_check; /**< Check packet errors */
- int sched_mode; /**< Scheduler mode */
-} appl_args_t;
-
-static int exit_threads; /**< Break workers loop if set to 1 */
-
-/**
- * Statistics
- */
-typedef union {
- struct {
- /** Number of forwarded packets */
- uint64_t packets;
- /** Packets dropped due to receive error */
- uint64_t rx_drops;
- /** Packets dropped due to transmit error */
- uint64_t tx_drops;
- } s;
-
- uint8_t padding[ODP_CACHE_LINE_SIZE];
-} stats_t ODP_ALIGNED_CACHE;
-
-/**
- * Thread specific arguments
- */
-typedef struct thread_args_t {
- int thr_idx;
- int num_pktio;
-
- struct {
- odp_pktin_queue_t pktin;
- odp_pktout_queue_t pktout;
- odp_queue_t rx_queue;
- odp_queue_t tx_queue;
- int rx_idx;
- int tx_idx;
- int rx_queue_idx;
- int tx_queue_idx;
- } pktio[MAX_PKTIOS];
-
- stats_t *stats; /**< Pointer to per thread stats */
-} thread_args_t;
-
-/**
- * Grouping of all global data
- */
-typedef struct {
- /** Per thread packet stats */
- stats_t stats[MAX_WORKERS];
- /** Application (parsed) arguments */
- appl_args_t appl;
- /** Thread specific arguments */
- thread_args_t thread[MAX_WORKERS];
- /** Table of port ethernet addresses */
- odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
- /** Table of dst ethernet addresses */
- odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
- /** Table of dst ports. This is used by non-sched modes. */
- int dst_port[MAX_PKTIOS];
- /** Table of pktio handles */
- struct {
- odp_pktio_t pktio;
- odp_pktin_queue_t pktin[MAX_QUEUES];
- odp_pktout_queue_t pktout[MAX_QUEUES];
- odp_queue_t rx_q[MAX_QUEUES];
- odp_queue_t tx_q[MAX_QUEUES];
- int num_rx_thr;
- int num_tx_thr;
- int num_rx_queue;
- int num_tx_queue;
- int next_rx_queue;
- int next_tx_queue;
- } pktios[MAX_PKTIOS];
-
- /** Destination port lookup table.
- * Table index is pktio_index of the API. This is used by the sched
- * mode. */
- uint8_t dst_port_from_idx[MAX_PKTIO_INDEXES];
-
-} args_t;
-
-/** Global pointer to args */
-static args_t *gbl_args;
-/** Global barrier to synchronize main and workers */
-static odp_barrier_t barrier;
-
-/**
- * Drop packets which input parsing marked as containing errors.
- *
- * Frees packets with error and modifies pkt_tbl[] to only contain packets with
- * no detected errors.
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in pkt_tbl[]
- *
- * @return Number of packets dropped
- */
-static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
-{
- odp_packet_t pkt;
- unsigned dropped = 0;
- unsigned i, j;
-
- for (i = 0, j = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
-
- if (odp_unlikely(odp_packet_has_error(pkt))) {
- odp_packet_free(pkt); /* Drop */
- dropped++;
- } else if (odp_unlikely(i != j++)) {
- pkt_tbl[j - 1] = pkt;
- }
- }
-
- return dropped;
-}
-
-/**
- * Fill packets' eth addresses according to the destination port
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in the array
- * @param dst_port Destination port
- */
-static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
- unsigned num, int dst_port)
-{
- odp_packet_t pkt;
- odph_ethhdr_t *eth;
- unsigned i;
-
- if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
- return;
-
- for (i = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
-
- odp_packet_prefetch(pkt, 0, ODPH_ETHHDR_LEN);
-
- eth = odp_packet_data(pkt);
-
- if (gbl_args->appl.src_change)
- eth->src = gbl_args->port_eth_addr[dst_port];
-
- if (gbl_args->appl.dst_change)
- eth->dst = gbl_args->dst_eth_addr[dst_port];
- }
-}
-
-static inline int event_queue_send(odp_queue_t queue, odp_packet_t *pkt_tbl,
- unsigned pkts)
-{
- int ret;
- unsigned i;
- unsigned sent = 0;
- odp_event_t ev_tbl[pkts];
-
- for (i = 0; i < pkts; i++)
- ev_tbl[i] = odp_packet_to_event(pkt_tbl[i]);
-
- while (sent < pkts) {
- ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
-
- if (ret < 0) {
- LOG_ERR("Failed to send packet as events\n");
- break;
- }
-
- sent += ret;
- }
-
- return sent;
-}
-
-/**
- * Packet IO worker thread using scheduled queues
- *
- * @param arg thread arguments of type 'thread_args_t *'
- */
-static int run_worker_sched_mode(void *arg)
-{
- int pkts;
- int thr;
- int dst_idx;
- int i;
- int pktio, num_pktio;
- odp_pktout_queue_t pktout[MAX_PKTIOS];
- odp_queue_t tx_queue[MAX_PKTIOS];
- thread_args_t *thr_args = arg;
- stats_t *stats = thr_args->stats;
- int use_event_queue = gbl_args->appl.out_mode;
- pktin_mode_t in_mode = gbl_args->appl.in_mode;
-
- thr = odp_thread_id();
-
- num_pktio = thr_args->num_pktio;
-
- if (num_pktio > MAX_PKTIOS) {
- LOG_ERR("Too many pktios %i\n", num_pktio);
- return -1;
- }
-
- for (pktio = 0; pktio < num_pktio; pktio++) {
- tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
- pktout[pktio] = thr_args->pktio[pktio].pktout;
- }
-
- printf("[%02i] PKTIN_SCHED_%s, %s\n", thr,
- (in_mode == SCHED_PARALLEL) ? "PARALLEL" :
- ((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
- (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
-
- odp_barrier_wait(&barrier);
-
- /* Loop packets */
- while (!exit_threads) {
- odp_event_t ev_tbl[MAX_PKT_BURST];
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int sent;
- unsigned tx_drops;
- int src_idx;
-
- pkts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT, ev_tbl,
- MAX_PKT_BURST);
-
- if (pkts <= 0)
- continue;
-
- for (i = 0; i < pkts; i++)
- pkt_tbl[i] = odp_packet_from_event(ev_tbl[i]);
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
-
- /* packets from the same queue are from the same interface */
- src_idx = odp_packet_input_index(pkt_tbl[0]);
- assert(src_idx >= 0);
- dst_idx = gbl_args->dst_port_from_idx[src_idx];
- fill_eth_addrs(pkt_tbl, pkts, dst_idx);
-
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue[dst_idx], pkt_tbl,
- pkts);
- else
- sent = odp_pktout_send(pktout[dst_idx], pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
- }
-
- /* Make sure that latest stat writes are visible to other threads */
- odp_mb_full();
-
- return 0;
-}
-
-/**
- * Packet IO worker thread using plain queues
- *
- * @param arg thread arguments of type 'thread_args_t *'
- */
-static int run_worker_plain_queue_mode(void *arg)
-{
- int thr;
- int pkts;
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int dst_idx, num_pktio;
- odp_queue_t queue;
- odp_pktout_queue_t pktout;
- odp_queue_t tx_queue;
- int pktio = 0;
- thread_args_t *thr_args = arg;
- stats_t *stats = thr_args->stats;
- int use_event_queue = gbl_args->appl.out_mode;
-
- thr = odp_thread_id();
-
- num_pktio = thr_args->num_pktio;
- dst_idx = thr_args->pktio[pktio].tx_idx;
- queue = thr_args->pktio[pktio].rx_queue;
- pktout = thr_args->pktio[pktio].pktout;
- tx_queue = thr_args->pktio[pktio].tx_queue;
-
- printf("[%02i] num pktios %i, PKTIN_QUEUE, %s\n", thr, num_pktio,
- (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
-
- odp_barrier_wait(&barrier);
-
- /* Loop packets */
- while (!exit_threads) {
- int sent;
- unsigned tx_drops;
- odp_event_t event[MAX_PKT_BURST];
- int i;
-
- if (num_pktio > 1) {
- dst_idx = thr_args->pktio[pktio].tx_idx;
- queue = thr_args->pktio[pktio].rx_queue;
- pktout = thr_args->pktio[pktio].pktout;
- if (odp_unlikely(use_event_queue))
- tx_queue = thr_args->pktio[pktio].tx_queue;
-
- pktio++;
- if (pktio == num_pktio)
- pktio = 0;
- }
-
- pkts = odp_queue_deq_multi(queue, event, MAX_PKT_BURST);
- if (odp_unlikely(pkts <= 0))
- continue;
-
- for (i = 0; i < pkts; i++)
- pkt_tbl[i] = odp_packet_from_event(event[i]);
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
-
- fill_eth_addrs(pkt_tbl, pkts, dst_idx);
-
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue, pkt_tbl, pkts);
- else
- sent = odp_pktout_send(pktout, pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- int i;
-
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
- }
-
- /* Make sure that latest stat writes are visible to other threads */
- odp_mb_full();
-
- return 0;
-}
-
-/**
- * Packet IO worker thread accessing IO resources directly
- *
- * @param arg thread arguments of type 'thread_args_t *'
- */
-static int run_worker_direct_mode(void *arg)
-{
- int thr;
- int pkts;
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int dst_idx, num_pktio;
- odp_pktin_queue_t pktin;
- odp_pktout_queue_t pktout;
- odp_queue_t tx_queue;
- int pktio = 0;
- thread_args_t *thr_args = arg;
- stats_t *stats = thr_args->stats;
- int use_event_queue = gbl_args->appl.out_mode;
-
- thr = odp_thread_id();
-
- num_pktio = thr_args->num_pktio;
- dst_idx = thr_args->pktio[pktio].tx_idx;
- pktin = thr_args->pktio[pktio].pktin;
- pktout = thr_args->pktio[pktio].pktout;
- tx_queue = thr_args->pktio[pktio].tx_queue;
-
- printf("[%02i] num pktios %i, PKTIN_DIRECT, %s\n", thr, num_pktio,
- (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
-
- odp_barrier_wait(&barrier);
-
- /* Loop packets */
- while (!exit_threads) {
- int sent;
- unsigned tx_drops;
-
- if (num_pktio > 1) {
- dst_idx = thr_args->pktio[pktio].tx_idx;
- pktin = thr_args->pktio[pktio].pktin;
- pktout = thr_args->pktio[pktio].pktout;
- if (odp_unlikely(use_event_queue))
- tx_queue = thr_args->pktio[pktio].tx_queue;
-
- pktio++;
- if (pktio == num_pktio)
- pktio = 0;
- }
-
- pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
- if (odp_unlikely(pkts <= 0))
- continue;
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
-
- fill_eth_addrs(pkt_tbl, pkts, dst_idx);
-
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue, pkt_tbl, pkts);
- else
- sent = odp_pktout_send(pktout, pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- int i;
-
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
- }
-
- /* Make sure that latest stat writes are visible to other threads */
- odp_mb_full();
-
- return 0;
-}
-
-/**
- * Create a pktio handle, optionally associating a default input queue.
- *
- * @param dev Name of device to open
- * @param index Pktio index
- * @param pool Pool to associate with device for packet RX/TX
- *
- * @retval 0 on success
- * @retval -1 on failure
- */
-static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
- odp_pool_t pool)
-{
- odp_pktio_t pktio;
- odp_pktio_param_t pktio_param;
- odp_schedule_sync_t sync_mode;
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t pktin_param;
- odp_pktout_queue_param_t pktout_param;
- odp_pktio_op_mode_t mode_rx;
- odp_pktio_op_mode_t mode_tx;
- pktin_mode_t in_mode = gbl_args->appl.in_mode;
- odp_pktio_info_t info;
-
- odp_pktio_param_init(&pktio_param);
-
- if (in_mode == PLAIN_QUEUE)
- pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
- else if (in_mode != DIRECT_RECV) /* pktin_mode SCHED_* */
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
-
- if (gbl_args->appl.out_mode != PKTOUT_DIRECT)
- pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
-
- pktio = odp_pktio_open(dev, pool, &pktio_param);
- if (pktio == ODP_PKTIO_INVALID) {
- LOG_ERR("Error: failed to open %s\n", dev);
- return -1;
- }
-
- if (odp_pktio_info(pktio, &info)) {
- LOG_ERR("Error: pktio info failed %s\n", dev);
- return -1;
- }
-
- printf("created pktio %" PRIu64 ", dev: %s, drv: %s\n",
- odp_pktio_to_u64(pktio), dev, info.drv_name);
-
- if (odp_pktio_capability(pktio, &capa)) {
- LOG_ERR("Error: capability query failed %s\n", dev);
- return -1;
- }
-
- odp_pktin_queue_param_init(&pktin_param);
- odp_pktout_queue_param_init(&pktout_param);
-
- /* By default use a queue per worker. Sched mode ignores rx side
- * setting. */
- mode_rx = ODP_PKTIO_OP_MT_UNSAFE;
- mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
-
- if (gbl_args->appl.sched_mode) {
- if (gbl_args->appl.in_mode == SCHED_ATOMIC)
- sync_mode = ODP_SCHED_SYNC_ATOMIC;
- else if (gbl_args->appl.in_mode == SCHED_ORDERED)
- sync_mode = ODP_SCHED_SYNC_ORDERED;
- else
- sync_mode = ODP_SCHED_SYNC_PARALLEL;
-
- pktin_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- pktin_param.queue_param.sched.sync = sync_mode;
- pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
- }
-
- if (num_rx > (int)capa.max_input_queues) {
- printf("Sharing %i input queues between %i workers\n",
- capa.max_input_queues, num_rx);
- num_rx = capa.max_input_queues;
- mode_rx = ODP_PKTIO_OP_MT;
- }
-
- if (num_tx > (int)capa.max_output_queues) {
- printf("Sharing %i output queues between %i workers\n",
- capa.max_output_queues, num_tx);
- num_tx = capa.max_output_queues;
- mode_tx = ODP_PKTIO_OP_MT;
- }
-
- pktin_param.hash_enable = 1;
- pktin_param.hash_proto.proto.ipv4_udp = 1;
- pktin_param.num_queues = num_rx;
- pktin_param.op_mode = mode_rx;
-
- pktout_param.op_mode = mode_tx;
- pktout_param.num_queues = num_tx;
-
- if (odp_pktin_queue_config(pktio, &pktin_param)) {
- LOG_ERR("Error: input queue config failed %s\n", dev);
- return -1;
- }
-
- if (odp_pktout_queue_config(pktio, &pktout_param)) {
- LOG_ERR("Error: output queue config failed %s\n", dev);
- return -1;
- }
-
- if (gbl_args->appl.in_mode == DIRECT_RECV) {
- if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin,
- num_rx) != num_rx) {
- LOG_ERR("Error: pktin queue query failed %s\n",
- dev);
- return -1;
- }
- } else {
- if (odp_pktin_event_queue(pktio,
- gbl_args->pktios[idx].rx_q,
- num_rx) != num_rx) {
- LOG_ERR("Error: pktin event queue query failed %s\n",
- dev);
- return -1;
- }
- }
-
- if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
- if (odp_pktout_queue(pktio,
- gbl_args->pktios[idx].pktout,
- num_tx) != num_tx) {
- LOG_ERR("Error: pktout queue query failed %s\n", dev);
- return -1;
- }
- } else {
- if (odp_pktout_event_queue(pktio,
- gbl_args->pktios[idx].tx_q,
- num_tx) != num_tx) {
- LOG_ERR("Error: event queue query failed %s\n", dev);
- return -1;
- }
- }
-
- printf("created %i input and %i output queues on (%s)\n",
- num_rx, num_tx, dev);
-
- gbl_args->pktios[idx].num_rx_queue = num_rx;
- gbl_args->pktios[idx].num_tx_queue = num_tx;
- gbl_args->pktios[idx].pktio = pktio;
-
- return 0;
-}
-
-/**
- * Print statistics
- *
- * @param num_workers Number of worker threads
- * @param thr_stats Pointer to stats storage
- * @param duration Number of seconds to loop in
- * @param timeout Number of seconds for stats calculation
- *
- */
-static int print_speed_stats(int num_workers, stats_t *thr_stats,
- int duration, int timeout)
-{
- uint64_t pkts = 0;
- uint64_t pkts_prev = 0;
- uint64_t pps;
- uint64_t rx_drops, tx_drops;
- uint64_t maximum_pps = 0;
- int i;
- int elapsed = 0;
- int stats_enabled = 1;
- int loop_forever = (duration == 0);
-
- if (timeout <= 0) {
- stats_enabled = 0;
- timeout = 1;
- }
- /* Wait for all threads to be ready*/
- odp_barrier_wait(&barrier);
-
- do {
- pkts = 0;
- rx_drops = 0;
- tx_drops = 0;
-
- sleep(timeout);
-
- for (i = 0; i < num_workers; i++) {
- pkts += thr_stats[i].s.packets;
- rx_drops += thr_stats[i].s.rx_drops;
- tx_drops += thr_stats[i].s.tx_drops;
- }
- if (stats_enabled) {
- pps = (pkts - pkts_prev) / timeout;
- if (pps > maximum_pps)
- maximum_pps = pps;
- printf("%" PRIu64 " pps, %" PRIu64 " max pps, ", pps,
- maximum_pps);
-
- printf(" %" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
- rx_drops, tx_drops);
-
- pkts_prev = pkts;
- }
- elapsed += timeout;
- } while (loop_forever || (elapsed < duration));
-
- if (stats_enabled)
- printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
- maximum_pps);
-
- return pkts > 100 ? 0 : -1;
-}
-
-static void print_port_mapping(void)
-{
- int if_count;
- int pktio;
-
- if_count = gbl_args->appl.if_count;
-
- printf("\nPort config\n--------------------\n");
-
- for (pktio = 0; pktio < if_count; pktio++) {
- const char *dev = gbl_args->appl.if_names[pktio];
-
- printf("Port %i (%s)\n", pktio, dev);
- printf(" rx workers %i\n",
- gbl_args->pktios[pktio].num_rx_thr);
- printf(" tx workers %i\n",
- gbl_args->pktios[pktio].num_tx_thr);
- printf(" rx queues %i\n",
- gbl_args->pktios[pktio].num_rx_queue);
- printf(" tx queues %i\n",
- gbl_args->pktios[pktio].num_tx_queue);
- }
-
- printf("\n");
-}
-
-/**
- * Find the destination port for a given input port
- *
- * @param port Input port index
- */
-static int find_dest_port(int port)
-{
- /* Even number of ports */
- if (gbl_args->appl.if_count % 2 == 0)
- return (port % 2 == 0) ? port + 1 : port - 1;
-
- /* Odd number of ports */
- if (port == gbl_args->appl.if_count - 1)
- return 0;
- else
- return port + 1;
-}
-
-/*
- * Bind worker threads to interfaces and calculate number of queues needed
- *
- * less workers (N) than interfaces (M)
- * - assign each worker to process every Nth interface
- * - workers process inequal number of interfaces, when M is not divisible by N
- * - needs only single queue per interface
- * otherwise
- * - assign an interface to every Mth worker
- * - interfaces are processed by inequal number of workers, when N is not
- * divisible by M
- * - tries to configure a queue per worker per interface
- * - shares queues, if interface capability does not allows a queue per worker
- */
-static void bind_workers(void)
-{
- int if_count, num_workers;
- int rx_idx, tx_idx, thr, pktio, i;
- thread_args_t *thr_args;
-
- if_count = gbl_args->appl.if_count;
- num_workers = gbl_args->appl.num_workers;
-
- if (gbl_args->appl.sched_mode) {
- /* all threads receive and send on all pktios */
- for (i = 0; i < if_count; i++) {
- gbl_args->pktios[i].num_rx_thr = num_workers;
- gbl_args->pktios[i].num_tx_thr = num_workers;
- }
-
- for (thr = 0; thr < num_workers; thr++) {
- thr_args = &gbl_args->thread[thr];
- thr_args->num_pktio = if_count;
-
- /* In sched mode, pktios are not cross connected with
- * local pktio indexes */
- for (i = 0; i < if_count; i++) {
- thr_args->pktio[i].rx_idx = i;
- thr_args->pktio[i].tx_idx = i;
- }
- }
- } else {
- /* initialize port forwarding table */
- for (rx_idx = 0; rx_idx < if_count; rx_idx++)
- gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx);
-
- if (if_count > num_workers) {
- /* Less workers than pktios. Assign single worker per
- * pktio. */
- thr = 0;
-
- for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
- thr_args = &gbl_args->thread[thr];
- pktio = thr_args->num_pktio;
- /* Cross connect rx to tx */
- tx_idx = gbl_args->dst_port[rx_idx];
- thr_args->pktio[pktio].rx_idx = rx_idx;
- thr_args->pktio[pktio].tx_idx = tx_idx;
- thr_args->num_pktio++;
-
- gbl_args->pktios[rx_idx].num_rx_thr++;
- gbl_args->pktios[tx_idx].num_tx_thr++;
-
- thr++;
- if (thr >= num_workers)
- thr = 0;
- }
- } else {
- /* More workers than pktios. Assign at least one worker
- * per pktio. */
- rx_idx = 0;
-
- for (thr = 0; thr < num_workers; thr++) {
- thr_args = &gbl_args->thread[thr];
- pktio = thr_args->num_pktio;
- /* Cross connect rx to tx */
- tx_idx = gbl_args->dst_port[rx_idx];
- thr_args->pktio[pktio].rx_idx = rx_idx;
- thr_args->pktio[pktio].tx_idx = tx_idx;
- thr_args->num_pktio++;
-
- gbl_args->pktios[rx_idx].num_rx_thr++;
- gbl_args->pktios[tx_idx].num_tx_thr++;
-
- rx_idx++;
- if (rx_idx >= if_count)
- rx_idx = 0;
- }
- }
- }
-}
-
-/*
- * Bind queues to threads and fill in missing thread arguments (handles)
- */
-static void bind_queues(void)
-{
- int num_workers;
- int thr, i;
-
- num_workers = gbl_args->appl.num_workers;
-
- printf("\nQueue binding (indexes)\n-----------------------\n");
-
- for (thr = 0; thr < num_workers; thr++) {
- int rx_idx, tx_idx;
- thread_args_t *thr_args = &gbl_args->thread[thr];
- int num = thr_args->num_pktio;
-
- printf("worker %i\n", thr);
-
- for (i = 0; i < num; i++) {
- int rx_queue, tx_queue;
-
- rx_idx = thr_args->pktio[i].rx_idx;
- tx_idx = thr_args->pktio[i].tx_idx;
- rx_queue = gbl_args->pktios[rx_idx].next_rx_queue;
- tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;
-
- thr_args->pktio[i].rx_queue_idx = rx_queue;
- thr_args->pktio[i].tx_queue_idx = tx_queue;
- thr_args->pktio[i].pktin =
- gbl_args->pktios[rx_idx].pktin[rx_queue];
- thr_args->pktio[i].rx_queue =
- gbl_args->pktios[rx_idx].rx_q[rx_queue];
- thr_args->pktio[i].pktout =
- gbl_args->pktios[tx_idx].pktout[tx_queue];
- thr_args->pktio[i].tx_queue =
- gbl_args->pktios[tx_idx].tx_q[tx_queue];
-
- if (!gbl_args->appl.sched_mode)
- printf(" rx: pktio %i, queue %i\n",
- rx_idx, rx_queue);
-
- printf(" tx: pktio %i, queue %i\n",
- tx_idx, tx_queue);
-
- rx_queue++;
- tx_queue++;
-
- if (rx_queue >= gbl_args->pktios[rx_idx].num_rx_queue)
- rx_queue = 0;
- if (tx_queue >= gbl_args->pktios[tx_idx].num_tx_queue)
- tx_queue = 0;
-
- gbl_args->pktios[rx_idx].next_rx_queue = rx_queue;
- gbl_args->pktios[tx_idx].next_tx_queue = tx_queue;
- }
- }
-
- printf("\n");
-}
-
-static void init_port_lookup_tbl(void)
-{
- int rx_idx, if_count;
-
- if_count = gbl_args->appl.if_count;
-
- for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
- odp_pktio_t pktio = gbl_args->pktios[rx_idx].pktio;
- int pktio_idx = odp_pktio_index(pktio);
- int dst_port = find_dest_port(rx_idx);
-
- if (pktio_idx < 0 || pktio_idx >= MAX_PKTIO_INDEXES) {
- LOG_ERR("Bad pktio index %i\n", pktio_idx);
- exit(EXIT_FAILURE);
- }
-
- gbl_args->dst_port_from_idx[pktio_idx] = dst_port;
- }
-}
-
-/**
- * Prinf usage information
- */
-static void usage(char *progname)
-{
- printf("\n"
- "OpenDataPlane L2 forwarding application.\n"
- "\n"
- "Usage: %s OPTIONS\n"
- " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
- " In the above example,\n"
- " eth0 will send pkts to eth1 and vice versa\n"
- " eth2 will send pkts to eth3 and vice versa\n"
- "\n"
- "Mandatory OPTIONS:\n"
- " -i, --interface Eth interfaces (comma-separated, no spaces)\n"
- " Interface count min 1, max %i\n"
- "\n"
- "Optional OPTIONS:\n"
- " -m, --mode Packet input mode\n"
- " 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
- " 1: Scheduler mode with parallel queues: PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
- " 2: Scheduler mode with atomic queues: PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n"
- " 3: Scheduler mode with ordered queues: PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n"
- " 4: Plain queue mode: ODP_PKTIN_MODE_QUEUE\n"
- " -o, --out_mode Packet output mode\n"
- " 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
- " 1: Queue mode: PKTOUT_MODE_QUEUE\n"
- " -c, --count <number> CPU count.\n"
- " -t, --time <number> Time in seconds to run.\n"
- " -a, --accuracy <number> Time in seconds get print statistics\n"
- " (default is 1 second).\n"
- " -d, --dst_change 0: Don't change packets' dst eth addresses\n"
- " 1: Change packets' dst eth addresses (default)\n"
- " -s, --src_change 0: Don't change packets' src eth addresses\n"
- " 1: Change packets' src eth addresses (default)\n"
- " -r, --dst_addr Destination addresses (comma-separated, no spaces)\n"
- " Requires also the -d flag to be set\n"
- " -e, --error_check 0: Don't check packet errors (default)\n"
- " 1: Check packet errors\n"
- " -h, --help Display help and exit.\n\n"
- "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
- );
-}
-
-/**
- * Parse and store the command line arguments
- *
- * @param argc argument count
- * @param argv[] argument vector
- * @param appl_args Store application arguments here
- */
-static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
-{
- int opt;
- int long_index;
- char *token;
- char *addr_str;
- size_t len;
- int i;
- static const struct option longopts[] = {
- {"count", required_argument, NULL, 'c'},
- {"time", required_argument, NULL, 't'},
- {"accuracy", required_argument, NULL, 'a'},
- {"interface", required_argument, NULL, 'i'},
- {"mode", required_argument, NULL, 'm'},
- {"out_mode", required_argument, NULL, 'o'},
- {"dst_addr", required_argument, NULL, 'r'},
- {"dst_change", required_argument, NULL, 'd'},
- {"src_change", required_argument, NULL, 's'},
- {"error_check", required_argument, NULL, 'e'},
- {"help", no_argument, NULL, 'h'},
- {NULL, 0, NULL, 0}
- };
-
- static const char *shortopts = "+c:+t:+a:i:m:o:r:d:s:e:h";
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
-
- appl_args->time = 0; /* loop forever if time to run is 0 */
- appl_args->accuracy = 1; /* get and print pps stats second */
- appl_args->dst_change = 1; /* change eth dst address by default */
- appl_args->src_change = 1; /* change eth src address by default */
- appl_args->error_check = 0; /* don't check packet errors by default */
-
- opterr = 0; /* do not issue errors on helper options */
-
- while (1) {
- opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
-
- if (opt == -1)
- break; /* No more options */
-
- switch (opt) {
- case 'c':
- appl_args->cpu_count = atoi(optarg);
- break;
- case 't':
- appl_args->time = atoi(optarg);
- break;
- case 'a':
- appl_args->accuracy = atoi(optarg);
- break;
- /* parse packet-io interface names */
- case 'r':
- len = strlen(optarg);
- if (len == 0) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- len += 1; /* add room for '\0' */
-
- addr_str = malloc(len);
- if (addr_str == NULL) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
-
- /* store the mac addresses names */
- strcpy(addr_str, optarg);
- for (token = strtok(addr_str, ","), i = 0;
- token != NULL; token = strtok(NULL, ","), i++) {
- if (i >= MAX_PKTIOS) {
- printf("too many MAC addresses\n");
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- if (odph_eth_addr_parse(&appl_args->addrs[i],
- token) != 0) {
- printf("invalid MAC address\n");
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- }
- appl_args->addr_count = i;
- if (appl_args->addr_count < 1) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- free(addr_str);
- break;
- case 'i':
- len = strlen(optarg);
- if (len == 0) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- len += 1; /* add room for '\0' */
-
- appl_args->if_str = malloc(len);
- if (appl_args->if_str == NULL) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
-
- /* count the number of tokens separated by ',' */
- strcpy(appl_args->if_str, optarg);
- for (token = strtok(appl_args->if_str, ","), i = 0;
- token != NULL;
- token = strtok(NULL, ","), i++)
- ;
-
- appl_args->if_count = i;
-
- if (appl_args->if_count < 1 ||
- appl_args->if_count > MAX_PKTIOS) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
-
- /* allocate storage for the if names */
- appl_args->if_names =
- calloc(appl_args->if_count, sizeof(char *));
-
- /* store the if names (reset names string) */
- strcpy(appl_args->if_str, optarg);
- for (token = strtok(appl_args->if_str, ","), i = 0;
- token != NULL; token = strtok(NULL, ","), i++) {
- appl_args->if_names[i] = token;
- }
- break;
- case 'm':
- i = atoi(optarg);
- if (i == 1)
- appl_args->in_mode = SCHED_PARALLEL;
- else if (i == 2)
- appl_args->in_mode = SCHED_ATOMIC;
- else if (i == 3)
- appl_args->in_mode = SCHED_ORDERED;
- else if (i == 4)
- appl_args->in_mode = PLAIN_QUEUE;
- else
- appl_args->in_mode = DIRECT_RECV;
- break;
- case 'o':
- i = atoi(optarg);
- if (i != 0)
- appl_args->out_mode = PKTOUT_QUEUE;
- break;
- case 'd':
- appl_args->dst_change = atoi(optarg);
- break;
- case 's':
- appl_args->src_change = atoi(optarg);
- break;
- case 'e':
- appl_args->error_check = atoi(optarg);
- break;
- case 'h':
- usage(argv[0]);
- exit(EXIT_SUCCESS);
- break;
- default:
- break;
- }
- }
-
- if (appl_args->if_count == 0) {
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
- if (appl_args->addr_count != 0 &&
- appl_args->addr_count != appl_args->if_count) {
- printf("Number of destination addresses differs from number"
- " of interfaces\n");
- usage(argv[0]);
- exit(EXIT_FAILURE);
- }
-
- optind = 1; /* reset 'extern optind' from the getopt lib */
-}
-
-/**
- * Print system and application info
- */
-static void print_info(char *progname, appl_args_t *appl_args)
-{
- int i;
-
- printf("\n"
- "ODP system info\n"
- "---------------\n"
- "ODP API version: %s\n"
- "ODP impl name: %s\n"
- "CPU model: %s\n"
- "CPU freq (hz): %" PRIu64 "\n"
- "Cache line size: %i\n"
- "CPU count: %i\n"
- "\n",
- odp_version_api_str(), odp_version_impl_name(),
- odp_cpu_model_str(), odp_cpu_hz_max(),
- odp_sys_cache_line_size(), odp_cpu_count());
-
- printf("Running ODP appl: \"%s\"\n"
- "-----------------\n"
- "IF-count: %i\n"
- "Using IFs: ",
- progname, appl_args->if_count);
- for (i = 0; i < appl_args->if_count; ++i)
- printf(" %s", appl_args->if_names[i]);
- printf("\n"
- "Mode: ");
- if (appl_args->in_mode == DIRECT_RECV)
- printf("PKTIN_DIRECT, ");
- else if (appl_args->in_mode == PLAIN_QUEUE)
- printf("PKTIN_QUEUE, ");
- else if (appl_args->in_mode == SCHED_PARALLEL)
- printf("PKTIN_SCHED_PARALLEL, ");
- else if (appl_args->in_mode == SCHED_ATOMIC)
- printf("PKTIN_SCHED_ATOMIC, ");
- else if (appl_args->in_mode == SCHED_ORDERED)
- printf("PKTIN_SCHED_ORDERED, ");
-
- if (appl_args->out_mode)
- printf("PKTOUT_QUEUE");
- else
- printf("PKTOUT_DIRECT");
-
- printf("\n\n");
- fflush(NULL);
-}
-
-static void gbl_args_init(args_t *args)
-{
- int pktio, queue;
-
- memset(args, 0, sizeof(args_t));
-
- for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
- args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
-
- for (queue = 0; queue < MAX_QUEUES; queue++)
- args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
- }
-}
-
-/**
- * ODP L2 forwarding main function
- */
-int main(int argc, char *argv[])
-{
- odph_odpthread_t thread_tbl[MAX_WORKERS];
- odp_pool_t pool;
- int i;
- int cpu;
- int num_workers;
- odp_shm_t shm;
- odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- odph_ethaddr_t new_addr;
- odp_pool_param_t params;
- int ret;
- stats_t *stats;
- int if_count;
- int (*thr_run_func)(void *);
- odp_instance_t instance;
-
- /* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
- LOG_ERR("Error: ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- LOG_ERR("Error: ODP local init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Reserve memory for args from shared mem */
- shm = odp_shm_reserve("shm_args", sizeof(args_t),
- ODP_CACHE_LINE_SIZE, 0);
- gbl_args = odp_shm_addr(shm);
-
- if (gbl_args == NULL) {
- LOG_ERR("Error: shared mem alloc failed.\n");
- exit(EXIT_FAILURE);
- }
- gbl_args_init(gbl_args);
-
- /* Parse and store the application arguments */
- parse_args(argc, argv, &gbl_args->appl);
-
- if (sched_mode(gbl_args->appl.in_mode))
- gbl_args->appl.sched_mode = 1;
-
- /* Print both system and application information */
- print_info(NO_PATH(argv[0]), &gbl_args->appl);
-
- /* Default to system CPU count unless user specified */
- num_workers = MAX_WORKERS;
- if (gbl_args->appl.cpu_count)
- num_workers = gbl_args->appl.cpu_count;
-
- /* Get default worker cpumask */
- num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
-
- gbl_args->appl.num_workers = num_workers;
-
- for (i = 0; i < num_workers; i++)
- gbl_args->thread[i].thr_idx = i;
-
- if_count = gbl_args->appl.if_count;
-
- printf("num worker threads: %i\n", num_workers);
- printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n", cpumaskstr);
-
- /* Create packet pool */
- odp_pool_param_init(&params);
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create("packet pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
- odp_pool_print(pool);
-
- if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES)
- LOG_DBG("Warning: max pktio index (%u) is too large\n",
- odp_pktio_max_index());
-
- bind_workers();
-
- for (i = 0; i < if_count; ++i) {
- const char *dev = gbl_args->appl.if_names[i];
- int num_rx, num_tx;
-
- /* A queue per worker in scheduled mode */
- num_rx = num_workers;
- num_tx = num_workers;
-
- if (!gbl_args->appl.sched_mode) {
- /* A queue per assigned worker */
- num_rx = gbl_args->pktios[i].num_rx_thr;
- num_tx = gbl_args->pktios[i].num_tx_thr;
- }
-
- if (create_pktio(dev, i, num_rx, num_tx, pool))
- exit(EXIT_FAILURE);
-
- /* Save interface ethernet address */
- if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
- gbl_args->port_eth_addr[i].addr,
- ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- LOG_ERR("Error: interface ethernet address unknown\n");
- exit(EXIT_FAILURE);
- }
-
- /* Save destination eth address */
- if (gbl_args->appl.dst_change) {
- /* 02:00:00:00:00:XX */
- memset(&new_addr, 0, sizeof(odph_ethaddr_t));
- if (gbl_args->appl.addr_count) {
- memcpy(&new_addr, &gbl_args->appl.addrs[i],
- sizeof(odph_ethaddr_t));
- } else {
- new_addr.addr[0] = 0x02;
- new_addr.addr[5] = i;
- }
- gbl_args->dst_eth_addr[i] = new_addr;
- }
- }
-
- gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
-
- bind_queues();
-
- init_port_lookup_tbl();
-
- if (!gbl_args->appl.sched_mode)
- print_port_mapping();
-
- memset(thread_tbl, 0, sizeof(thread_tbl));
-
- stats = gbl_args->stats;
-
- odp_barrier_init(&barrier, num_workers + 1);
-
- if (gbl_args->appl.in_mode == DIRECT_RECV)
- thr_run_func = run_worker_direct_mode;
- else if (gbl_args->appl.in_mode == PLAIN_QUEUE)
- thr_run_func = run_worker_plain_queue_mode;
- else /* SCHED_PARALLEL / SCHED_ATOMIC / SCHED_ORDERED */
- thr_run_func = run_worker_sched_mode;
-
- /* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = thr_run_func;
- thr_params.arg = &gbl_args->thread[i];
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
-
- gbl_args->thread[i].stats = &stats[i];
-
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&thread_tbl[i], &thd_mask,
- &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
- }
-
- /* Start packet receive and transmit */
- for (i = 0; i < if_count; ++i) {
- odp_pktio_t pktio;
-
- pktio = gbl_args->pktios[i].pktio;
- ret = odp_pktio_start(pktio);
- if (ret) {
- LOG_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
- exit(EXIT_FAILURE);
- }
- }
-
- ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
- gbl_args->appl.accuracy);
- exit_threads = 1;
-
- /* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
-
- free(gbl_args->appl.if_names);
- free(gbl_args->appl.if_str);
-
- if (odp_pool_destroy(pool)) {
- LOG_ERR("Error: pool destroy\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_shm_free(shm)) {
- LOG_ERR("Error: shm free\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_local()) {
- LOG_ERR("Error: term local\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_global(instance)) {
- LOG_ERR("Error: term global\n");
- exit(EXIT_FAILURE);
- }
-
- printf("Exit %d\n\n", ret);
- return ret;
-}
diff --git a/test/common_plat/performance/odp_pktio_ordered_run.sh b/test/common_plat/performance/odp_pktio_ordered_run.sh
deleted file mode 100755
index d91211c0c..000000000
--- a/test/common_plat/performance/odp_pktio_ordered_run.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2016, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-DURATION=5
-LOG=odp_pktio_ordered.log
-LOOPS=100000000
-PASS_PPS=5000
-PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
-PCAP_OUT=/dev/null
-
-# This just turns off output buffering so that you still get periodic
-# output while piping to tee, as long as stdbuf is available.
-if [ "$(which stdbuf)" != "" ]; then
- STDBUF="stdbuf -o 0"
-else
- STDBUF=
-fi
-
-$STDBUF ./odp_pktio_ordered${EXEEXT} -i pcap:in=${PCAP_IN}:loops=$LOOPS,\
-pcap:out=${PCAP_OUT} -t $DURATION | tee $LOG
-
-ret=$?
-
-if [ ! -f $LOG ]; then
- echo "FAIL: $LOG not found"
- ret=1
-elif [ $ret -eq 0 ]; then
- MAX_PPS=$(awk '/TEST RESULT/ {print $3}' $LOG)
- if [ "$MAX_PPS" -lt "$PASS_PPS" ]; then
- echo "FAIL: pps below threshold $MAX_PPS < $PASS_PPS"
- ret=1
- fi
-fi
-
-rm -f $LOG
-
-exit $ret
diff --git a/test/common_plat/performance/odp_scheduling_run.sh b/test/common_plat/performance/odp_scheduling_run.sh
deleted file mode 100755
index a22326d4e..000000000
--- a/test/common_plat/performance/odp_scheduling_run.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Script that passes command line arguments to odp_scheduling test when
-# launched by 'make check'
-
-TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-ret=0
-ALL=0
-
-run()
-{
- echo odp_scheduling_run starts requesting $1 worker threads
- echo ===============================================
-
- $TEST_DIR/odp_scheduling${EXEEXT} -c $1 || ret=1
-}
-
-run 1
-run 5
-run 8
-run 11
-run $ALL
-
-exit $ret
diff --git a/test/common_plat/validation/api/.gitignore b/test/common_plat/validation/api/.gitignore
deleted file mode 100644
index 7e563b8b3..000000000
--- a/test/common_plat/validation/api/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.log
-*.trs
diff --git a/test/common_plat/validation/api/Makefile.am b/test/common_plat/validation/api/Makefile.am
deleted file mode 100644
index e2d30a673..000000000
--- a/test/common_plat/validation/api/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-ODP_MODULES = atomic \
- barrier \
- buffer \
- classification \
- cpumask \
- crypto \
- errno \
- hash \
- init \
- lock \
- queue \
- packet \
- pktio \
- pool \
- random \
- scheduler \
- std_clib \
- thread \
- time \
- timer \
- traffic_mngr \
- shmem \
- system
-
-SUBDIRS = $(ODP_MODULES)
-
-#The tests will need to retain the deprecated test implementation
-AM_CFLAGS += -Wno-deprecated-declarations
diff --git a/test/common_plat/validation/api/Makefile.inc b/test/common_plat/validation/api/Makefile.inc
deleted file mode 100644
index fd04a199a..000000000
--- a/test/common_plat/validation/api/Makefile.inc
+++ /dev/null
@@ -1,16 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-
-COMMON_DIR = $(top_builddir)/test/common_plat/common
-
-#the following option ensure that option '-I.' is not passed to gcc,
-#therefore distinguishing between '#include "X"' and '#include <X>'.
-#It allows common filenames (such as 'errno.h') to be used locally.
-AUTOMAKE_OPTIONS = nostdinc
-
-AM_CFLAGS += -I$(top_srcdir)/test/common_plat/common
-AM_LDFLAGS += -static
-
-LIBCUNIT_COMMON = $(COMMON_DIR)/libcunit_common.la
-LIBCPUMASK_COMMON = $(COMMON_DIR)/libcpumask_common.la
-LIBTHRMASK_COMMON = $(COMMON_DIR)/libthrmask_common.la
-LIBODP = $(LIB)/libodphelper.la $(LIB)/libodp-dpdk.la
diff --git a/test/common_plat/validation/api/atomic/Makefile.am b/test/common_plat/validation/api/atomic/Makefile.am
deleted file mode 100644
index 9b6bd6315..000000000
--- a/test/common_plat/validation/api/atomic/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestatomic.la
-libtestatomic_la_SOURCES = atomic.c
-
-test_PROGRAMS = atomic_main$(EXEEXT)
-dist_atomic_main_SOURCES = atomic_main.c
-atomic_main_LDADD = libtestatomic.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = atomic.h
diff --git a/test/common_plat/validation/api/atomic/atomic.c b/test/common_plat/validation/api/atomic/atomic.c
deleted file mode 100644
index db9484bc2..000000000
--- a/test/common_plat/validation/api/atomic/atomic.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <malloc.h>
-#include <odp_api.h>
-#include <CUnit/Basic.h>
-#include <odp_cunit_common.h>
-#include <unistd.h>
-#include "atomic.h"
-
-#define VERBOSE 0
-#define MAX_ITERATIONS 1000
-
-#define ADD_SUB_CNT 5
-
-#define CNT 10
-#define U32_INIT_VAL (1UL << 10)
-#define U64_INIT_VAL (1ULL << 33)
-#define U32_MAGIC 0xa23f65b2
-#define U64_MAGIC 0xf2e1c5430cb6a52e
-
-#define GLOBAL_SHM_NAME "GlobalLockTest"
-
-#define UNUSED __attribute__((__unused__))
-
-#define CHECK_MAX_MIN (1 << 0)
-#define CHECK_XCHG (1 << 2)
-
-static odp_atomic_u32_t a32u;
-static odp_atomic_u64_t a64u;
-static odp_atomic_u32_t a32u_min;
-static odp_atomic_u32_t a32u_max;
-static odp_atomic_u64_t a64u_min;
-static odp_atomic_u64_t a64u_max;
-static odp_atomic_u32_t a32u_xchg;
-static odp_atomic_u64_t a64u_xchg;
-
-typedef __volatile uint32_t volatile_u32_t;
-typedef __volatile uint64_t volatile_u64_t;
-
-typedef struct {
- /* Global variables */
- uint32_t g_num_threads;
- uint32_t g_iterations;
- uint32_t g_verbose;
- uint32_t g_max_num_cores;
-
- volatile_u32_t global_lock_owner;
-} global_shared_mem_t;
-
-/* Per-thread memory */
-typedef struct {
- global_shared_mem_t *global_mem;
-
- int thread_id;
- int thread_core;
-
- volatile_u64_t delay_counter;
-} per_thread_mem_t;
-
-static odp_shm_t global_shm;
-static global_shared_mem_t *global_mem;
-
-/* Initialise per-thread memory */
-static per_thread_mem_t *thread_init(void)
-{
- global_shared_mem_t *global_mem;
- per_thread_mem_t *per_thread_mem;
- odp_shm_t global_shm;
- uint32_t per_thread_mem_len;
-
- per_thread_mem_len = sizeof(per_thread_mem_t);
- per_thread_mem = malloc(per_thread_mem_len);
- memset(per_thread_mem, 0, per_thread_mem_len);
-
- per_thread_mem->delay_counter = 1;
-
- per_thread_mem->thread_id = odp_thread_id();
- per_thread_mem->thread_core = odp_cpu_id();
-
- global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
- global_mem = odp_shm_addr(global_shm);
- CU_ASSERT_PTR_NOT_NULL(global_mem);
-
- per_thread_mem->global_mem = global_mem;
-
- return per_thread_mem;
-}
-
-static void thread_finalize(per_thread_mem_t *per_thread_mem)
-{
- free(per_thread_mem);
-}
-
-static void test_atomic_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u32(&a32u);
-}
-
-static void test_atomic_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u64(&a64u);
-}
-
-static void test_atomic_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u32(&a32u);
-}
-
-static void test_atomic_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u64(&a64u);
-}
-
-static void test_atomic_fetch_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u32(&a32u);
-}
-
-static void test_atomic_fetch_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u64(&a64u);
-}
-
-static void test_atomic_fetch_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u32(&a32u);
-}
-
-static void test_atomic_fetch_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u64(&a64u);
-}
-
-static void test_atomic_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_min_32(void)
-{
- int i;
- uint32_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_dec_u32(&a32u);
- odp_atomic_min_u32(&a32u_min, tmp);
- }
-}
-
-static void test_atomic_min_64(void)
-{
- int i;
- uint64_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_dec_u64(&a64u);
- odp_atomic_min_u64(&a64u_min, tmp);
- }
-}
-
-static void test_atomic_max_32(void)
-{
- int i;
- uint32_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_inc_u32(&a32u);
- odp_atomic_max_u32(&a32u_max, tmp);
- }
-}
-
-static void test_atomic_max_64(void)
-{
- int i;
- uint64_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_inc_u64(&a64u);
- odp_atomic_max_u64(&a64u_max, tmp);
- }
-}
-
-static void test_atomic_cas_inc_32(void)
-{
- int i;
- uint32_t old;
-
- for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u32(&a32u);
-
- while (odp_atomic_cas_u32(&a32u, &old, old + 1) == 0)
- ;
- }
-}
-
-static void test_atomic_cas_dec_32(void)
-{
- int i;
- uint32_t old;
-
- for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u32(&a32u);
-
- while (odp_atomic_cas_u32(&a32u, &old, old - 1) == 0)
- ;
- }
-}
-
-static void test_atomic_cas_inc_64(void)
-{
- int i;
- uint64_t old;
-
- for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u64(&a64u);
-
- while (odp_atomic_cas_u64(&a64u, &old, old + 1) == 0)
- ;
- }
-}
-
-static void test_atomic_cas_dec_64(void)
-{
- int i;
- uint64_t old;
-
- for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u64(&a64u);
-
- while (odp_atomic_cas_u64(&a64u, &old, old - 1) == 0)
- ;
- }
-}
-
-static void test_atomic_xchg_32(void)
-{
- uint32_t old, new;
- int i;
-
- for (i = 0; i < CNT; i++) {
- new = odp_atomic_fetch_inc_u32(&a32u);
- old = odp_atomic_xchg_u32(&a32u_xchg, new);
-
- if (old & 0x1)
- odp_atomic_xchg_u32(&a32u_xchg, 0);
- else
- odp_atomic_xchg_u32(&a32u_xchg, 1);
- }
-
- odp_atomic_sub_u32(&a32u, CNT);
- odp_atomic_xchg_u32(&a32u_xchg, U32_MAGIC);
-}
-
-static void test_atomic_xchg_64(void)
-{
- uint64_t old, new;
- int i;
-
- for (i = 0; i < CNT; i++) {
- new = odp_atomic_fetch_inc_u64(&a64u);
- old = odp_atomic_xchg_u64(&a64u_xchg, new);
-
- if (old & 0x1)
- odp_atomic_xchg_u64(&a64u_xchg, 0);
- else
- odp_atomic_xchg_u64(&a64u_xchg, 1);
- }
-
- odp_atomic_sub_u64(&a64u, CNT);
- odp_atomic_xchg_u64(&a64u_xchg, U64_MAGIC);
-}
-
-static void test_atomic_non_relaxed_32(void)
-{
- int i;
- uint32_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_load_acq_u32(&a32u);
- odp_atomic_store_rel_u32(&a32u, tmp);
-
- tmp = odp_atomic_load_acq_u32(&a32u_max);
- odp_atomic_add_rel_u32(&a32u_max, 1);
-
- tmp = odp_atomic_load_acq_u32(&a32u_min);
- odp_atomic_sub_rel_u32(&a32u_min, 1);
-
- tmp = odp_atomic_load_u32(&a32u_xchg);
- while (odp_atomic_cas_acq_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
- ;
-
- tmp = odp_atomic_load_u32(&a32u_xchg);
- while (odp_atomic_cas_rel_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
- ;
-
- tmp = odp_atomic_load_u32(&a32u_xchg);
- /* finally set value for validation */
- while (odp_atomic_cas_acq_rel_u32(&a32u_xchg, &tmp, U32_MAGIC)
- == 0)
- ;
- }
-}
-
-static void test_atomic_non_relaxed_64(void)
-{
- int i;
- uint64_t tmp;
-
- for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_load_acq_u64(&a64u);
- odp_atomic_store_rel_u64(&a64u, tmp);
-
- tmp = odp_atomic_load_acq_u64(&a64u_max);
- odp_atomic_add_rel_u64(&a64u_max, 1);
-
- tmp = odp_atomic_load_acq_u64(&a64u_min);
- odp_atomic_sub_rel_u64(&a64u_min, 1);
-
- tmp = odp_atomic_load_u64(&a64u_xchg);
- while (odp_atomic_cas_acq_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
- ;
-
- tmp = odp_atomic_load_u64(&a64u_xchg);
- while (odp_atomic_cas_rel_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
- ;
-
- tmp = odp_atomic_load_u64(&a64u_xchg);
- /* finally set value for validation */
- while (odp_atomic_cas_acq_rel_u64(&a64u_xchg, &tmp, U64_MAGIC)
- == 0)
- ;
- }
-}
-
-static void test_atomic_inc_dec_32(void)
-{
- test_atomic_inc_32();
- test_atomic_dec_32();
-}
-
-static void test_atomic_inc_dec_64(void)
-{
- test_atomic_inc_64();
- test_atomic_dec_64();
-}
-
-static void test_atomic_fetch_inc_dec_32(void)
-{
- test_atomic_fetch_inc_32();
- test_atomic_fetch_dec_32();
-}
-
-static void test_atomic_fetch_inc_dec_64(void)
-{
- test_atomic_fetch_inc_64();
- test_atomic_fetch_dec_64();
-}
-
-static void test_atomic_add_sub_32(void)
-{
- test_atomic_add_32();
- test_atomic_sub_32();
-}
-
-static void test_atomic_add_sub_64(void)
-{
- test_atomic_add_64();
- test_atomic_sub_64();
-}
-
-static void test_atomic_fetch_add_sub_32(void)
-{
- test_atomic_fetch_add_32();
- test_atomic_fetch_sub_32();
-}
-
-static void test_atomic_fetch_add_sub_64(void)
-{
- test_atomic_fetch_add_64();
- test_atomic_fetch_sub_64();
-}
-
-static void test_atomic_max_min_32(void)
-{
- test_atomic_max_32();
- test_atomic_min_32();
-}
-
-static void test_atomic_max_min_64(void)
-{
- test_atomic_max_64();
- test_atomic_min_64();
-}
-
-static void test_atomic_cas_inc_dec_32(void)
-{
- test_atomic_cas_inc_32();
- test_atomic_cas_dec_32();
-}
-
-static void test_atomic_cas_inc_dec_64(void)
-{
- test_atomic_cas_inc_64();
- test_atomic_cas_dec_64();
-}
-
-static void test_atomic_init(void)
-{
- odp_atomic_init_u32(&a32u, 0);
- odp_atomic_init_u64(&a64u, 0);
- odp_atomic_init_u32(&a32u_min, 0);
- odp_atomic_init_u32(&a32u_max, 0);
- odp_atomic_init_u64(&a64u_min, 0);
- odp_atomic_init_u64(&a64u_max, 0);
- odp_atomic_init_u32(&a32u_xchg, 0);
- odp_atomic_init_u64(&a64u_xchg, 0);
-}
-
-static void test_atomic_store(void)
-{
- odp_atomic_store_u32(&a32u, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u, U64_INIT_VAL);
- odp_atomic_store_u32(&a32u_min, U32_INIT_VAL);
- odp_atomic_store_u32(&a32u_max, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u_min, U64_INIT_VAL);
- odp_atomic_store_u64(&a64u_max, U64_INIT_VAL);
- odp_atomic_store_u32(&a32u_xchg, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u_xchg, U64_INIT_VAL);
-}
-
-static void test_atomic_validate(int check)
-{
- CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
- CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
-
- if (check & CHECK_MAX_MIN) {
- CU_ASSERT(odp_atomic_load_u32(&a32u_max) >
- odp_atomic_load_u32(&a32u_min));
-
- CU_ASSERT(odp_atomic_load_u64(&a64u_max) >
- odp_atomic_load_u64(&a64u_min));
- }
-
- if (check & CHECK_XCHG) {
- CU_ASSERT(odp_atomic_load_u32(&a32u_xchg) == U32_MAGIC);
- CU_ASSERT(odp_atomic_load_u64(&a64u_xchg) == U64_MAGIC);
- }
-}
-
-int atomic_init(odp_instance_t *inst)
-{
- uint32_t workers_count, max_threads;
- int ret = 0;
- odp_cpumask_t mask;
-
- if (0 != odp_init_global(inst, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
- return -1;
- }
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
- return -1;
- }
-
- global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
- if (ODP_SHM_INVALID == global_shm) {
- fprintf(stderr, "Unable reserve memory for global_shm\n");
- return -1;
- }
-
- global_mem = odp_shm_addr(global_shm);
- memset(global_mem, 0, sizeof(global_shared_mem_t));
-
- global_mem->g_num_threads = MAX_WORKERS;
- global_mem->g_iterations = MAX_ITERATIONS;
- global_mem->g_verbose = VERBOSE;
-
- workers_count = odp_cpumask_default_worker(&mask, 0);
-
- max_threads = (workers_count >= MAX_WORKERS) ?
- MAX_WORKERS : workers_count;
-
- if (max_threads < global_mem->g_num_threads) {
- printf("Requested num of threads is too large\n");
- printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
- global_mem->g_num_threads,
- max_threads);
- global_mem->g_num_threads = max_threads;
- }
-
- printf("Num of threads used = %" PRIu32 "\n",
- global_mem->g_num_threads);
-
- return ret;
-}
-
-int atomic_term(odp_instance_t inst)
-{
- odp_shm_t shm;
-
- shm = odp_shm_lookup(GLOBAL_SHM_NAME);
- if (0 != odp_shm_free(shm)) {
- fprintf(stderr, "error: odp_shm_free() failed.\n");
- return -1;
- }
-
- if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
- return -1;
- }
-
- if (0 != odp_term_global(inst)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-/* Atomic tests */
-static int test_atomic_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_inc_dec_32();
- test_atomic_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_add_sub_32();
- test_atomic_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_inc_dec_32();
- test_atomic_fetch_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_fetch_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_add_sub_32();
- test_atomic_fetch_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_max_min_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_max_min_32();
- test_atomic_max_min_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_cas_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_cas_inc_dec_32();
- test_atomic_cas_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_xchg_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_xchg_32();
- test_atomic_xchg_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static int test_atomic_non_relaxed_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_non_relaxed_32();
- test_atomic_non_relaxed_64();
-
- thread_finalize(per_thread_mem);
-
- return CU_get_number_of_failures();
-}
-
-static void test_atomic_functional(int func_ptr(void *), int check)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- test_atomic_init();
- test_atomic_store();
- odp_cunit_thread_create(func_ptr, &arg);
- odp_cunit_thread_exit(&arg);
- test_atomic_validate(check);
-}
-
-void atomic_test_atomic_inc_dec(void)
-{
- test_atomic_functional(test_atomic_inc_dec_thread, 0);
-}
-
-void atomic_test_atomic_add_sub(void)
-{
- test_atomic_functional(test_atomic_add_sub_thread, 0);
-}
-
-void atomic_test_atomic_fetch_inc_dec(void)
-{
- test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
-}
-
-void atomic_test_atomic_fetch_add_sub(void)
-{
- test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
-}
-
-void atomic_test_atomic_max_min(void)
-{
- test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
-}
-
-void atomic_test_atomic_cas_inc_dec(void)
-{
- test_atomic_functional(test_atomic_cas_inc_dec_thread, 0);
-}
-
-void atomic_test_atomic_xchg(void)
-{
- test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
-}
-
-void atomic_test_atomic_non_relaxed(void)
-{
- test_atomic_functional(test_atomic_non_relaxed_thread,
- CHECK_MAX_MIN | CHECK_XCHG);
-}
-
-void atomic_test_atomic_op_lock_free(void)
-{
- odp_atomic_op_t atomic_op;
- int ret_null, ret;
-
- memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
- atomic_op.all_bits = 0;
-
- CU_ASSERT(atomic_op.all_bits == 0);
- CU_ASSERT(atomic_op.op.init == 0);
- CU_ASSERT(atomic_op.op.load == 0);
- CU_ASSERT(atomic_op.op.store == 0);
- CU_ASSERT(atomic_op.op.fetch_add == 0);
- CU_ASSERT(atomic_op.op.add == 0);
- CU_ASSERT(atomic_op.op.fetch_sub == 0);
- CU_ASSERT(atomic_op.op.sub == 0);
- CU_ASSERT(atomic_op.op.fetch_inc == 0);
- CU_ASSERT(atomic_op.op.inc == 0);
- CU_ASSERT(atomic_op.op.fetch_dec == 0);
- CU_ASSERT(atomic_op.op.dec == 0);
- CU_ASSERT(atomic_op.op.min == 0);
- CU_ASSERT(atomic_op.op.max == 0);
- CU_ASSERT(atomic_op.op.cas == 0);
- CU_ASSERT(atomic_op.op.xchg == 0);
-
- /* Test setting first, last and couple of other bits */
- atomic_op.op.init = 1;
- CU_ASSERT(atomic_op.op.init == 1);
- CU_ASSERT(atomic_op.all_bits != 0);
- atomic_op.op.init = 0;
- CU_ASSERT(atomic_op.all_bits == 0);
-
- atomic_op.op.xchg = 1;
- CU_ASSERT(atomic_op.op.xchg == 1);
- CU_ASSERT(atomic_op.all_bits != 0);
- atomic_op.op.xchg = 0;
- CU_ASSERT(atomic_op.all_bits == 0);
-
- atomic_op.op.add = 1;
- CU_ASSERT(atomic_op.op.add == 1);
- CU_ASSERT(atomic_op.all_bits != 0);
- atomic_op.op.add = 0;
- CU_ASSERT(atomic_op.all_bits == 0);
-
- atomic_op.op.dec = 1;
- CU_ASSERT(atomic_op.op.dec == 1);
- CU_ASSERT(atomic_op.all_bits != 0);
- atomic_op.op.dec = 0;
- CU_ASSERT(atomic_op.all_bits == 0);
-
- memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
- ret = odp_atomic_lock_free_u64(&atomic_op);
- ret_null = odp_atomic_lock_free_u64(NULL);
-
- CU_ASSERT(ret == ret_null);
-
- /* Init operation is not atomic by the spec. Call to
- * odp_atomic_lock_free_u64() zeros it but never sets it. */
-
- if (ret == 0) {
- /* none are lock free */
- CU_ASSERT(atomic_op.all_bits == 0);
- CU_ASSERT(atomic_op.op.init == 0);
- CU_ASSERT(atomic_op.op.load == 0);
- CU_ASSERT(atomic_op.op.store == 0);
- CU_ASSERT(atomic_op.op.fetch_add == 0);
- CU_ASSERT(atomic_op.op.add == 0);
- CU_ASSERT(atomic_op.op.fetch_sub == 0);
- CU_ASSERT(atomic_op.op.sub == 0);
- CU_ASSERT(atomic_op.op.fetch_inc == 0);
- CU_ASSERT(atomic_op.op.inc == 0);
- CU_ASSERT(atomic_op.op.fetch_dec == 0);
- CU_ASSERT(atomic_op.op.dec == 0);
- CU_ASSERT(atomic_op.op.min == 0);
- CU_ASSERT(atomic_op.op.max == 0);
- CU_ASSERT(atomic_op.op.cas == 0);
- CU_ASSERT(atomic_op.op.xchg == 0);
- }
-
- if (ret == 1) {
- /* some are lock free */
- CU_ASSERT(atomic_op.all_bits != 0);
- CU_ASSERT(atomic_op.op.init == 0);
- }
-
- if (ret == 2) {
- /* all are lock free */
- CU_ASSERT(atomic_op.all_bits != 0);
- CU_ASSERT(atomic_op.op.init == 0);
- CU_ASSERT(atomic_op.op.load == 1);
- CU_ASSERT(atomic_op.op.store == 1);
- CU_ASSERT(atomic_op.op.fetch_add == 1);
- CU_ASSERT(atomic_op.op.add == 1);
- CU_ASSERT(atomic_op.op.fetch_sub == 1);
- CU_ASSERT(atomic_op.op.sub == 1);
- CU_ASSERT(atomic_op.op.fetch_inc == 1);
- CU_ASSERT(atomic_op.op.inc == 1);
- CU_ASSERT(atomic_op.op.fetch_dec == 1);
- CU_ASSERT(atomic_op.op.dec == 1);
- CU_ASSERT(atomic_op.op.min == 1);
- CU_ASSERT(atomic_op.op.max == 1);
- CU_ASSERT(atomic_op.op.cas == 1);
- CU_ASSERT(atomic_op.op.xchg == 1);
- }
-}
-
-odp_testinfo_t atomic_suite_atomic[] = {
- ODP_TEST_INFO(atomic_test_atomic_inc_dec),
- ODP_TEST_INFO(atomic_test_atomic_add_sub),
- ODP_TEST_INFO(atomic_test_atomic_fetch_inc_dec),
- ODP_TEST_INFO(atomic_test_atomic_fetch_add_sub),
- ODP_TEST_INFO(atomic_test_atomic_max_min),
- ODP_TEST_INFO(atomic_test_atomic_cas_inc_dec),
- ODP_TEST_INFO(atomic_test_atomic_xchg),
- ODP_TEST_INFO(atomic_test_atomic_non_relaxed),
- ODP_TEST_INFO(atomic_test_atomic_op_lock_free),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t atomic_suites[] = {
- {"atomic", NULL, NULL,
- atomic_suite_atomic},
- ODP_SUITE_INFO_NULL
-};
-
-int atomic_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- odp_cunit_register_global_init(atomic_init);
- odp_cunit_register_global_term(atomic_term);
-
- ret = odp_cunit_register(atomic_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/atomic/atomic.h b/test/common_plat/validation/api/atomic/atomic.h
deleted file mode 100644
index 66796c8e3..000000000
--- a/test/common_plat/validation/api/atomic/atomic.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_ATOMIC_H_
-#define _ODP_TEST_ATOMIC_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void atomic_test_atomic_inc_dec(void);
-void atomic_test_atomic_add_sub(void);
-void atomic_test_atomic_fetch_inc_dec(void);
-void atomic_test_atomic_fetch_add_sub(void);
-void atomic_test_atomic_max_min(void);
-void atomic_test_atomic_cas_inc_dec(void);
-void atomic_test_atomic_xchg(void);
-void atomic_test_atomic_non_relaxed(void);
-void atomic_test_atomic_op_lock_free(void);
-
-/* test arrays: */
-extern odp_testinfo_t atomic_suite_atomic[];
-
-/* test array init/term functions: */
-int atomic_suite_init(void);
-
-/* test registry: */
-extern odp_suiteinfo_t atomic_suites[];
-
-/* executable init/term functions: */
-int atomic_init(odp_instance_t *inst);
-int atomic_term(odp_instance_t inst);
-
-/* main test program: */
-int atomic_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/atomic/atomic_main.c b/test/common_plat/validation/api/atomic/atomic_main.c
deleted file mode 100644
index db035373e..000000000
--- a/test/common_plat/validation/api/atomic/atomic_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "atomic.h"
-
-int main(int argc, char *argv[])
-{
- return atomic_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/barrier/Makefile.am b/test/common_plat/validation/api/barrier/Makefile.am
deleted file mode 100644
index 8fc632c27..000000000
--- a/test/common_plat/validation/api/barrier/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestbarrier.la
-libtestbarrier_la_SOURCES = barrier.c
-
-test_PROGRAMS = barrier_main$(EXEEXT)
-dist_barrier_main_SOURCES = barrier_main.c
-barrier_main_LDADD = libtestbarrier.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = barrier.h
diff --git a/test/common_plat/validation/api/barrier/barrier.h b/test/common_plat/validation/api/barrier/barrier.h
deleted file mode 100644
index 188bcb8fa..000000000
--- a/test/common_plat/validation/api/barrier/barrier.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_BARRIER_H_
-#define _ODP_TEST_BARRIER_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void barrier_test_memory_barrier(void);
-void barrier_test_no_barrier_functional(void);
-void barrier_test_barrier_functional(void);
-
-/* test arrays: */
-extern odp_testinfo_t barrier_suite_barrier[];
-
-/* test registry: */
-extern odp_suiteinfo_t barrier_suites[];
-
-/* executable init/term functions: */
-int barrier_init(odp_instance_t *inst);
-int barrier_term(odp_instance_t inst);
-
-/* main test program: */
-int barrier_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/barrier/barrier_main.c b/test/common_plat/validation/api/barrier/barrier_main.c
deleted file mode 100644
index 064decf6c..000000000
--- a/test/common_plat/validation/api/barrier/barrier_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "barrier.h"
-
-int main(int argc, char *argv[])
-{
- return barrier_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/buffer/Makefile.am b/test/common_plat/validation/api/buffer/Makefile.am
deleted file mode 100644
index add2a3419..000000000
--- a/test/common_plat/validation/api/buffer/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestbuffer.la
-libtestbuffer_la_SOURCES = buffer.c
-
-test_PROGRAMS = buffer_main$(EXEEXT)
-dist_buffer_main_SOURCES = buffer_main.c
-buffer_main_LDADD = libtestbuffer.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = buffer.h
diff --git a/test/common_plat/validation/api/buffer/buffer.c b/test/common_plat/validation/api/buffer/buffer.c
deleted file mode 100644
index 7c723d4f4..000000000
--- a/test/common_plat/validation/api/buffer/buffer.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include "odp_cunit_common.h"
-#include "buffer.h"
-
-#define BUF_ALIGN ODP_CACHE_LINE_SIZE
-#define BUF_SIZE 1500
-
-static odp_pool_t raw_pool;
-static odp_buffer_t raw_buffer = ODP_BUFFER_INVALID;
-
-int buffer_suite_init(void)
-{
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_BUFFER;
- params.buf.size = BUF_SIZE;
- params.buf.align = BUF_ALIGN;
- params.buf.num = 100;
-
- raw_pool = odp_pool_create("raw_pool", &params);
- if (raw_pool == ODP_POOL_INVALID)
- return -1;
- raw_buffer = odp_buffer_alloc(raw_pool);
- if (raw_buffer == ODP_BUFFER_INVALID)
- return -1;
- return 0;
-}
-
-int buffer_suite_term(void)
-{
- odp_buffer_free(raw_buffer);
- if (odp_pool_destroy(raw_pool) != 0)
- return -1;
- return 0;
-}
-
-void buffer_test_pool_alloc(void)
-{
- odp_pool_t pool;
- const int num = 3;
- odp_buffer_t buffer[num];
- odp_event_t ev;
- int index;
- char wrong_type = 0, wrong_size = 0, wrong_align = 0;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_BUFFER;
- params.buf.size = BUF_SIZE;
- params.buf.align = BUF_ALIGN;
- params.buf.num = num;
-
- pool = odp_pool_create("buffer_pool_alloc", &params);
- odp_pool_print(pool);
-
- /* Try to allocate num items from the pool */
- for (index = 0; index < num; index++) {
- uintptr_t addr;
-
- buffer[index] = odp_buffer_alloc(pool);
-
- if (buffer[index] == ODP_BUFFER_INVALID)
- break;
-
- ev = odp_buffer_to_event(buffer[index]);
- if (odp_event_type(ev) != ODP_EVENT_BUFFER)
- wrong_type = 1;
- if (odp_buffer_size(buffer[index]) < BUF_SIZE)
- wrong_size = 1;
-
- addr = (uintptr_t)odp_buffer_addr(buffer[index]);
-
- if ((addr % BUF_ALIGN) != 0)
- wrong_align = 1;
-
- if (wrong_type || wrong_size || wrong_align)
- odp_buffer_print(buffer[index]);
- }
-
- /* Check that the pool had at least num items */
- CU_ASSERT(index == num);
- /* index points out of buffer[] or it point to an invalid buffer */
- index--;
-
- /* Check that the pool had correct buffers */
- CU_ASSERT(wrong_type == 0);
- CU_ASSERT(wrong_size == 0);
- CU_ASSERT(wrong_align == 0);
-
- for (; index >= 0; index--)
- odp_buffer_free(buffer[index]);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-/* Wrapper to call odp_buffer_alloc_multi multiple times until
- * either no mure buffers are returned, or num buffers were alloced */
-static int buffer_alloc_multi(odp_pool_t pool, odp_buffer_t buffer[], int num)
-{
- int ret, total = 0;
-
- do {
- ret = odp_buffer_alloc_multi(pool, buffer + total, num - total);
- CU_ASSERT(ret >= 0);
- CU_ASSERT(ret <= num - total);
- total += ret;
- } while (total < num && ret);
-
- return total;
-}
-
-void buffer_test_pool_alloc_multi(void)
-{
- odp_pool_t pool;
- const int num = 3;
- odp_buffer_t buffer[num + 1];
- odp_event_t ev;
- int index;
- char wrong_type = 0, wrong_size = 0, wrong_align = 0;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_BUFFER;
- params.buf.size = BUF_SIZE;
- params.buf.align = BUF_ALIGN;
- params.buf.num = num;
-
- pool = odp_pool_create("buffer_pool_alloc_multi", &params);
- odp_pool_print(pool);
-
- /* Try to allocate num + 1 items from the pool */
- CU_ASSERT_FATAL(buffer_alloc_multi(pool, buffer, num + 1) == num);
-
- for (index = 0; index < num; index++) {
- uintptr_t addr;
-
- if (buffer[index] == ODP_BUFFER_INVALID)
- break;
-
- ev = odp_buffer_to_event(buffer[index]);
- if (odp_event_type(ev) != ODP_EVENT_BUFFER)
- wrong_type = 1;
- if (odp_buffer_size(buffer[index]) < BUF_SIZE)
- wrong_size = 1;
-
- addr = (uintptr_t)odp_buffer_addr(buffer[index]);
-
- if ((addr % BUF_ALIGN) != 0)
- wrong_align = 1;
-
- if (wrong_type || wrong_size || wrong_align)
- odp_buffer_print(buffer[index]);
- }
-
- /* Check that the pool had at least num items */
- CU_ASSERT(index == num);
-
- /* Check that the pool had correct buffers */
- CU_ASSERT(wrong_type == 0);
- CU_ASSERT(wrong_size == 0);
- CU_ASSERT(wrong_align == 0);
-
- odp_buffer_free_multi(buffer, num);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void buffer_test_pool_free(void)
-{
- odp_pool_t pool;
- odp_buffer_t buffer;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_BUFFER;
- params.buf.size = 64;
- params.buf.align = BUF_ALIGN;
- params.buf.num = 1;
-
- pool = odp_pool_create("buffer_pool_free", &params);
-
- /* Allocate the only buffer from the pool */
- buffer = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buffer != ODP_BUFFER_INVALID);
-
- /* Pool should have only one buffer */
- CU_ASSERT_FATAL(odp_buffer_alloc(pool) == ODP_BUFFER_INVALID)
-
- odp_buffer_free(buffer);
-
- /* Check that the buffer was returned back to the pool */
- buffer = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buffer != ODP_BUFFER_INVALID);
-
- odp_buffer_free(buffer);
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void buffer_test_pool_free_multi(void)
-{
- odp_pool_t pool[2];
- odp_buffer_t buffer[4];
- odp_buffer_t buf_inval[2];
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_BUFFER;
- params.buf.size = 64;
- params.buf.align = BUF_ALIGN;
- params.buf.num = 2;
-
- pool[0] = odp_pool_create("buffer_pool_free_multi_0", &params);
- pool[1] = odp_pool_create("buffer_pool_free_multi_1", &params);
- CU_ASSERT_FATAL(pool[0] != ODP_POOL_INVALID);
- CU_ASSERT_FATAL(pool[1] != ODP_POOL_INVALID);
-
- /* Allocate all the buffers from the pools */
- CU_ASSERT_FATAL(buffer_alloc_multi(pool[0], &buffer[0], 2) == 2);
- CU_ASSERT_FATAL(buffer_alloc_multi(pool[1], &buffer[2], 2) == 2);
-
- /* Pools should have no more buffer */
- CU_ASSERT(odp_buffer_alloc_multi(pool[0], buf_inval, 2) == 0);
- CU_ASSERT(odp_buffer_alloc_multi(pool[1], buf_inval, 2) == 0);
-
- /* Try to free both buffers from both pools at once */
- odp_buffer_free_multi(buffer, 4);
-
- /* Check that all buffers were returned back to the pools */
- CU_ASSERT_FATAL(buffer_alloc_multi(pool[0], &buffer[0], 2) == 2);
- CU_ASSERT_FATAL(buffer_alloc_multi(pool[1], &buffer[2], 2) == 2);
-
- odp_buffer_free_multi(buffer, 4);
- CU_ASSERT(odp_pool_destroy(pool[0]) == 0);
- CU_ASSERT(odp_pool_destroy(pool[1]) == 0);
-}
-
-void buffer_test_management_basic(void)
-{
- odp_event_t ev = odp_buffer_to_event(raw_buffer);
-
- CU_ASSERT(odp_buffer_is_valid(raw_buffer) == 1);
- CU_ASSERT(odp_buffer_pool(raw_buffer) != ODP_POOL_INVALID);
- CU_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
- CU_ASSERT(odp_buffer_size(raw_buffer) >= BUF_SIZE);
- CU_ASSERT(odp_buffer_addr(raw_buffer) != NULL);
- odp_buffer_print(raw_buffer);
- CU_ASSERT(odp_buffer_to_u64(raw_buffer) !=
- odp_buffer_to_u64(ODP_BUFFER_INVALID));
- CU_ASSERT(odp_event_to_u64(ev) != odp_event_to_u64(ODP_EVENT_INVALID));
-}
-
-odp_testinfo_t buffer_suite[] = {
- ODP_TEST_INFO(buffer_test_pool_alloc),
- ODP_TEST_INFO(buffer_test_pool_free),
- ODP_TEST_INFO(buffer_test_pool_alloc_multi),
- ODP_TEST_INFO(buffer_test_pool_free_multi),
- ODP_TEST_INFO(buffer_test_management_basic),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t buffer_suites[] = {
- {"buffer tests", buffer_suite_init, buffer_suite_term, buffer_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int buffer_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(buffer_suites);
-
- if (ret == 0)
- odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/buffer/buffer.h b/test/common_plat/validation/api/buffer/buffer.h
deleted file mode 100644
index 48331e3f1..000000000
--- a/test/common_plat/validation/api/buffer/buffer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_BUFFER_H_
-#define _ODP_TEST_BUFFER_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void buffer_test_pool_alloc(void);
-void buffer_test_pool_free(void);
-void buffer_test_pool_alloc_multi(void);
-void buffer_test_pool_free_multi(void);
-void buffer_test_management_basic(void);
-
-/* test arrays: */
-extern odp_testinfo_t buffer_suite[];
-
-/* test array init/term functions: */
-int buffer_suite_init(void);
-int buffer_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t buffer_suites[];
-
-/* main test program: */
-int buffer_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/buffer/buffer_main.c b/test/common_plat/validation/api/buffer/buffer_main.c
deleted file mode 100644
index 47168f8b9..000000000
--- a/test/common_plat/validation/api/buffer/buffer_main.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "buffer.h"
-
-int main(int argc, char *argv[])
-{
- return buffer_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/classification/Makefile.am b/test/common_plat/validation/api/classification/Makefile.am
deleted file mode 100644
index df382c51f..000000000
--- a/test/common_plat/validation/api/classification/Makefile.am
+++ /dev/null
@@ -1,14 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestclassification.la
-libtestclassification_la_SOURCES = odp_classification_basic.c \
- odp_classification_tests.c \
- odp_classification_test_pmr.c \
- odp_classification_common.c \
- classification.c
-
-test_PROGRAMS = classification_main$(EXEEXT)
-dist_classification_main_SOURCES = classification_main.c
-classification_main_LDADD = libtestclassification.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = classification.h odp_classification_testsuites.h
diff --git a/test/common_plat/validation/api/classification/classification.c b/test/common_plat/validation/api/classification/classification.c
deleted file mode 100644
index 1032e7f1f..000000000
--- a/test/common_plat/validation/api/classification/classification.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "odp_classification_testsuites.h"
-#include "classification.h"
-
-odp_suiteinfo_t classification_suites[] = {
- { .pName = "classification basic",
- .pTests = classification_suite_basic,
- },
- { .pName = "classification pmr tests",
- .pTests = classification_suite_pmr,
- .pInitFunc = classification_suite_pmr_init,
- .pCleanupFunc = classification_suite_pmr_term,
- },
- { .pName = "classification tests",
- .pTests = classification_suite,
- .pInitFunc = classification_suite_init,
- .pCleanupFunc = classification_suite_term,
- },
- ODP_SUITE_INFO_NULL,
-};
-
-int classification_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(classification_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/classification/classification_main.c b/test/common_plat/validation/api/classification/classification_main.c
deleted file mode 100644
index 8902463c2..000000000
--- a/test/common_plat/validation/api/classification/classification_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "classification.h"
-
-int main(int argc, char *argv[])
-{
- return classification_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/classification/odp_classification_basic.c b/test/common_plat/validation/api/classification/odp_classification_basic.c
deleted file mode 100644
index 9817287e9..000000000
--- a/test/common_plat/validation/api/classification/odp_classification_basic.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_cunit_common.h>
-#include "odp_classification_testsuites.h"
-#include "classification.h"
-
-#define PMR_SET_NUM 5
-
-void classification_test_create_cos(void)
-{
- odp_cos_t cos;
- odp_cls_cos_param_t cls_param;
- odp_pool_t pool;
- odp_queue_t queue;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(NULL, &cls_param);
- CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
- odp_cos_destroy(cos);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
-}
-
-void classification_test_destroy_cos(void)
-{
- odp_cos_t cos;
- char name[ODP_COS_NAME_LEN];
- odp_pool_t pool;
- odp_queue_t queue;
- odp_cls_cos_param_t cls_param;
- int retval;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- sprintf(name, "ClassOfService");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(name, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
- retval = odp_cos_destroy(cos);
- CU_ASSERT(retval == 0);
- retval = odp_cos_destroy(ODP_COS_INVALID);
- CU_ASSERT(retval < 0);
-
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
-}
-
-void classification_test_create_pmr_match(void)
-{
- odp_pmr_t pmr;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pmr_param_t pmr_param;
- odp_cos_t default_cos;
- odp_cos_t cos;
- odp_queue_t default_queue;
- odp_queue_t queue;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t pkt_pool;
- odp_cls_cos_param_t cls_param;
- odp_pktio_t pktio;
-
- pkt_pool = pool_create("pkt_pool");
- CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("pmr_match", true);
- CU_ASSERT(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("pmr_match");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create("pmr_match", &cls_param);
- CU_ASSERT(cos != ODP_COS_INVALID);
-
- val = 1024;
- mask = 0xffff;
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = find_first_supported_l3_pmr();
- pmr_param.range_term = false;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
- CU_ASSERT(odp_pmr_to_u64(pmr) != odp_pmr_to_u64(ODP_PMR_INVAL));
- /* destroy the created PMR */
- retval = odp_cls_pmr_destroy(pmr);
- CU_ASSERT(retval == 0);
-
- /* destroy an INVALID PMR */
- retval = odp_cls_pmr_destroy(ODP_PMR_INVAL);
- CU_ASSERT(retval < 0);
-
- odp_queue_destroy(queue);
- odp_pool_destroy(pool);
- odp_pool_destroy(pkt_pool);
- odp_cos_destroy(cos);
- odp_queue_destroy(default_queue);
- odp_pool_destroy(default_pool);
- odp_cos_destroy(default_cos);
- odp_pktio_close(pktio);
-}
-
-void classification_test_cos_set_queue(void)
-{
- int retval;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pool_t pool;
- odp_queue_t queue;
- odp_queue_t queue_cos;
- odp_cos_t cos_queue;
- odp_queue_t recvqueue;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- sprintf(cosname, "CoSQueue");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos_queue = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);
-
- queue_cos = queue_create("QueueCoS", true);
- CU_ASSERT_FATAL(queue_cos != ODP_QUEUE_INVALID);
-
- retval = odp_cos_queue_set(cos_queue, queue_cos);
- CU_ASSERT(retval == 0);
- recvqueue = odp_cos_queue(cos_queue);
- CU_ASSERT(recvqueue == queue_cos);
-
- odp_cos_destroy(cos_queue);
- odp_queue_destroy(queue_cos);
- odp_queue_destroy(queue);
- odp_pool_destroy(pool);
-}
-
-void classification_test_cos_set_pool(void)
-{
- int retval;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pool_t pool;
- odp_queue_t queue;
- odp_pool_t cos_pool;
- odp_cos_t cos;
- odp_pool_t recvpool;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- sprintf(cosname, "CoSQueue");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- cos_pool = pool_create("PoolCoS");
- CU_ASSERT_FATAL(cos_pool != ODP_POOL_INVALID);
-
- retval = odp_cls_cos_pool_set(cos, cos_pool);
- CU_ASSERT(retval == 0);
- recvpool = odp_cls_cos_pool(cos);
- CU_ASSERT(recvpool == cos_pool);
-
- odp_cos_destroy(cos);
- odp_queue_destroy(queue);
- odp_pool_destroy(pool);
- odp_pool_destroy(cos_pool);
-}
-
-void classification_test_cos_set_drop(void)
-{
- int retval;
- char cosname[ODP_COS_NAME_LEN];
- odp_cos_t cos_drop;
- odp_queue_t queue;
- odp_pool_t pool;
- odp_cls_cos_param_t cls_param;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- sprintf(cosname, "CoSDrop");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos_drop = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos_drop != ODP_COS_INVALID);
-
- retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_POOL);
- CU_ASSERT(retval == 0);
- CU_ASSERT(ODP_COS_DROP_POOL == odp_cos_drop(cos_drop));
-
- retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_NEVER);
- CU_ASSERT(retval == 0);
- CU_ASSERT(ODP_COS_DROP_NEVER == odp_cos_drop(cos_drop));
- odp_cos_destroy(cos_drop);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
-}
-
-void classification_test_pmr_composite_create(void)
-{
- odp_pmr_t pmr_composite;
- int retval;
- odp_pmr_param_t pmr_terms[PMR_SET_NUM];
- odp_cos_t default_cos;
- odp_cos_t cos;
- odp_queue_t default_queue;
- odp_queue_t queue;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t pkt_pool;
- odp_cls_cos_param_t cls_param;
- odp_pktio_t pktio;
- uint16_t val = 1024;
- uint16_t mask = 0xffff;
- int i;
-
- pkt_pool = pool_create("pkt_pool");
- CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("pmr_match", true);
- CU_ASSERT(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("pmr_match");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create("pmr_match", &cls_param);
- CU_ASSERT(cos != ODP_COS_INVALID);
-
- for (i = 0; i < PMR_SET_NUM; i++) {
- odp_cls_pmr_param_init(&pmr_terms[i]);
- pmr_terms[i].term = ODP_PMR_TCP_DPORT;
- pmr_terms[i].match.value = &val;
- pmr_terms[i].range_term = false;
- pmr_terms[i].match.mask = &mask;
- pmr_terms[i].val_sz = sizeof(val);
- }
-
- pmr_composite = odp_cls_pmr_create(pmr_terms, PMR_SET_NUM,
- default_cos, cos);
- CU_ASSERT(odp_pmr_to_u64(pmr_composite) !=
- odp_pmr_to_u64(ODP_PMR_INVAL));
-
- retval = odp_cls_pmr_destroy(pmr_composite);
- CU_ASSERT(retval == 0);
-
- odp_queue_destroy(queue);
- odp_pool_destroy(pool);
- odp_pool_destroy(pkt_pool);
- odp_cos_destroy(cos);
- odp_queue_destroy(default_queue);
- odp_pool_destroy(default_pool);
- odp_cos_destroy(default_cos);
- odp_pktio_close(pktio);
-}
-
-odp_testinfo_t classification_suite_basic[] = {
- ODP_TEST_INFO(classification_test_create_cos),
- ODP_TEST_INFO(classification_test_destroy_cos),
- ODP_TEST_INFO(classification_test_create_pmr_match),
- ODP_TEST_INFO(classification_test_cos_set_queue),
- ODP_TEST_INFO(classification_test_cos_set_drop),
- ODP_TEST_INFO(classification_test_cos_set_pool),
- ODP_TEST_INFO(classification_test_pmr_composite_create),
- ODP_TEST_INFO_NULL,
-};
diff --git a/test/common_plat/validation/api/classification/odp_classification_test_pmr.c b/test/common_plat/validation/api/classification/odp_classification_test_pmr.c
deleted file mode 100644
index d95242055..000000000
--- a/test/common_plat/validation/api/classification/odp_classification_test_pmr.c
+++ /dev/null
@@ -1,1864 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "odp_classification_testsuites.h"
-#include "classification.h"
-#include <odp_cunit_common.h>
-
-static odp_pool_t pkt_pool;
-/** sequence number of IP packets */
-odp_atomic_u32_t seq;
-
-static cls_packet_info_t default_pkt_info;
-
-int classification_suite_pmr_init(void)
-{
- pkt_pool = pool_create("classification_pmr_pool");
- if (ODP_POOL_INVALID == pkt_pool) {
- fprintf(stderr, "Packet pool creation failed.\n");
- return -1;
- }
-
- memset(&default_pkt_info, 0, sizeof(cls_packet_info_t));
- default_pkt_info.pool = pkt_pool;
- default_pkt_info.seq = &seq;
-
- odp_atomic_init_u32(&seq, 0);
-
- return 0;
-}
-
-static int start_pktio(odp_pktio_t pktio)
-{
- if (odp_pktio_start(pktio)) {
- fprintf(stderr, "unable to start loop\n");
- return -1;
- }
-
- return 0;
-}
-
-void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
- odp_queue_t *queue, odp_pool_t *pool)
-{
- odp_cls_cos_param_t cls_param;
- odp_pool_t default_pool;
- odp_cos_t default_cos;
- odp_queue_t default_queue;
- int retval;
- char cosname[ODP_COS_NAME_LEN];
-
- default_pool = pool_create("DefaultPool");
- CU_ASSERT(default_pool != ODP_POOL_INVALID);
-
- default_queue = queue_create("DefaultQueue", true);
- CU_ASSERT(default_queue != ODP_QUEUE_INVALID);
-
- sprintf(cosname, "DefaultCos");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = default_pool;
- cls_param.queue = default_queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- default_cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT(default_cos != ODP_COS_INVALID);
-
- retval = odp_pktio_default_cos_set(pktio, default_cos);
- CU_ASSERT(retval == 0);
-
- *cos = default_cos;
- *queue = default_queue;
- *pool = default_pool;
-}
-
-int classification_suite_pmr_term(void)
-{
- int retcode = 0;
-
- if (0 != odp_pool_destroy(pkt_pool)) {
- fprintf(stderr, "pkt_pool destroy failed.\n");
- retcode = -1;
- }
-
- return retcode;
-}
-
-void classification_test_pmr_term_tcp_dport(void)
-{
- odp_packet_t pkt;
- odph_tcphdr_t *tcp;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pool_t pool;
- odp_pool_t pool_recv;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- val = CLS_DEFAULT_DPORT;
- mask = 0xffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("tcp_dport1", true);
- CU_ASSERT(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("tcp_dport1");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "tcp_dport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_TCP_DPORT;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- pool_recv = odp_packet_pool(pkt);
- CU_ASSERT(pool == pool_recv);
- CU_ASSERT(retqueue == queue);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
-
- odp_packet_free(pkt);
-
- /* Other packets are delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
-
- odp_packet_free(pkt);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pool_destroy(pool);
- odp_pool_destroy(default_pool);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_tcp_sport(void)
-{
- odp_packet_t pkt;
- odph_tcphdr_t *tcp;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
-
- val = CLS_DEFAULT_SPORT;
- mask = 0xffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("tcp_sport", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("tcp_sport");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "tcp_sport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_TCP_SPORT;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
-
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
-
- odp_packet_free(pkt);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_udp_dport(void)
-{
- odp_packet_t pkt;
- odph_udphdr_t *udp;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = CLS_DEFAULT_DPORT;
- mask = 0xffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("udp_dport", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("udp_dport");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "udp_dport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_UDP_DPORT;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
-
- /* Other packets received in default queue */
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
-
- odp_packet_free(pkt);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_udp_sport(void)
-{
- odp_packet_t pkt;
- odph_udphdr_t *udp;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = CLS_DEFAULT_SPORT;
- mask = 0xffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("udp_sport", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("udp_sport");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "udp_sport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_UDP_SPORT;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
-
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- odp_packet_free(pkt);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_ipproto(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint8_t val;
- uint8_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = ODPH_IPPROTO_UDP;
- mask = 0xff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipproto", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipproto");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipproto");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_IPPROTO;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_dmac(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint64_t val;
- uint64_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = CLS_DEFAULT_DMAC; /* 48 bit Ethernet Mac address */
- mask = 0xffffffffffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("dmac", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("dmac");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "dmac");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_DMAC;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = ODPH_ETHADDR_LEN;
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- memset(eth->dst.addr, 0, ODPH_ETHADDR_LEN);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_packet_len(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = 1024;
- /*Mask value will match any packet of length 1000 - 1099*/
- mask = 0xff00;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("packet_len", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("packet_len");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "packet_len");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_LEN;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- /* create packet of payload length 1024 */
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt_info.len = 1024;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_vlan_id_0(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- odph_vlanhdr_t *vlan_0;
- cls_packet_info_t pkt_info;
-
- val = 1024;
- mask = 0xff00;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("vlan_id_0", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("vlan_id_0");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "vlan_id_0");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_VLAN_ID_0;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- /* create packet of payload length 1024 */
- pkt_info = default_pkt_info;
- pkt_info.vlan = true;
- pkt_info.vlan_qinq = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- vlan_0 = (odph_vlanhdr_t *)(eth + 1);
- vlan_0->tci = odp_cpu_to_be_16(1024);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_vlan_id_x(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- odph_vlanhdr_t *vlan_x;
- cls_packet_info_t pkt_info;
-
- val = 1024;
- mask = 0xff00;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("vlan_id_x", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("vlan_id_x");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "vlan_id_x");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_VLAN_ID_X;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- /* create packet of payload length 1024 */
- pkt_info = default_pkt_info;
- pkt_info.vlan = true;
- pkt_info.vlan_qinq = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- vlan_x = (odph_vlanhdr_t *)(eth + 1);
- vlan_x++;
- vlan_x->tci = odp_cpu_to_be_16(1024);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_eth_type_0(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = 0x88A8;
- mask = 0xffff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("eth_type_0", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("eth_type_0");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "eth_type_0");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_ETHTYPE_0;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.vlan = true;
- pkt_info.vlan_qinq = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-void classification_test_pmr_term_eth_type_x(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint16_t val;
- uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- odph_vlanhdr_t *vlan_x;
- cls_packet_info_t pkt_info;
-
- val = 0x8100;
- mask = 0xff00;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("eth_type_x", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("eth_type_x");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "eth_type_x");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_ETHTYPE_X;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- /* create packet of payload length 1024 */
- pkt_info = default_pkt_info;
- pkt_info.vlan = true;
- pkt_info.vlan_qinq = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- vlan_x = (odph_vlanhdr_t *)(eth + 1);
- vlan_x->tci = odp_cpu_to_be_16(1024);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-static void classification_test_pmr_pool_set(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint8_t val;
- uint8_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t pool_new;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = ODPH_IPPROTO_UDP;
- mask = 0xff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipproto1", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipproto1");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipproto1");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- pool_new = pool_create("ipproto2");
- CU_ASSERT_FATAL(pool_new != ODP_POOL_INVALID);
-
- /* new pool is set on CoS */
- retval = odp_cls_cos_pool_set(cos, pool_new);
- CU_ASSERT(retval == 0);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_IPPROTO;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool_new);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_pool_destroy(pool_new);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-static void classification_test_pmr_queue_set(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- uint8_t val;
- uint8_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_queue_t queue_new;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- val = ODPH_IPPROTO_UDP;
- mask = 0xff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipproto1", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipproto1");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipproto1");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- queue_new = queue_create("ipproto2", true);
- CU_ASSERT_FATAL(queue_new != ODP_QUEUE_INVALID);
-
- /* new queue is set on CoS */
- retval = odp_cos_queue_set(cos, queue_new);
- CU_ASSERT(retval == 0);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_IPPROTO;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVAL);
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue_new);
- odp_packet_free(pkt);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue_new);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-static void classification_test_pmr_term_daddr(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
- uint32_t addr;
- uint32_t mask;
- char cosname[ODP_QUEUE_NAME_LEN];
- odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ipv4hdr_t *ip;
- const char *dst_addr = "10.0.0.99/32";
- odph_ethhdr_t *eth;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("daddr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("daddr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "daddr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- parse_ipv4_string(dst_addr, &addr, &mask);
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_DIP_ADDR;
- pmr_param.match.value = &addr;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(addr);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
-
- /* packet with dst ip address matching PMR rule to be
- received in the CoS queue*/
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- ip->dst_addr = odp_cpu_to_be_32(addr);
- ip->chksum = odph_ipv4_csum_update(pkt);
-
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-static void classification_test_pmr_term_ipv6daddr(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
- char cosname[ODP_QUEUE_NAME_LEN];
- odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ipv6hdr_t *ip;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
-
- uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
- /* I.e. ::ffff:10.1.1.100 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 1, 1, 100
- };
- uint8_t ipv6_mask[ODPH_IPV6ADDR_LEN] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
- };
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("daddr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("daddr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "daddr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_DIP6_ADDR;
- pmr_param.match.value = IPV6_DST_ADDR;
- pmr_param.match.mask = ipv6_mask;
- pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
-
- /* packet with dst ip address matching PMR rule to be
- received in the CoS queue*/
- pkt_info = default_pkt_info;
- pkt_info.ipv6 = true;
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- memcpy(ip->dst_addr, IPV6_DST_ADDR, ODPH_IPV6ADDR_LEN);
-
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-static void classification_test_pmr_term_ipv6saddr(void)
-{
- odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
- char cosname[ODP_QUEUE_NAME_LEN];
- odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ipv6hdr_t *ip;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
- uint8_t IPV6_SRC_ADDR[ODPH_IPV6ADDR_LEN] = {
- /* I.e. ::ffff:10.0.0.100 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 1, 1, 1
- };
- uint8_t ipv6_mask[ODPH_IPV6ADDR_LEN] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
- };
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("saddr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("saddr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "saddr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_SIP6_ADDR;
- pmr_param.match.value = IPV6_SRC_ADDR;
- pmr_param.match.mask = ipv6_mask;
- pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
-
- /* packet with dst ip address matching PMR rule to be
- received in the CoS queue*/
- pkt_info = default_pkt_info;
- pkt_info.ipv6 = true;
-
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- memcpy(ip->src_addr, IPV6_SRC_ADDR, ODPH_IPV6ADDR_LEN);
-
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
-}
-
-odp_testinfo_t classification_suite_pmr[] = {
- ODP_TEST_INFO(classification_test_pmr_term_tcp_dport),
- ODP_TEST_INFO(classification_test_pmr_term_tcp_sport),
- ODP_TEST_INFO(classification_test_pmr_term_udp_dport),
- ODP_TEST_INFO(classification_test_pmr_term_udp_sport),
- ODP_TEST_INFO(classification_test_pmr_term_ipproto),
- ODP_TEST_INFO(classification_test_pmr_term_dmac),
- ODP_TEST_INFO(classification_test_pmr_pool_set),
- ODP_TEST_INFO(classification_test_pmr_queue_set),
- ODP_TEST_INFO(classification_test_pmr_term_daddr),
- ODP_TEST_INFO(classification_test_pmr_term_ipv6saddr),
- ODP_TEST_INFO(classification_test_pmr_term_ipv6daddr),
- ODP_TEST_INFO(classification_test_pmr_term_packet_len),
- ODP_TEST_INFO(classification_test_pmr_term_vlan_id_0),
- ODP_TEST_INFO(classification_test_pmr_term_vlan_id_x),
- ODP_TEST_INFO(classification_test_pmr_term_eth_type_0),
- ODP_TEST_INFO(classification_test_pmr_term_eth_type_x),
- ODP_TEST_INFO_NULL,
-};
diff --git a/test/common_plat/validation/api/classification/odp_classification_testsuites.h b/test/common_plat/validation/api/classification/odp_classification_testsuites.h
deleted file mode 100644
index d296923ad..000000000
--- a/test/common_plat/validation/api/classification/odp_classification_testsuites.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_CLASSIFICATION_TESTSUITES_H_
-#define ODP_CLASSIFICATION_TESTSUITES_H_
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-#include <odp_cunit_common.h>
-#include <stdbool.h>
-
-typedef struct cls_packet_info {
- odp_pool_t pool;
- bool vlan;
- bool vlan_qinq;
- odp_atomic_u32_t *seq;
- bool udp;
- bool ipv6;
- uint32_t len;
-} cls_packet_info_t;
-
-extern odp_testinfo_t classification_suite[];
-extern odp_testinfo_t classification_suite_basic[];
-extern odp_testinfo_t classification_suite_pmr[];
-
-int classification_suite_init(void);
-int classification_suite_term(void);
-
-int classification_suite_pmr_term(void);
-int classification_suite_pmr_init(void);
-
-odp_packet_t create_packet(cls_packet_info_t pkt_info);
-int cls_pkt_set_seq(odp_packet_t pkt);
-uint32_t cls_pkt_get_seq(odp_packet_t pkt);
-odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool);
-void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
- odp_queue_t *queue, odp_pool_t *pool);
-int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask);
-void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio);
-odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns);
-odp_pool_t pool_create(const char *poolname);
-odp_queue_t queue_create(const char *queuename, bool sched);
-void configure_pktio_default_cos(void);
-void test_pktio_default_cos(void);
-void configure_pktio_error_cos(void);
-void test_pktio_error_cos(void);
-void configure_cls_pmr_chain(void);
-void test_cls_pmr_chain(void);
-void configure_cos_with_l2_priority(void);
-void test_cos_with_l2_priority(void);
-void configure_pmr_cos(void);
-void test_pmr_cos(void);
-void configure_pktio_pmr_composite(void);
-void test_pktio_pmr_composite_cos(void);
-int stop_pktio(odp_pktio_t pktio);
-odp_cls_pmr_term_t find_first_supported_l3_pmr(void);
-int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
-
-#endif /* ODP_BUFFER_TESTSUITES_H_ */
diff --git a/test/common_plat/validation/api/cpumask/Makefile.am b/test/common_plat/validation/api/cpumask/Makefile.am
deleted file mode 100644
index ec5fce338..000000000
--- a/test/common_plat/validation/api/cpumask/Makefile.am
+++ /dev/null
@@ -1,11 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestcpumask.la
-libtestcpumask_la_SOURCES = cpumask.c
-libtestcpumask_la_LIBADD = $(LIBCPUMASK_COMMON)
-
-test_PROGRAMS = cpumask_main$(EXEEXT)
-dist_cpumask_main_SOURCES = cpumask_main.c
-cpumask_main_LDADD = libtestcpumask.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = cpumask.h
diff --git a/test/common_plat/validation/api/cpumask/cpumask.c b/test/common_plat/validation/api/cpumask/cpumask.c
deleted file mode 100644
index a0cb559fb..000000000
--- a/test/common_plat/validation/api/cpumask/cpumask.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-
-#include "odp_cunit_common.h"
-#include "cpumask.h"
-#include "mask_common.h"
-
-/* default worker parameter to get all that may be available */
-#define ALL_AVAILABLE 0
-
-void cpumask_test_odp_cpumask_def_control(void)
-{
- unsigned num;
- unsigned mask_count;
- unsigned max_cpus = mask_capacity();
- odp_cpumask_t mask;
-
- num = odp_cpumask_default_control(&mask, ALL_AVAILABLE);
- mask_count = odp_cpumask_count(&mask);
-
- CU_ASSERT(mask_count == num);
- CU_ASSERT(num > 0);
- CU_ASSERT(num <= max_cpus);
-}
-
-void cpumask_test_odp_cpumask_def_worker(void)
-{
- unsigned num;
- unsigned mask_count;
- unsigned max_cpus = mask_capacity();
- odp_cpumask_t mask;
-
- num = odp_cpumask_default_worker(&mask, ALL_AVAILABLE);
- mask_count = odp_cpumask_count(&mask);
-
- CU_ASSERT(mask_count == num);
- CU_ASSERT(num > 0);
- CU_ASSERT(num <= max_cpus);
-}
-
-void cpumask_test_odp_cpumask_def(void)
-{
- unsigned mask_count;
- unsigned num_worker;
- unsigned num_control;
- unsigned max_cpus = mask_capacity();
- unsigned available_cpus = odp_cpu_count();
- unsigned requested_cpus;
- odp_cpumask_t mask;
-
- CU_ASSERT(available_cpus <= max_cpus);
-
- if (available_cpus > 1)
- requested_cpus = available_cpus - 1;
- else
- requested_cpus = available_cpus;
- num_worker = odp_cpumask_default_worker(&mask, requested_cpus);
- mask_count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num_worker);
-
- num_control = odp_cpumask_default_control(&mask, 1);
- mask_count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num_control);
-
- CU_ASSERT(num_control >= 1);
- CU_ASSERT(num_worker <= available_cpus);
- CU_ASSERT(num_worker > 0);
-}
-
-odp_testinfo_t cpumask_suite[] = {
- ODP_TEST_INFO(cpumask_test_odp_cpumask_to_from_str),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_equal),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_zero),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_set),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_clr),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_isset),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_count),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_and),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_or),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_xor),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_copy),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_first),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_last),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_next),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_setall),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_def_control),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_def_worker),
- ODP_TEST_INFO(cpumask_test_odp_cpumask_def),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t cpumask_suites[] = {
- {"Cpumask", NULL, NULL, cpumask_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int cpumask_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(cpumask_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/cpumask/cpumask.h b/test/common_plat/validation/api/cpumask/cpumask.h
deleted file mode 100644
index 87a4512bf..000000000
--- a/test/common_plat/validation/api/cpumask/cpumask.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_CPUMASK_H_
-#define _ODP_TEST_CPUMASK_H_
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-
-/* test functions: */
-#include "mask_common.h"
-void cpumask_test_odp_cpumask_def_control(void);
-void cpumask_test_odp_cpumask_def_worker(void);
-void cpumask_test_odp_cpumask_def(void);
-
-/* test arrays: */
-extern odp_testinfo_t cpumask_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t cpumask_suites[];
-
-/* main test program: */
-int cpumask_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/cpumask/cpumask_main.c b/test/common_plat/validation/api/cpumask/cpumask_main.c
deleted file mode 100644
index 39e3171ca..000000000
--- a/test/common_plat/validation/api/cpumask/cpumask_main.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "cpumask.h"
-
-int main(int argc, char *argv[])
-{
- return cpumask_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/crypto/Makefile.am b/test/common_plat/validation/api/crypto/Makefile.am
deleted file mode 100644
index 3ea41b41f..000000000
--- a/test/common_plat/validation/api/crypto/Makefile.am
+++ /dev/null
@@ -1,11 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestcrypto.la
-libtestcrypto_la_SOURCES = crypto.c \
- odp_crypto_test_inp.c
-
-test_PROGRAMS = crypto_main$(EXEEXT)
-dist_crypto_main_SOURCES = crypto_main.c
-crypto_main_LDADD = libtestcrypto.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = crypto.h odp_crypto_test_inp.h test_vectors.h test_vectors_len.h
diff --git a/test/common_plat/validation/api/crypto/crypto.c b/test/common_plat/validation/api/crypto/crypto.c
deleted file mode 100644
index 94beb2f12..000000000
--- a/test/common_plat/validation/api/crypto/crypto.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "odp_crypto_test_inp.h"
-#include "crypto.h"
-
-#define PKT_POOL_NUM 64
-#define PKT_POOL_LEN (1 * 1024)
-
-odp_suiteinfo_t crypto_suites[] = {
- {ODP_CRYPTO_SYNC_INP, crypto_suite_sync_init, crypto_suite_term,
- crypto_suite},
- {ODP_CRYPTO_ASYNC_INP, crypto_suite_async_init, crypto_suite_term,
- crypto_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int crypto_init(odp_instance_t *inst)
-{
- odp_pool_param_t params;
- odp_pool_t pool;
- odp_queue_t out_queue;
- odp_pool_capability_t pool_capa;
-
- if (0 != odp_init_global(inst, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
- return -1;
- }
-
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
- return -1;
- }
-
- if (odp_pool_capability(&pool_capa) < 0) {
- fprintf(stderr, "error: odp_pool_capability() failed.\n");
- return -1;
- }
-
- odp_pool_param_init(&params);
- params.pkt.seg_len = PKT_POOL_LEN;
- params.pkt.len = PKT_POOL_LEN;
- params.pkt.num = PKT_POOL_NUM;
- params.type = ODP_POOL_PACKET;
-
- if (pool_capa.pkt.max_seg_len &&
- PKT_POOL_LEN > pool_capa.pkt.max_seg_len) {
- fprintf(stderr, "Warning: small packet segment length\n");
- params.pkt.seg_len = pool_capa.pkt.max_seg_len;
- }
-
- if (pool_capa.pkt.max_len &&
- PKT_POOL_LEN > pool_capa.pkt.max_len) {
- fprintf(stderr, "Pool max packet length too small\n");
- return -1;
- }
-
- pool = odp_pool_create("packet_pool", &params);
-
- if (ODP_POOL_INVALID == pool) {
- fprintf(stderr, "Packet pool creation failed.\n");
- return -1;
- }
- out_queue = odp_queue_create("crypto-out", NULL);
- if (ODP_QUEUE_INVALID == out_queue) {
- fprintf(stderr, "Crypto outq creation failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-int crypto_term(odp_instance_t inst)
-{
- odp_pool_t pool;
- odp_queue_t out_queue;
-
- out_queue = odp_queue_lookup("crypto-out");
- if (ODP_QUEUE_INVALID != out_queue) {
- if (odp_queue_destroy(out_queue))
- fprintf(stderr, "Crypto outq destroy failed.\n");
- } else {
- fprintf(stderr, "Crypto outq not found.\n");
- }
-
- pool = odp_pool_lookup("packet_pool");
- if (ODP_POOL_INVALID != pool) {
- if (odp_pool_destroy(pool))
- fprintf(stderr, "Packet pool destroy failed.\n");
- } else {
- fprintf(stderr, "Packet pool not found.\n");
- }
-
- if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
- return -1;
- }
-
- if (0 != odp_term_global(inst)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-int crypto_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- odp_cunit_register_global_init(crypto_init);
- odp_cunit_register_global_term(crypto_term);
-
- ret = odp_cunit_register(crypto_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/crypto/crypto.h b/test/common_plat/validation/api/crypto/crypto.h
deleted file mode 100644
index 9b909aa04..000000000
--- a/test/common_plat/validation/api/crypto/crypto.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_CRYPTO_H_
-#define _ODP_TEST_CRYPTO_H_
-
-#include "odp_cunit_common.h"
-
-/* test functions: */
-void crypto_test_enc_alg_3des_cbc(void);
-void crypto_test_enc_alg_3des_cbc_ovr_iv(void);
-void crypto_test_dec_alg_3des_cbc(void);
-void crypto_test_dec_alg_3des_cbc_ovr_iv(void);
-void crypto_test_enc_alg_aes128_cbc(void);
-void crypto_test_enc_alg_aes128_cbc_ovr_iv(void);
-void crypto_test_dec_alg_aes128_cbc(void);
-void crypto_test_dec_alg_aes128_cbc_ovr_iv(void);
-void crypto_test_enc_alg_aes128_gcm(void);
-void crypto_test_enc_alg_aes128_gcm_ovr_iv(void);
-void crypto_test_dec_alg_aes128_gcm(void);
-void crypto_test_dec_alg_aes128_gcm_ovr_iv(void);
-void crypto_test_alg_hmac_md5(void);
-void crypto_test_alg_hmac_sha256(void);
-
-/* test arrays: */
-extern odp_testinfo_t crypto_suite[];
-
-/* test array init/term functions: */
-int crypto_suite_sync_init(void);
-int crypto_suite_async_init(void);
-
-/* test registry: */
-extern odp_suiteinfo_t crypto_suites[];
-
-/* executable init/term functions: */
-int crypto_init(odp_instance_t *inst);
-int crypto_term(odp_instance_t inst);
-
-/* main test program: */
-int crypto_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/crypto/crypto_main.c b/test/common_plat/validation/api/crypto/crypto_main.c
deleted file mode 100644
index d8c26fa25..000000000
--- a/test/common_plat/validation/api/crypto/crypto_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "crypto.h"
-
-int main(int argc, char *argv[])
-{
- return crypto_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
deleted file mode 100644
index 43ddb2ffd..000000000
--- a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
+++ /dev/null
@@ -1,1076 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <CUnit/Basic.h>
-#include <odp_cunit_common.h>
-#include "test_vectors.h"
-#include "odp_crypto_test_inp.h"
-#include "crypto.h"
-
-#define MAX_ALG_CAPA 32
-
-struct suite_context_s {
- odp_crypto_op_mode_t pref_mode;
- odp_pool_t pool;
- odp_queue_t queue;
-};
-
-static struct suite_context_s suite_context;
-
-static const char *auth_alg_name(odp_auth_alg_t auth)
-{
- switch (auth) {
- case ODP_AUTH_ALG_NULL:
- return "ODP_AUTH_ALG_NULL";
- case ODP_AUTH_ALG_MD5_HMAC:
- return "ODP_AUTH_ALG_MD5_HMAC";
- case ODP_AUTH_ALG_SHA256_HMAC:
- return "ODP_AUTH_ALG_SHA256_HMAC";
- case ODP_AUTH_ALG_AES_GCM:
- return "ODP_AUTH_ALG_AES_GCM";
- default:
- return "Unknown";
- }
-}
-
-static const char *cipher_alg_name(odp_cipher_alg_t cipher)
-{
- switch (cipher) {
- case ODP_CIPHER_ALG_NULL:
- return "ODP_CIPHER_ALG_NULL";
- case ODP_CIPHER_ALG_DES:
- return "ODP_CIPHER_ALG_DES";
- case ODP_CIPHER_ALG_3DES_CBC:
- return "ODP_CIPHER_ALG_3DES_CBC";
- case ODP_CIPHER_ALG_AES_CBC:
- return "ODP_CIPHER_ALG_AES_CBC";
- case ODP_CIPHER_ALG_AES_GCM:
- return "ODP_CIPHER_ALG_AES_GCM";
- default:
- return "Unknown";
- }
-}
-
-/* Basic algorithm run function for async inplace mode.
- * Creates a session from input parameters and runs one operation
- * on input_vec. Checks the output of the crypto operation against
- * output_vec. Operation completion event is dequeued polling the
- * session output queue. Completion context pointer is retrieved
- * and checked against the one set before the operation.
- * Completion event can be a separate buffer or the input packet
- * buffer can be used.
- * */
-static void alg_test(odp_crypto_op_t op,
- odp_cipher_alg_t cipher_alg,
- odp_crypto_iv_t ses_iv,
- uint8_t *op_iv_ptr,
- odp_crypto_key_t cipher_key,
- odp_auth_alg_t auth_alg,
- odp_crypto_key_t auth_key,
- odp_crypto_data_range_t *cipher_range,
- odp_crypto_data_range_t *auth_range,
- const uint8_t *plaintext,
- unsigned int plaintext_len,
- const uint8_t *ciphertext,
- unsigned int ciphertext_len,
- const uint8_t *digest,
- uint32_t digest_len)
-{
- odp_crypto_session_t session;
- odp_crypto_capability_t capa;
- int rc;
- odp_crypto_ses_create_err_t status;
- odp_bool_t posted;
- odp_event_t event;
- odp_crypto_compl_t compl_event;
- odp_crypto_op_result_t result;
- odp_crypto_session_param_t ses_params;
- odp_crypto_op_param_t op_params;
- uint8_t *data_addr;
- int data_off;
- odp_crypto_cipher_capability_t cipher_capa[MAX_ALG_CAPA];
- odp_crypto_auth_capability_t auth_capa[MAX_ALG_CAPA];
- int num, i;
- int found;
-
- rc = odp_crypto_capability(&capa);
- CU_ASSERT(!rc);
-
- if (cipher_alg == ODP_CIPHER_ALG_3DES_CBC &&
- !(capa.ciphers.bit.trides_cbc))
- rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES_CBC &&
- !(capa.ciphers.bit.aes_cbc))
- rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES_GCM &&
- !(capa.ciphers.bit.aes_gcm))
- rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_DES &&
- !(capa.ciphers.bit.des))
- rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_NULL &&
- !(capa.ciphers.bit.null))
- rc = -1;
-
- CU_ASSERT(!rc);
- CU_ASSERT((~capa.ciphers.all_bits & capa.hw_ciphers.all_bits) == 0);
-
- if (auth_alg == ODP_AUTH_ALG_AES_GCM &&
- !(capa.auths.bit.aes_gcm))
- rc = -1;
- if (auth_alg == ODP_AUTH_ALG_MD5_HMAC &&
- !(capa.auths.bit.md5_hmac))
- rc = -1;
- if (auth_alg == ODP_AUTH_ALG_NULL &&
- !(capa.auths.bit.null))
- rc = -1;
- if (auth_alg == ODP_AUTH_ALG_SHA256_HMAC &&
- !(capa.auths.bit.sha256_hmac))
- rc = -1;
-
- CU_ASSERT(!rc);
- CU_ASSERT((~capa.auths.all_bits & capa.hw_auths.all_bits) == 0);
-
- num = odp_crypto_cipher_capability(cipher_alg, cipher_capa,
- MAX_ALG_CAPA);
-
- if (cipher_alg != ODP_CIPHER_ALG_NULL) {
- CU_ASSERT(num > 0);
- found = 0;
- } else {
- CU_ASSERT(num == 0);
- found = 1;
- }
-
- CU_ASSERT(num <= MAX_ALG_CAPA);
-
- if (num > MAX_ALG_CAPA)
- num = MAX_ALG_CAPA;
-
- /* Search for the test case */
- for (i = 0; i < num; i++) {
- if (cipher_capa[i].key_len == cipher_key.length &&
- cipher_capa[i].iv_len == ses_iv.length) {
- found = 1;
- break;
- }
- }
-
- CU_ASSERT(found);
-
- num = odp_crypto_auth_capability(auth_alg, auth_capa, MAX_ALG_CAPA);
-
- if (auth_alg != ODP_AUTH_ALG_NULL) {
- CU_ASSERT(num > 0);
- found = 0;
- } else {
- CU_ASSERT(num == 0);
- found = 1;
- }
-
- CU_ASSERT(num <= MAX_ALG_CAPA);
-
- if (num > MAX_ALG_CAPA)
- num = MAX_ALG_CAPA;
-
- /* Search for the test case */
- for (i = 0; i < num; i++) {
- if (auth_capa[i].digest_len == digest_len &&
- auth_capa[i].key_len == auth_key.length) {
- found = 1;
- break;
- }
- }
-
- CU_ASSERT(found);
-
- /* Create a crypto session */
- odp_crypto_session_param_init(&ses_params);
- ses_params.op = op;
- ses_params.auth_cipher_text = false;
- ses_params.pref_mode = suite_context.pref_mode;
- ses_params.cipher_alg = cipher_alg;
- ses_params.auth_alg = auth_alg;
- ses_params.compl_queue = suite_context.queue;
- ses_params.output_pool = suite_context.pool;
- ses_params.cipher_key = cipher_key;
- ses_params.iv = ses_iv;
- ses_params.auth_key = auth_key;
-
- rc = odp_crypto_session_create(&ses_params, &session, &status);
- CU_ASSERT_FATAL(!rc);
- CU_ASSERT(status == ODP_CRYPTO_SES_CREATE_ERR_NONE);
- CU_ASSERT(odp_crypto_session_to_u64(session) !=
- odp_crypto_session_to_u64(ODP_CRYPTO_SESSION_INVALID));
-
- /* Prepare input data */
- odp_packet_t pkt = odp_packet_alloc(suite_context.pool,
- plaintext_len + digest_len);
- CU_ASSERT(pkt != ODP_PACKET_INVALID);
- data_addr = odp_packet_data(pkt);
- memcpy(data_addr, plaintext, plaintext_len);
- data_off = 0;
-
- /* Prepare input/output params */
- memset(&op_params, 0, sizeof(op_params));
- op_params.session = session;
- op_params.pkt = pkt;
- op_params.out_pkt = pkt;
- op_params.ctx = (void *)0xdeadbeef;
-
- if (cipher_range) {
- op_params.cipher_range = *cipher_range;
- data_off = cipher_range->offset;
- } else {
- op_params.cipher_range.offset = data_off;
- op_params.cipher_range.length = plaintext_len;
- }
- if (auth_range) {
- op_params.auth_range = *auth_range;
- } else {
- op_params.auth_range.offset = data_off;
- op_params.auth_range.length = plaintext_len;
- }
- if (op_iv_ptr)
- op_params.override_iv_ptr = op_iv_ptr;
-
- op_params.hash_result_offset = plaintext_len;
-
- rc = odp_crypto_operation(&op_params, &posted, &result);
- if (rc < 0) {
- CU_FAIL("Failed odp_crypto_operation()");
- goto cleanup;
- }
-
- if (posted) {
- /* Poll completion queue for results */
- do {
- event = odp_queue_deq(suite_context.queue);
- } while (event == ODP_EVENT_INVALID);
-
- compl_event = odp_crypto_compl_from_event(event);
- CU_ASSERT(odp_crypto_compl_to_u64(compl_event) ==
- odp_crypto_compl_to_u64(odp_crypto_compl_from_event(event)));
- odp_crypto_compl_result(compl_event, &result);
- odp_crypto_compl_free(compl_event);
- }
-
- CU_ASSERT(result.ok);
- CU_ASSERT(result.pkt == pkt);
-
- if (cipher_alg != ODP_CIPHER_ALG_NULL)
- CU_ASSERT(!memcmp(data_addr, ciphertext, ciphertext_len));
-
- if (op == ODP_CRYPTO_OP_ENCODE && auth_alg != ODP_AUTH_ALG_NULL)
- CU_ASSERT(!memcmp(data_addr + op_params.hash_result_offset,
- digest, digest_len));
-
- CU_ASSERT(result.ctx == (void *)0xdeadbeef);
-cleanup:
- rc = odp_crypto_session_destroy(session);
- CU_ASSERT(!rc);
-
- odp_packet_free(pkt);
-}
-
-/**
- * Check if given cipher and authentication algorithms are supported
- *
- * @param cipher Cipher algorithm
- * @param auth Authentication algorithm
- *
- * @retval ODP_TEST_ACTIVE when both algorithms are supported
- * @retval ODP_TEST_INACTIVE when either algorithm is not supported
- */
-static int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
-{
- odp_crypto_capability_t capability;
-
- if (odp_crypto_capability(&capability))
- return ODP_TEST_INACTIVE;
-
- /* Cipher algorithms */
- switch (cipher) {
- case ODP_CIPHER_ALG_NULL:
- if (!capability.ciphers.bit.null)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_CIPHER_ALG_DES:
- if (!capability.ciphers.bit.des)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_CIPHER_ALG_3DES_CBC:
- if (!capability.ciphers.bit.trides_cbc)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_CIPHER_ALG_AES_CBC:
- if (!capability.ciphers.bit.aes_cbc)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_CIPHER_ALG_AES_GCM:
- if (!capability.ciphers.bit.aes_gcm)
- return ODP_TEST_INACTIVE;
- break;
- default:
- fprintf(stderr, "Unsupported cipher algorithm\n");
- return ODP_TEST_INACTIVE;
- }
-
- /* Authentication algorithms */
- switch (auth) {
- case ODP_AUTH_ALG_NULL:
- if (!capability.auths.bit.null)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_AUTH_ALG_MD5_HMAC:
- if (!capability.auths.bit.md5_hmac)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_AUTH_ALG_SHA256_HMAC:
- if (!capability.auths.bit.sha256_hmac)
- return ODP_TEST_INACTIVE;
- break;
- case ODP_AUTH_ALG_AES_GCM:
- if (!capability.auths.bit.aes_gcm)
- return ODP_TEST_INACTIVE;
- break;
- default:
- fprintf(stderr, "Unsupported authentication algorithm\n");
- return ODP_TEST_INACTIVE;
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-/**
- * Check if given cipher options are supported
- *
- * @param cipher Cipher algorithm
- * @param key_len Key length
- * @param iv_len IV length
- *
- * @retval non-zero if both cipher options are supported
- * @retval 0 if both options are not supported
- */
-static int check_cipher_options(odp_cipher_alg_t cipher, uint32_t key_len,
- uint32_t iv_len)
-{
- int i;
- int num;
- odp_crypto_cipher_capability_t cipher_capa[MAX_ALG_CAPA];
-
- num = odp_crypto_cipher_capability(cipher, cipher_capa, MAX_ALG_CAPA);
- CU_ASSERT_FATAL(num >= 1);
-
- for (i = 0; i < num; i++) {
- if (key_len == cipher_capa[i].key_len &&
- iv_len == cipher_capa[i].iv_len)
- break;
- }
-
- if (i == num) {
- printf("\n Unsupported: alg=%s, key_len=%" PRIu32 ", "
- "iv_len=%" PRIu32 "\n", cipher_alg_name(cipher), key_len,
- iv_len);
- return 0;
- }
- return 1;
-}
-
-/**
- * Check if given authentication options are supported
- *
- * @param auth Authentication algorithm
- * @param key_len Key length
- * @param digest_len Digest length
- *
- * @retval non-zero if both authentication options are supported
- * @retval 0 if both options are not supported
- */
-static int check_auth_options(odp_auth_alg_t auth, uint32_t key_len,
- uint32_t digest_len)
-{
- int i;
- int num;
- odp_crypto_auth_capability_t capa[MAX_ALG_CAPA];
-
- num = odp_crypto_auth_capability(auth, capa, MAX_ALG_CAPA);
- CU_ASSERT_FATAL(num >= 1);
-
- for (i = 0; i < num; i++) {
- if (key_len == capa[i].key_len &&
- digest_len == capa[i].digest_len)
- break;
- }
-
- if (i == num) {
- printf("\n Unsupported: alg=%s, key_len=%" PRIu32 ", "
- "digest_len=%" PRIu32 "\n", auth_alg_name(auth), key_len,
- digest_len);
- return 0;
- }
- return 1;
-}
-
-static int check_alg_3des_cbc(void)
-{
- return check_alg_support(ODP_CIPHER_ALG_3DES_CBC, ODP_AUTH_ALG_NULL);
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for 3DES_CBC algorithm. IV for the operation is the session IV.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.*/
-void crypto_test_enc_alg_3des_cbc(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv;
- unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
- sizeof(tdes_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = tdes_cbc_reference_key[i];
- cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
- iv.data = tdes_cbc_reference_iv[i];
- iv.length = sizeof(tdes_cbc_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_3DES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_3DES_CBC,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- tdes_cbc_reference_plaintext[i],
- tdes_cbc_reference_length[i],
- tdes_cbc_reference_ciphertext[i],
- tdes_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for 3DES_CBC algorithm. IV for the operation is the operation IV.
- * */
-void crypto_test_enc_alg_3des_cbc_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = TDES_CBC_IV_LEN };
- unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
- sizeof(tdes_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = tdes_cbc_reference_key[i];
- cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_3DES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_3DES_CBC,
- iv,
- tdes_cbc_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- tdes_cbc_reference_plaintext[i],
- tdes_cbc_reference_length[i],
- tdes_cbc_reference_ciphertext[i],
- tdes_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for 3DES_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_3des_cbc(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
- unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
- sizeof(tdes_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = tdes_cbc_reference_key[i];
- cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
- iv.data = tdes_cbc_reference_iv[i];
- iv.length = sizeof(tdes_cbc_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_3DES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_3DES_CBC,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- tdes_cbc_reference_ciphertext[i],
- tdes_cbc_reference_length[i],
- tdes_cbc_reference_plaintext[i],
- tdes_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for 3DES_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_3des_cbc_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = TDES_CBC_IV_LEN };
- unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
- sizeof(tdes_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = tdes_cbc_reference_key[i];
- cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_3DES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_3DES_CBC,
- iv,
- tdes_cbc_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- tdes_cbc_reference_ciphertext[i],
- tdes_cbc_reference_length[i],
- tdes_cbc_reference_plaintext[i],
- tdes_cbc_reference_length[i], NULL, 0);
- }
-}
-
-static int check_alg_aes_gcm(void)
-{
- return check_alg_support(ODP_CIPHER_ALG_AES_GCM, ODP_AUTH_ALG_AES_GCM);
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for AES128_GCM algorithm. IV for the operation is the session IV.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.*/
-void crypto_test_enc_alg_aes128_gcm(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
- sizeof(aes128_gcm_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_gcm_reference_key[i];
- cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
- iv.data = aes128_gcm_reference_iv[i];
- iv.length = sizeof(aes128_gcm_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_GCM,
- cipher_key.length, iv.length))
- continue;
- if (!check_auth_options(ODP_AUTH_ALG_AES_GCM,
- auth_key.length, AES128_GCM_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES_GCM,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_AES_GCM,
- auth_key,
- &aes128_gcm_cipher_range[i],
- &aes128_gcm_auth_range[i],
- aes128_gcm_reference_plaintext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i] +
- aes128_gcm_reference_length[i],
- AES128_GCM_CHECK_LEN);
- }
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for AES128_GCM algorithm. IV for the operation is the session IV.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.*/
-void crypto_test_enc_alg_aes128_gcm_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
- sizeof(aes128_gcm_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_gcm_reference_key[i];
- cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_GCM,
- cipher_key.length, iv.length))
- continue;
- if (!check_auth_options(ODP_AUTH_ALG_AES_GCM,
- auth_key.length, AES128_GCM_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES_GCM,
- iv,
- aes128_gcm_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_AES_GCM,
- auth_key,
- &aes128_gcm_cipher_range[i],
- &aes128_gcm_auth_range[i],
- aes128_gcm_reference_plaintext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i] +
- aes128_gcm_reference_length[i],
- AES128_GCM_CHECK_LEN);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for 3DES_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_aes128_gcm(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
- sizeof(aes128_gcm_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_gcm_reference_key[i];
- cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
- iv.data = aes128_gcm_reference_iv[i];
- iv.length = sizeof(aes128_gcm_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_GCM,
- cipher_key.length, iv.length))
- continue;
- if (!check_auth_options(ODP_AUTH_ALG_AES_GCM,
- auth_key.length, AES128_GCM_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES_GCM,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_AES_GCM,
- auth_key,
- &aes128_gcm_cipher_range[i],
- &aes128_gcm_auth_range[i],
- aes128_gcm_reference_ciphertext[i],
- aes128_gcm_reference_length[i] + AES128_GCM_CHECK_LEN,
- aes128_gcm_reference_plaintext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i] +
- aes128_gcm_reference_length[i],
- AES128_GCM_CHECK_LEN);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for 3DES_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_aes128_gcm_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
- sizeof(aes128_gcm_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_gcm_reference_key[i];
- cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_GCM,
- cipher_key.length, iv.length))
- continue;
- if (!check_auth_options(ODP_AUTH_ALG_AES_GCM,
- auth_key.length, AES128_GCM_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES_GCM,
- iv,
- aes128_gcm_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_AES_GCM,
- auth_key,
- &aes128_gcm_cipher_range[i],
- &aes128_gcm_auth_range[i],
- aes128_gcm_reference_ciphertext[i],
- aes128_gcm_reference_length[i] + AES128_GCM_CHECK_LEN,
- aes128_gcm_reference_plaintext[i],
- aes128_gcm_reference_length[i],
- aes128_gcm_reference_ciphertext[i] +
- aes128_gcm_reference_length[i],
- AES128_GCM_CHECK_LEN);
- }
-}
-
-static int check_alg_aes_cbc(void)
-{
- return check_alg_support(ODP_CIPHER_ALG_AES_CBC, ODP_AUTH_ALG_NULL);
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for AES128_CBC algorithm. IV for the operation is the session IV.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.*/
-void crypto_test_enc_alg_aes128_cbc(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv;
- unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
- sizeof(aes128_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_cbc_reference_key[i];
- cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
- iv.data = aes128_cbc_reference_iv[i];
- iv.length = sizeof(aes128_cbc_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES_CBC,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- aes128_cbc_reference_plaintext[i],
- aes128_cbc_reference_length[i],
- aes128_cbc_reference_ciphertext[i],
- aes128_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of encode (plaintext -> ciphertext)
- * operation for AES128_CBC algorithm. IV for the operation is the operation IV.
- * */
-void crypto_test_enc_alg_aes128_cbc_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_CBC_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
- sizeof(aes128_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_cbc_reference_key[i];
- cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES_CBC,
- iv,
- aes128_cbc_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- aes128_cbc_reference_plaintext[i],
- aes128_cbc_reference_length[i],
- aes128_cbc_reference_ciphertext[i],
- aes128_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for AES128_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_aes128_cbc(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
- unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
- sizeof(aes128_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_cbc_reference_key[i];
- cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
- iv.data = aes128_cbc_reference_iv[i];
- iv.length = sizeof(aes128_cbc_reference_iv[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES_CBC,
- iv,
- NULL,
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- aes128_cbc_reference_ciphertext[i],
- aes128_cbc_reference_length[i],
- aes128_cbc_reference_plaintext[i],
- aes128_cbc_reference_length[i], NULL, 0);
- }
-}
-
-/* This test verifies the correctness of decode (ciphertext -> plaintext)
- * operation for AES128_CBC algorithm. IV for the operation is the session IV
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_dec_alg_aes128_cbc_ovr_iv(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = AES128_CBC_IV_LEN };
- unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
- sizeof(aes128_cbc_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- cipher_key.data = aes128_cbc_reference_key[i];
- cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
-
- if (!check_cipher_options(ODP_CIPHER_ALG_AES_CBC,
- cipher_key.length, iv.length))
- continue;
-
- alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES_CBC,
- iv,
- aes128_cbc_reference_iv[i],
- cipher_key,
- ODP_AUTH_ALG_NULL,
- auth_key,
- NULL, NULL,
- aes128_cbc_reference_ciphertext[i],
- aes128_cbc_reference_length[i],
- aes128_cbc_reference_plaintext[i],
- aes128_cbc_reference_length[i], NULL, 0);
- }
-}
-
-static int check_alg_hmac_md5(void)
-{
- return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_MD5_HMAC);
-}
-
-/* This test verifies the correctness of HMAC_MD5 digest operation.
- * The output check length is truncated to 12 bytes (96 bits) as
- * returned by the crypto operation API call.
- * Note that hash digest is a one-way operation.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_alg_hmac_md5(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
-
- unsigned int test_vec_num = (sizeof(hmac_md5_reference_length) /
- sizeof(hmac_md5_reference_length[0]));
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- auth_key.data = hmac_md5_reference_key[i];
- auth_key.length = sizeof(hmac_md5_reference_key[i]);
-
- if (!check_auth_options(ODP_AUTH_ALG_MD5_HMAC, auth_key.length,
- HMAC_MD5_96_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_NULL,
- iv,
- iv.data,
- cipher_key,
- ODP_AUTH_ALG_MD5_HMAC,
- auth_key,
- NULL, NULL,
- hmac_md5_reference_plaintext[i],
- hmac_md5_reference_length[i],
- NULL, 0,
- hmac_md5_reference_digest[i],
- HMAC_MD5_96_CHECK_LEN);
- }
-}
-
-static int check_alg_hmac_sha256(void)
-{
- return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA256_HMAC);
-}
-
-/* This test verifies the correctness of HMAC_MD5 digest operation.
- * The output check length is truncated to 12 bytes (96 bits) as
- * returned by the crypto operation API call.
- * Note that hash digest is a one-way operation.
- * In addition the test verifies if the implementation can use the
- * packet buffer as completion event buffer.
- * */
-void crypto_test_alg_hmac_sha256(void)
-{
- odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
- auth_key = { .data = NULL, .length = 0 };
- odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
-
- unsigned int test_vec_num = (sizeof(hmac_sha256_reference_length) /
- sizeof(hmac_sha256_reference_length[0]));
-
- unsigned int i;
-
- for (i = 0; i < test_vec_num; i++) {
- auth_key.data = hmac_sha256_reference_key[i];
- auth_key.length = sizeof(hmac_sha256_reference_key[i]);
-
- if (!check_auth_options(ODP_AUTH_ALG_SHA256_HMAC,
- auth_key.length,
- HMAC_SHA256_128_CHECK_LEN))
- continue;
-
- alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_NULL,
- iv,
- iv.data,
- cipher_key,
- ODP_AUTH_ALG_SHA256_HMAC,
- auth_key,
- NULL, NULL,
- hmac_sha256_reference_plaintext[i],
- hmac_sha256_reference_length[i],
- NULL, 0,
- hmac_sha256_reference_digest[i],
- HMAC_SHA256_128_CHECK_LEN);
- }
-}
-
-int crypto_suite_sync_init(void)
-{
- suite_context.pool = odp_pool_lookup("packet_pool");
- if (suite_context.pool == ODP_POOL_INVALID)
- return -1;
-
- suite_context.queue = ODP_QUEUE_INVALID;
- suite_context.pref_mode = ODP_CRYPTO_SYNC;
- return 0;
-}
-
-int crypto_suite_async_init(void)
-{
- suite_context.pool = odp_pool_lookup("packet_pool");
- if (suite_context.pool == ODP_POOL_INVALID)
- return -1;
- suite_context.queue = odp_queue_lookup("crypto-out");
- if (suite_context.queue == ODP_QUEUE_INVALID)
- return -1;
-
- suite_context.pref_mode = ODP_CRYPTO_ASYNC;
- return 0;
-}
-
-odp_testinfo_t crypto_suite[] = {
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_3des_cbc,
- check_alg_3des_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_3des_cbc,
- check_alg_3des_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_3des_cbc_ovr_iv,
- check_alg_3des_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_3des_cbc_ovr_iv,
- check_alg_3des_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes128_cbc,
- check_alg_aes_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes128_cbc,
- check_alg_aes_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes128_cbc_ovr_iv,
- check_alg_aes_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes128_cbc_ovr_iv,
- check_alg_aes_cbc),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes128_gcm,
- check_alg_aes_gcm),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes128_gcm_ovr_iv,
- check_alg_aes_gcm),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes128_gcm,
- check_alg_aes_gcm),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes128_gcm_ovr_iv,
- check_alg_aes_gcm),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_alg_hmac_md5,
- check_alg_hmac_md5),
- ODP_TEST_INFO_CONDITIONAL(crypto_test_alg_hmac_sha256,
- check_alg_hmac_sha256),
- ODP_TEST_INFO_NULL,
-};
-
-int crypto_suite_term(void)
-{
- int i;
- int first = 1;
-
- for (i = 0; crypto_suite[i].pName; i++) {
- if (crypto_suite[i].check_active &&
- crypto_suite[i].check_active() == ODP_TEST_INACTIVE) {
- if (first) {
- first = 0;
- printf("\n\n Inactive tests:\n");
- }
- printf(" %s\n", crypto_suite[i].pName);
- }
- }
- return 0;
-}
diff --git a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h
deleted file mode 100644
index 0f6933790..000000000
--- a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef ODP_CRYPTO_TEST_ASYNC_INP_
-#define ODP_CRYPTO_TEST_ASYNC_INP_
-
-#include <odp_cunit_common.h>
-
-/* Suite names */
-#define ODP_CRYPTO_ASYNC_INP "odp_crypto_async_inp"
-#define ODP_CRYPTO_SYNC_INP "odp_crypto_sync_inp"
-
-/* Suite test array */
-extern odp_testinfo_t crypto_suite[];
-
-int crypto_suite_sync_init(void);
-int crypto_suite_async_init(void);
-int crypto_suite_term(void);
-
-#endif
diff --git a/test/common_plat/validation/api/crypto/test_vectors.h b/test/common_plat/validation/api/crypto/test_vectors.h
deleted file mode 100644
index da4610f33..000000000
--- a/test/common_plat/validation/api/crypto/test_vectors.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_CRYPTO_VECTORS_H_
-#define _ODP_TEST_CRYPTO_VECTORS_H_
-
-#include "test_vectors_len.h"
-/* TDES-CBC reference vectors, according to
- * "http://csrc.nist.gov/groups/STM/cavp/documents/des/DESMMT.pdf"
- */
-static uint8_t tdes_cbc_reference_key[][TDES_CBC_KEY_LEN] = {
- {0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10, 0x43, 0xcd, 0x26, 0x5d,
- 0x58, 0x40, 0xea, 0xf1, 0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c,
- },
-
- {0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc, 0x07, 0x54, 0xb9, 0x4f,
- 0x31, 0xcb, 0xb3, 0x85, 0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae}
-};
-
-static uint8_t tdes_cbc_reference_iv[][TDES_CBC_IV_LEN] = {
- {0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75},
-
- {0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65}
-};
-
-/** length in bytes */
-static uint32_t tdes_cbc_reference_length[] = { 8, 16 };
-
-static uint8_t
-tdes_cbc_reference_plaintext[][TDES_CBC_MAX_DATA_LEN] = {
- {0x32, 0x6a, 0x49, 0x4c, 0xd3, 0x3f, 0xe7, 0x56},
-
- {0x84, 0x40, 0x1f, 0x78, 0xfe, 0x6c, 0x10, 0x87, 0x6d, 0x8e, 0xa2, 0x30,
- 0x94, 0xea, 0x53, 0x09}
-};
-
-static uint8_t
-tdes_cbc_reference_ciphertext[][TDES_CBC_MAX_DATA_LEN] = {
- {0xb2, 0x2b, 0x8d, 0x66, 0xde, 0x97, 0x06, 0x92},
-
- {0x7b, 0x1f, 0x7c, 0x7e, 0x3b, 0x1c, 0x94, 0x8e, 0xbd, 0x04, 0xa7, 0x5f,
- 0xfb, 0xa7, 0xd2, 0xf5}
-};
-
-static uint8_t aes128_cbc_reference_key[][AES128_CBC_KEY_LEN] = {
- {0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
- 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
- {0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
- 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
- {0x6c, 0x3e, 0xa0, 0x47, 0x76, 0x30, 0xce, 0x21,
- 0xa2, 0xce, 0x33, 0x4a, 0xa7, 0x46, 0xc2, 0xcd },
- {0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
- 0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49 }
-};
-
-static uint8_t aes128_cbc_reference_iv[][AES128_CBC_IV_LEN] = {
- { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
- 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
- { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
- 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
- { 0xc7, 0x82, 0xdc, 0x4c, 0x09, 0x8c, 0x66, 0xcb,
- 0xd9, 0xcd, 0x27, 0xd8, 0x25, 0x68, 0x2c, 0x81 },
- { 0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
- 0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9 }
-};
-
-/** length in bytes */
-static uint32_t aes128_cbc_reference_length[] = { 16, 32, 48, 64 };
-
-static uint8_t
-aes128_cbc_reference_plaintext[][AES128_CBC_MAX_DATA_LEN] = {
- "Single block msg",
- { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
- "This is a 48-byte message (exactly 3 AES blocks)",
- { 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
- 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
- 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
- 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
- 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
- 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
- 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
- 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf }
-};
-
-static uint8_t
-aes128_cbc_reference_ciphertext[][AES128_CBC_MAX_DATA_LEN] = {
- { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
- 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
- { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
- 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
- 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
- 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
- { 0xd0, 0xa0, 0x2b, 0x38, 0x36, 0x45, 0x17, 0x53,
- 0xd4, 0x93, 0x66, 0x5d, 0x33, 0xf0, 0xe8, 0x86,
- 0x2d, 0xea, 0x54, 0xcd, 0xb2, 0x93, 0xab, 0xc7,
- 0x50, 0x69, 0x39, 0x27, 0x67, 0x72, 0xf8, 0xd5,
- 0x02, 0x1c, 0x19, 0x21, 0x6b, 0xad, 0x52, 0x5c,
- 0x85, 0x79, 0x69, 0x5d, 0x83, 0xba, 0x26, 0x84 },
- { 0xc3, 0x0e, 0x32, 0xff, 0xed, 0xc0, 0x77, 0x4e,
- 0x6a, 0xff, 0x6a, 0xf0, 0x86, 0x9f, 0x71, 0xaa,
- 0x0f, 0x3a, 0xf0, 0x7a, 0x9a, 0x31, 0xa9, 0xc6,
- 0x84, 0xdb, 0x20, 0x7e, 0xb0, 0xef, 0x8e, 0x4e,
- 0x35, 0x90, 0x7a, 0xa6, 0x32, 0xc3, 0xff, 0xdf,
- 0x86, 0x8b, 0xb7, 0xb2, 0x9d, 0x3d, 0x46, 0xad,
- 0x83, 0xce, 0x9f, 0x9a, 0x10, 0x2e, 0xe9, 0x9d,
- 0x49, 0xa5, 0x3e, 0x87, 0xf4, 0xc3, 0xda, 0x55 }
-};
-
-/* AES-GCM test vectors extracted from
- * https://tools.ietf.org/html/draft-mcgrew-gcm-test-01#section-2
- */
-static uint8_t aes128_gcm_reference_key[][AES128_GCM_KEY_LEN] = {
- { 0x4c, 0x80, 0xcd, 0xef, 0xbb, 0x5d, 0x10, 0xda,
- 0x90, 0x6a, 0xc7, 0x3c, 0x36, 0x13, 0xa6, 0x34 },
- { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- { 0x3d, 0xe0, 0x98, 0x74, 0xb3, 0x88, 0xe6, 0x49,
- 0x19, 0x88, 0xd0, 0xc3, 0x60, 0x7e, 0xae, 0x1f }
-};
-
-static uint8_t aes128_gcm_reference_iv[][AES128_GCM_IV_LEN] = {
- { 0x2e, 0x44, 0x3b, 0x68, 0x49, 0x56, 0xed, 0x7e,
- 0x3b, 0x24, 0x4c, 0xfe },
- { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00 },
- { 0x57, 0x69, 0x0e, 0x43, 0x4e, 0x28, 0x00, 0x00,
- 0xa2, 0xfc, 0xa1, 0xa3 }
-};
-
-static uint32_t aes128_gcm_reference_length[] = { 84, 72, 72, 40};
-
-static odp_crypto_data_range_t aes128_gcm_cipher_range[] = {
- { .offset = 12, .length = 72 },
- { .offset = 8, .length = 64 },
- { .offset = 8, .length = 64 },
- { .offset = 12, .length = 28 },
-};
-
-static odp_crypto_data_range_t aes128_gcm_auth_range[] = {
- { .offset = 0, .length = 84 },
- { .offset = 0, .length = 72 },
- { .offset = 0, .length = 72 },
- { .offset = 0, .length = 40 },
-};
-
-static uint8_t
-aes128_gcm_reference_plaintext[][AES128_GCM_MAX_DATA_LEN] = {
- { /* Aad */
- 0x00, 0x00, 0x43, 0x21, 0x87, 0x65, 0x43, 0x21,
- 0x00, 0x00, 0x00, 0x00,
- /* Plain */
- 0x45, 0x00, 0x00, 0x48, 0x69, 0x9a, 0x00, 0x00,
- 0x80, 0x11, 0x4d, 0xb7, 0xc0, 0xa8, 0x01, 0x02,
- 0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x9b, 0xf1, 0x56,
- 0x38, 0xd3, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x5f, 0x73, 0x69,
- 0x70, 0x04, 0x5f, 0x75, 0x64, 0x70, 0x03, 0x73,
- 0x69, 0x70, 0x09, 0x63, 0x79, 0x62, 0x65, 0x72,
- 0x63, 0x69, 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00,
- 0x00, 0x21, 0x00, 0x01, 0x01, 0x02, 0x02, 0x01 },
-
- { /* Aad */
- 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
- /* Plain */
- 0x45, 0x00, 0x00, 0x3e, 0x69, 0x8f, 0x00, 0x00,
- 0x80, 0x11, 0x4d, 0xcc, 0xc0, 0xa8, 0x01, 0x02,
- 0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x98, 0x00, 0x35,
- 0x00, 0x2a, 0x23, 0x43, 0xb2, 0xd0, 0x01, 0x00,
- 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x73, 0x69, 0x70, 0x09, 0x63, 0x79, 0x62,
- 0x65, 0x72, 0x63, 0x69, 0x74, 0x79, 0x02, 0x64,
- 0x6b, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01 },
-
- { /* Aad */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- /* Plain */
- 0x45, 0x00, 0x00, 0x3c, 0x99, 0xc5, 0x00, 0x00,
- 0x80, 0x01, 0xcb, 0x7a, 0x40, 0x67, 0x93, 0x18,
- 0x01, 0x01, 0x01, 0x01, 0x08, 0x00, 0x07, 0x5c,
- 0x02, 0x00, 0x44, 0x00, 0x61, 0x62, 0x63, 0x64,
- 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
- 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
- 0x75, 0x76, 0x77, 0x61, 0x62, 0x63, 0x64, 0x65,
- 0x66, 0x67, 0x68, 0x69, 0x01, 0x02, 0x02, 0x01 },
-
- { /* Aad */
- 0x42, 0xf6, 0x7e, 0x3f, 0x10, 0x10, 0x10, 0x10,
- 0x10, 0x10, 0x10, 0x10,
- /* Plain */
- 0x45, 0x00, 0x00, 0x1c, 0x42, 0xa2, 0x00, 0x00,
- 0x80, 0x01, 0x44, 0x1f, 0x40, 0x67, 0x93, 0xb6,
- 0xe0, 0x00, 0x00, 0x02, 0x0a, 0x00, 0xf5, 0xff,
- 0x01, 0x02, 0x02, 0x01 }
-};
-
-static uint8_t
-aes128_gcm_reference_ciphertext[][AES128_GCM_MAX_DATA_LEN] = {
- { /* Aad */
- 0x00, 0x00, 0x43, 0x21, 0x87, 0x65, 0x43, 0x21,
- 0x00, 0x00, 0x00, 0x00,
- /* Plain */
- 0xfe, 0xcf, 0x53, 0x7e, 0x72, 0x9d, 0x5b, 0x07,
- 0xdc, 0x30, 0xdf, 0x52, 0x8d, 0xd2, 0x2b, 0x76,
- 0x8d, 0x1b, 0x98, 0x73, 0x66, 0x96, 0xa6, 0xfd,
- 0x34, 0x85, 0x09, 0xfa, 0x13, 0xce, 0xac, 0x34,
- 0xcf, 0xa2, 0x43, 0x6f, 0x14, 0xa3, 0xf3, 0xcf,
- 0x65, 0x92, 0x5b, 0xf1, 0xf4, 0xa1, 0x3c, 0x5d,
- 0x15, 0xb2, 0x1e, 0x18, 0x84, 0xf5, 0xff, 0x62,
- 0x47, 0xae, 0xab, 0xb7, 0x86, 0xb9, 0x3b, 0xce,
- 0x61, 0xbc, 0x17, 0xd7, 0x68, 0xfd, 0x97, 0x32,
- /* Digest */
- 0x45, 0x90, 0x18, 0x14, 0x8f, 0x6c, 0xbe, 0x72,
- 0x2f, 0xd0, 0x47, 0x96, 0x56, 0x2d, 0xfd, 0xb4 },
-
- { /* Aad */
- 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
- /* Plain */
- 0xde, 0xb2, 0x2c, 0xd9, 0xb0, 0x7c, 0x72, 0xc1,
- 0x6e, 0x3a, 0x65, 0xbe, 0xeb, 0x8d, 0xf3, 0x04,
- 0xa5, 0xa5, 0x89, 0x7d, 0x33, 0xae, 0x53, 0x0f,
- 0x1b, 0xa7, 0x6d, 0x5d, 0x11, 0x4d, 0x2a, 0x5c,
- 0x3d, 0xe8, 0x18, 0x27, 0xc1, 0x0e, 0x9a, 0x4f,
- 0x51, 0x33, 0x0d, 0x0e, 0xec, 0x41, 0x66, 0x42,
- 0xcf, 0xbb, 0x85, 0xa5, 0xb4, 0x7e, 0x48, 0xa4,
- 0xec, 0x3b, 0x9b, 0xa9, 0x5d, 0x91, 0x8b, 0xd1,
- /* Digest */
- 0x83, 0xb7, 0x0d, 0x3a, 0xa8, 0xbc, 0x6e, 0xe4,
- 0xc3, 0x09, 0xe9, 0xd8, 0x5a, 0x41, 0xad, 0x4a },
- { /* Aad */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- /* Plain */
- 0x46, 0x88, 0xda, 0xf2, 0xf9, 0x73, 0xa3, 0x92,
- 0x73, 0x29, 0x09, 0xc3, 0x31, 0xd5, 0x6d, 0x60,
- 0xf6, 0x94, 0xab, 0xaa, 0x41, 0x4b, 0x5e, 0x7f,
- 0xf5, 0xfd, 0xcd, 0xff, 0xf5, 0xe9, 0xa2, 0x84,
- 0x45, 0x64, 0x76, 0x49, 0x27, 0x19, 0xff, 0xb6,
- 0x4d, 0xe7, 0xd9, 0xdc, 0xa1, 0xe1, 0xd8, 0x94,
- 0xbc, 0x3b, 0xd5, 0x78, 0x73, 0xed, 0x4d, 0x18,
- 0x1d, 0x19, 0xd4, 0xd5, 0xc8, 0xc1, 0x8a, 0xf3,
- /* Digest */
- 0xf8, 0x21, 0xd4, 0x96, 0xee, 0xb0, 0x96, 0xe9,
- 0x8a, 0xd2, 0xb6, 0x9e, 0x47, 0x99, 0xc7, 0x1d },
-
- { /* Aad */
- 0x42, 0xf6, 0x7e, 0x3f, 0x10, 0x10, 0x10, 0x10,
- 0x10, 0x10, 0x10, 0x10,
- /* Plain */
- 0xfb, 0xa2, 0xca, 0x84, 0x5e, 0x5d, 0xf9, 0xf0,
- 0xf2, 0x2c, 0x3e, 0x6e, 0x86, 0xdd, 0x83, 0x1e,
- 0x1f, 0xc6, 0x57, 0x92, 0xcd, 0x1a, 0xf9, 0x13,
- 0x0e, 0x13, 0x79, 0xed,
- /* Digest */
- 0x36, 0x9f, 0x07, 0x1f, 0x35, 0xe0, 0x34, 0xbe,
- 0x95, 0xf1, 0x12, 0xe4, 0xe7, 0xd0, 0x5d, 0x35 }
-};
-
-static uint8_t hmac_md5_reference_key[][HMAC_MD5_KEY_LEN] = {
- { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b },
-
- /* "Jefe" */
- { 0x4a, 0x65, 0x66, 0x65 },
-
- { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa }
-};
-
-static uint32_t hmac_md5_reference_length[] = { 8, 28, 50 };
-
-static uint8_t
-hmac_md5_reference_plaintext[][HMAC_MD5_MAX_DATA_LEN] = {
- /* "Hi There" */
- { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
-
- /* what do ya want for nothing?*/
- { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
- 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
- 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
- 0x69, 0x6e, 0x67, 0x3f },
-
- { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd }
-};
-
-static uint8_t hmac_md5_reference_digest[][HMAC_MD5_DIGEST_LEN] = {
- { 0x92, 0x94, 0x72, 0x7a, 0x36, 0x38, 0xbb, 0x1c,
- 0x13, 0xf4, 0x8e, 0xf8, 0x15, 0x8b, 0xfc, 0x9d },
-
- { 0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03,
- 0xea, 0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38 },
-
- { 0x56, 0xbe, 0x34, 0x52, 0x1d, 0x14, 0x4c, 0x88,
- 0xdb, 0xb8, 0xc7, 0x33, 0xf0, 0xe8, 0xb3, 0xf6 }
-};
-
-static uint8_t hmac_sha256_reference_key[][HMAC_SHA256_KEY_LEN] = {
- { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b },
-
- /* "Jefe" */
- { 0x4a, 0x65, 0x66, 0x65 },
-
- { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xaa }
-};
-
-static uint32_t hmac_sha256_reference_length[] = { 8, 28, 50 };
-
-static uint8_t
-hmac_sha256_reference_plaintext[][HMAC_SHA256_MAX_DATA_LEN] = {
- /* "Hi There" */
- { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
-
- /* what do ya want for nothing?*/
- { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
- 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
- 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
- 0x69, 0x6e, 0x67, 0x3f },
-
- { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
- 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd }
-};
-
-static uint8_t hmac_sha256_reference_digest[][HMAC_SHA256_DIGEST_LEN] = {
- { 0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53,
- 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b },
-
- { 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e,
- 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7 },
-
- { 0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46,
- 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7 }
-};
-
-#endif
diff --git a/test/common_plat/validation/api/crypto/test_vectors_len.h b/test/common_plat/validation/api/crypto/test_vectors_len.h
deleted file mode 100644
index 4fbb5cd70..000000000
--- a/test/common_plat/validation/api/crypto/test_vectors_len.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef TEST_VECTORS_LEN_
-#define TEST_VECTORS_LEN_
-
-/* TDES-CBC */
-#define TDES_CBC_KEY_LEN 24
-#define TDES_CBC_IV_LEN 8
-#define TDES_CBC_MAX_DATA_LEN 16
-
-/* AES128-CBC */
-#define AES128_CBC_KEY_LEN 16
-#define AES128_CBC_IV_LEN 16
-#define AES128_CBC_MAX_DATA_LEN 64
-
-/* AES128-CBC */
-#define AES128_GCM_KEY_LEN 16
-#define AES128_GCM_IV_LEN 12
-#define AES128_GCM_MAX_DATA_LEN 106
-#define AES128_GCM_DIGEST_LEN 16
-#define AES128_GCM_CHECK_LEN 16
-
-/* HMAC-MD5 */
-#define HMAC_MD5_KEY_LEN 16
-#define HMAC_MD5_MAX_DATA_LEN 128
-#define HMAC_MD5_DIGEST_LEN 16
-#define HMAC_MD5_96_CHECK_LEN 12
-
-/* HMAC-SHA256 */
-#define HMAC_SHA256_KEY_LEN 32
-#define HMAC_SHA256_MAX_DATA_LEN 128
-#define HMAC_SHA256_DIGEST_LEN 32
-#define HMAC_SHA256_128_CHECK_LEN 16
-
-#endif
diff --git a/test/common_plat/validation/api/errno/Makefile.am b/test/common_plat/validation/api/errno/Makefile.am
deleted file mode 100644
index a24275d6e..000000000
--- a/test/common_plat/validation/api/errno/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtesterrno.la
-libtesterrno_la_SOURCES = errno.c
-
-test_PROGRAMS = errno_main$(EXEEXT)
-dist_errno_main_SOURCES = errno_main.c
-errno_main_LDADD = libtesterrno.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = errno.h
diff --git a/test/common_plat/validation/api/errno/errno.h b/test/common_plat/validation/api/errno/errno.h
deleted file mode 100644
index 720385196..000000000
--- a/test/common_plat/validation/api/errno/errno.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_ERRNO_H_
-#define _ODP_TEST_ERRNO_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void errno_test_odp_errno_sunny_day(void);
-
-/* test arrays: */
-extern odp_testinfo_t errno_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t errno_suites[];
-
-/* main test program: */
-int errno_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/errno/errno_main.c b/test/common_plat/validation/api/errno/errno_main.c
deleted file mode 100644
index 0138279ef..000000000
--- a/test/common_plat/validation/api/errno/errno_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "errno.h"
-
-int main(int argc, char *argv[])
-{
- return errno_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/hash/Makefile.am b/test/common_plat/validation/api/hash/Makefile.am
deleted file mode 100644
index b899b8bd3..000000000
--- a/test/common_plat/validation/api/hash/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtesthash.la
-libtesthash_la_SOURCES = hash.c
-
-test_PROGRAMS = hash_main$(EXEEXT)
-dist_hash_main_SOURCES = hash_main.c
-hash_main_LDADD = libtesthash.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = hash.h
diff --git a/test/common_plat/validation/api/hash/hash.c b/test/common_plat/validation/api/hash/hash.c
deleted file mode 100644
index b353fcecd..000000000
--- a/test/common_plat/validation/api/hash/hash.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "hash.h"
-
-void hash_test_crc32c(void)
-{
- uint32_t test_value = 0x12345678;
- uint32_t ret = odp_hash_crc32c(&test_value, 4, 0);
-
- CU_ASSERT(ret == 0xfa745634);
-
- test_value = 0x87654321;
- ret = odp_hash_crc32c(&test_value, 4, 0);
-
- CU_ASSERT(ret == 0xaca37da7);
-
- uint32_t test_values[] = {0x12345678, 0x87654321};
-
- ret = odp_hash_crc32c(test_values, 8, 0);
-
- CU_ASSERT(ret == 0xe6e910b0);
-}
-
-odp_testinfo_t hash_suite[] = {
- ODP_TEST_INFO(hash_test_crc32c),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t hash_suites[] = {
- {"Hash", NULL, NULL, hash_suite},
- ODP_SUITE_INFO_NULL
-};
-
-int hash_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(hash_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/hash/hash.h b/test/common_plat/validation/api/hash/hash.h
deleted file mode 100644
index 936571e6a..000000000
--- a/test/common_plat/validation/api/hash/hash.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_HASH_H_
-#define _ODP_TEST_HASH_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void hash_test_crc32c(void);
-
-/* test arrays: */
-extern odp_testinfo_t hash_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t hash_suites[];
-
-/* main test program: */
-int hash_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/hash/hash_main.c b/test/common_plat/validation/api/hash/hash_main.c
deleted file mode 100644
index f9818b7bb..000000000
--- a/test/common_plat/validation/api/hash/hash_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "hash.h"
-
-int main(int argc, char *argv[])
-{
- return hash_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/init/.gitignore b/test/common_plat/validation/api/init/.gitignore
deleted file mode 100644
index f433708b0..000000000
--- a/test/common_plat/validation/api/init/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-init_main_abort
-init_main_log
-init_main_ok
diff --git a/test/common_plat/validation/api/init/Makefile.am b/test/common_plat/validation/api/init/Makefile.am
deleted file mode 100644
index 0793e6423..000000000
--- a/test/common_plat/validation/api/init/Makefile.am
+++ /dev/null
@@ -1,16 +0,0 @@
-include ../Makefile.inc
-noinst_LTLIBRARIES = libtestinit.la
-libtestinit_la_SOURCES = init.c
-
-# most platforms are expected not to support multiple ODP inits
-# following each other: therefore 3 separate binaries are
-# created, each containing its ODP init test.
-test_PROGRAMS = init_main_abort$(EXEEXT) init_main_log$(EXEEXT) init_main_ok$(EXEEXT)
-dist_init_main_abort_SOURCES = init_main_abort.c
-dist_init_main_log_SOURCES = init_main_log.c
-dist_init_main_ok_SOURCES = init_main_ok.c
-init_main_abort_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
-init_main_log_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
-init_main_ok_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = init.h
diff --git a/test/common_plat/validation/api/init/init.c b/test/common_plat/validation/api/init/init.c
deleted file mode 100644
index 61055fad5..000000000
--- a/test/common_plat/validation/api/init/init.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdarg.h>
-#include <stdlib.h>
-#include <odp_api.h>
-#include <CUnit/Basic.h>
-#include "init.h"
-
-/* flag set when the replacement logging function is used */
-int replacement_logging_used;
-
-/* replacement abort function: */
-static void odp_init_abort(void) ODP_NORETURN;
-
-/* replacement log function: */
-ODP_PRINTF_FORMAT(2, 3)
-static int odp_init_log(odp_log_level_t level, const char *fmt, ...);
-
-/* test ODP global init, with alternate abort function */
-void init_test_odp_init_global_replace_abort(void)
-{
- int status;
- struct odp_init_t init_data;
- odp_instance_t instance;
-
- memset(&init_data, 0, sizeof(init_data));
- init_data.abort_fn = &odp_init_abort;
-
- status = odp_init_global(&instance, &init_data, NULL);
- CU_ASSERT_FATAL(status == 0);
-
- status = odp_term_global(instance);
- CU_ASSERT(status == 0);
-}
-
-odp_testinfo_t init_suite_abort[] = {
- ODP_TEST_INFO(init_test_odp_init_global_replace_abort),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t init_suites_abort[] = {
- {"Init", NULL, NULL, init_suite_abort},
- ODP_SUITE_INFO_NULL,
-};
-
-static void odp_init_abort(void)
-{
- abort();
-}
-
-int init_main_abort(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- /* prevent default ODP init: */
- odp_cunit_register_global_init(NULL);
- odp_cunit_register_global_term(NULL);
-
- /* run the tests: */
- ret = odp_cunit_register(init_suites_abort);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
-
-/* test ODP global init, with alternate log function */
-void init_test_odp_init_global_replace_log(void)
-{
- int status;
- struct odp_init_t init_data;
- odp_instance_t instance;
-
- memset(&init_data, 0, sizeof(init_data));
- init_data.log_fn = &odp_init_log;
-
- replacement_logging_used = 0;
-
- status = odp_init_global(&instance, &init_data, NULL);
- CU_ASSERT_FATAL(status == 0);
-
- CU_ASSERT_TRUE(replacement_logging_used || ODP_DEBUG_PRINT == 0);
-
- status = odp_term_global(instance);
- CU_ASSERT(status == 0);
-}
-
-odp_testinfo_t init_suite_log[] = {
- ODP_TEST_INFO(init_test_odp_init_global_replace_log),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t init_suites_log[] = {
- {"Init", NULL, NULL, init_suite_log},
- ODP_SUITE_INFO_NULL,
-};
-
-static int odp_init_log(odp_log_level_t level __attribute__((unused)),
- const char *fmt, ...)
-{
- va_list args;
- int r;
-
- /* just set a flag to be sure the replacement fn was used */
- replacement_logging_used = 1;
-
- va_start(args, fmt);
- r = vfprintf(stderr, fmt, args);
- va_end(args);
-
- return r;
-}
-
-int init_main_log(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- /* prevent default ODP init: */
- odp_cunit_register_global_init(NULL);
- odp_cunit_register_global_term(NULL);
-
- /* register the tests: */
- ret = odp_cunit_register(init_suites_log);
-
- /* run the tests: */
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
-
-/* test normal ODP global init */
-void init_test_odp_init_global(void)
-{
- int status;
- odp_instance_t instance;
-
- status = odp_init_global(&instance, NULL, NULL);
- CU_ASSERT_FATAL(status == 0);
-
- status = odp_term_global(instance);
- CU_ASSERT(status == 0);
-}
-
-odp_testinfo_t init_suite_ok[] = {
- ODP_TEST_INFO(init_test_odp_init_global),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t init_suites_ok[] = {
- {"Init", NULL, NULL, init_suite_ok},
- ODP_SUITE_INFO_NULL,
-};
-
-int init_main_ok(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- /* prevent default ODP init: */
- odp_cunit_register_global_init(NULL);
- odp_cunit_register_global_term(NULL);
-
- /* register the tests: */
- ret = odp_cunit_register(init_suites_ok);
-
- /* run the tests: */
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/init/init.h b/test/common_plat/validation/api/init/init.h
deleted file mode 100644
index cad9cf988..000000000
--- a/test/common_plat/validation/api/init/init.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_INIT_H_
-#define _ODP_TEST_INIT_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void init_test_odp_init_global_replace_abort(void);
-void init_test_odp_init_global_replace_log(void);
-void init_test_odp_init_global(void);
-
-/* test arrays: */
-extern odp_testinfo_t init_suite_abort[];
-extern odp_testinfo_t init_suite_log[];
-extern odp_testinfo_t init_suite_ok[];
-
-/* test registry: */
-extern odp_suiteinfo_t init_suites_abort[];
-extern odp_suiteinfo_t init_suites_log[];
-extern odp_suiteinfo_t init_suites_ok[];
-
-/* main test program: */
-int init_main_abort(int argc, char *argv[]);
-int init_main_log(int argc, char *argv[]);
-int init_main_ok(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/init/init_main_abort.c b/test/common_plat/validation/api/init/init_main_abort.c
deleted file mode 100644
index 2e0faafb8..000000000
--- a/test/common_plat/validation/api/init/init_main_abort.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "init.h"
-
-int main(int argc, char *argv[])
-{
- return init_main_abort(argc, argv);
-}
diff --git a/test/common_plat/validation/api/init/init_main_log.c b/test/common_plat/validation/api/init/init_main_log.c
deleted file mode 100644
index 41dd00d72..000000000
--- a/test/common_plat/validation/api/init/init_main_log.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "init.h"
-
-int main(int argc, char *argv[])
-{
- return init_main_log(argc, argv);
-}
diff --git a/test/common_plat/validation/api/init/init_main_ok.c b/test/common_plat/validation/api/init/init_main_ok.c
deleted file mode 100644
index 6053ec188..000000000
--- a/test/common_plat/validation/api/init/init_main_ok.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include "init.h"
-
-int main(int argc, char *argv[])
-{
- return init_main_ok(argc, argv);
-}
diff --git a/test/common_plat/validation/api/lock/Makefile.am b/test/common_plat/validation/api/lock/Makefile.am
deleted file mode 100644
index 29993df44..000000000
--- a/test/common_plat/validation/api/lock/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestlock.la
-libtestlock_la_SOURCES = lock.c
-
-test_PROGRAMS = lock_main$(EXEEXT)
-dist_lock_main_SOURCES = lock_main.c
-lock_main_LDADD = libtestlock.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = lock.h
diff --git a/test/common_plat/validation/api/lock/lock.h b/test/common_plat/validation/api/lock/lock.h
deleted file mode 100644
index e0f49728b..000000000
--- a/test/common_plat/validation/api/lock/lock.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_LOCK_H_
-#define _ODP_TEST_LOCK_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void lock_test_no_lock_functional(void);
-void lock_test_spinlock_api(void);
-void lock_test_spinlock_functional(void);
-void lock_test_spinlock_recursive_api(void);
-void lock_test_spinlock_recursive_functional(void);
-void lock_test_ticketlock_api(void);
-void lock_test_ticketlock_functional(void);
-void lock_test_rwlock_api(void);
-void lock_test_rwlock_functional(void);
-void lock_test_rwlock_recursive_api(void);
-void lock_test_rwlock_recursive_functional(void);
-
-/* test arrays: */
-extern odp_testinfo_t lock_suite_no_locking[];
-extern odp_testinfo_t lock_suite_spinlock[];
-extern odp_testinfo_t lock_suite_spinlock_recursive[];
-extern odp_testinfo_t lock_suite_ticketlock[];
-extern odp_testinfo_t lock_suite_rwlock[];
-extern odp_testinfo_t lock_suite_rwlock_recursive[];
-
-/* test array init/term functions: */
-int lock_suite_init(void);
-
-/* test registry: */
-extern odp_suiteinfo_t lock_suites[];
-
-/* executable init/term functions: */
-int lock_init(odp_instance_t *inst);
-int lock_term(odp_instance_t inst);
-
-/* main test program: */
-int lock_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/lock/lock_main.c b/test/common_plat/validation/api/lock/lock_main.c
deleted file mode 100644
index 5a30f02b4..000000000
--- a/test/common_plat/validation/api/lock/lock_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "lock.h"
-
-int main(int argc, char *argv[])
-{
- return lock_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/packet/Makefile.am b/test/common_plat/validation/api/packet/Makefile.am
deleted file mode 100644
index d8ebc1a23..000000000
--- a/test/common_plat/validation/api/packet/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestpacket.la
-libtestpacket_la_SOURCES = packet.c
-
-test_PROGRAMS = packet_main$(EXEEXT)
-dist_packet_main_SOURCES = packet_main.c
-packet_main_LDADD = libtestpacket.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = packet.h
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c
deleted file mode 100644
index 284aaeb5a..000000000
--- a/test/common_plat/validation/api/packet/packet.c
+++ /dev/null
@@ -1,2451 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdlib.h>
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "packet.h"
-
-#define PACKET_BUF_LEN ODP_CONFIG_PACKET_SEG_LEN_MIN
-/* Reserve some tailroom for tests */
-#define PACKET_TAILROOM_RESERVE 4
-/* Number of packets in the test packet pool */
-#define PACKET_POOL_NUM 300
-
-static odp_pool_t packet_pool, packet_pool_no_uarea, packet_pool_double_uarea;
-static uint32_t packet_len;
-
-static uint32_t segmented_packet_len;
-static odp_bool_t segmentation_supported = true;
-
-odp_packet_t test_packet, segmented_test_packet;
-
-static struct udata_struct {
- uint64_t u64;
- uint32_t u32;
- char str[10];
-} test_packet_udata = {
- 123456,
- 789912,
- "abcdefg",
-};
-
-#define packet_compare_offset(pkt1, off1, pkt2, off2, len) \
- _packet_compare_offset((pkt1), (off1), (pkt2), (off2), (len), __LINE__)
-
-#define packet_compare_data(pkt1, pkt2) \
- _packet_compare_data((pkt1), (pkt2), __LINE__)
-
-static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2,
- int line)
-{
- uint32_t len = odp_packet_len(pkt1);
- uint32_t offset = 0;
- uint32_t seglen1, seglen2, cmplen;
- int ret;
-
- CU_ASSERT_FATAL(len == odp_packet_len(pkt2));
-
- while (len > 0) {
- void *pkt1map = odp_packet_offset(pkt1, offset, &seglen1, NULL);
- void *pkt2map = odp_packet_offset(pkt2, offset, &seglen2, NULL);
-
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
- cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
- ret = memcmp(pkt1map, pkt2map, cmplen);
-
- if (ret) {
- printf("\ncompare_data failed: line %i, offset %"
- PRIu32 "\n", line, offset);
- }
-
- CU_ASSERT(ret == 0);
-
- offset += cmplen;
- len -= cmplen;
- }
-}
-
-static int fill_data_forward(odp_packet_t pkt, uint32_t offset, uint32_t len,
- uint32_t *cur_data)
-{
- uint8_t buf[len];
- uint32_t i, data;
-
- data = *cur_data;
-
- for (i = 0; i < len; i++)
- buf[i] = data++;
-
- *cur_data = data;
-
- return odp_packet_copy_from_mem(pkt, offset, len, buf);
-}
-
-static int fill_data_backward(odp_packet_t pkt, uint32_t offset, uint32_t len,
- uint32_t *cur_data)
-{
- uint8_t buf[len];
- uint32_t i, data;
-
- data = *cur_data;
-
- for (i = 0; i < len; i++)
- buf[len - i - 1] = data++;
-
- *cur_data = data;
-
- return odp_packet_copy_from_mem(pkt, offset, len, buf);
-}
-
-int packet_suite_init(void)
-{
- odp_pool_param_t params;
- odp_pool_capability_t capa;
- struct udata_struct *udat;
- uint32_t udat_size;
- uint8_t data = 0;
- uint32_t i;
- uint32_t num = PACKET_POOL_NUM;
-
- if (odp_pool_capability(&capa) < 0) {
- printf("pool_capability failed\n");
- return -1;
- }
- if (capa.pkt.max_segs_per_pkt == 0)
- capa.pkt.max_segs_per_pkt = 10;
-
- /* Pick a typical packet size and decrement it to the single segment
- * limit if needed (min_seg_len maybe equal to max_len
- * on some systems). */
- packet_len = 512;
- while (packet_len > (capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE))
- packet_len--;
-
- if (capa.pkt.max_len) {
- segmented_packet_len = capa.pkt.max_len;
- } else {
- segmented_packet_len = capa.pkt.min_seg_len *
- capa.pkt.max_segs_per_pkt;
- }
- if (capa.pkt.max_num != 0 && capa.pkt.max_num < num)
- num = capa.pkt.max_num;
-
- odp_pool_param_init(&params);
-
- params.type = ODP_POOL_PACKET;
- params.pkt.seg_len = capa.pkt.min_seg_len;
- params.pkt.len = capa.pkt.min_seg_len;
- params.pkt.num = num;
- params.pkt.uarea_size = sizeof(struct udata_struct);
-
- packet_pool = odp_pool_create("packet_pool", &params);
- if (packet_pool == ODP_POOL_INVALID) {
- printf("pool_create failed: 1\n");
- return -1;
- }
-
- params.pkt.uarea_size = 0;
- packet_pool_no_uarea = odp_pool_create("packet_pool_no_uarea",
- &params);
- if (packet_pool_no_uarea == ODP_POOL_INVALID) {
- odp_pool_destroy(packet_pool);
- printf("pool_create failed: 2\n");
- return -1;
- }
-
- params.pkt.uarea_size = 2 * sizeof(struct udata_struct);
- packet_pool_double_uarea = odp_pool_create("packet_pool_double_uarea",
- &params);
-
- if (packet_pool_double_uarea == ODP_POOL_INVALID) {
- odp_pool_destroy(packet_pool_no_uarea);
- odp_pool_destroy(packet_pool);
- printf("pool_create failed: 3\n");
- return -1;
- }
-
- test_packet = odp_packet_alloc(packet_pool, packet_len);
-
- for (i = 0; i < packet_len; i++) {
- odp_packet_copy_from_mem(test_packet, i, 1, &data);
- data++;
- }
-
- /* Try to allocate the largest possible packet to see
- * if segmentation is supported */
- do {
- segmented_test_packet = odp_packet_alloc(packet_pool,
- segmented_packet_len);
- if (segmented_test_packet == ODP_PACKET_INVALID)
- segmented_packet_len -= capa.pkt.min_seg_len;
- } while (segmented_test_packet == ODP_PACKET_INVALID);
-
- if (odp_packet_is_valid(test_packet) == 0 ||
- odp_packet_is_valid(segmented_test_packet) == 0) {
- printf("packet_is_valid failed\n");
- return -1;
- }
-
- segmentation_supported = odp_packet_is_segmented(segmented_test_packet);
-
- data = 0;
- for (i = 0; i < segmented_packet_len; i++) {
- odp_packet_copy_from_mem(segmented_test_packet, i, 1, &data);
- data++;
- }
-
- udat = odp_packet_user_area(test_packet);
- udat_size = odp_packet_user_area_size(test_packet);
- if (!udat || udat_size != sizeof(struct udata_struct)) {
- printf("packet_user_area failed: 1\n");
- return -1;
- }
-
- odp_pool_print(packet_pool);
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
-
- udat = odp_packet_user_area(segmented_test_packet);
- udat_size = odp_packet_user_area_size(segmented_test_packet);
- if (udat == NULL || udat_size != sizeof(struct udata_struct)) {
- printf("packet_user_area failed: 2\n");
- return -1;
- }
-
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
-
- return 0;
-}
-
-int packet_suite_term(void)
-{
- odp_packet_free(test_packet);
- odp_packet_free(segmented_test_packet);
-
- if (odp_pool_destroy(packet_pool_double_uarea) != 0 ||
- odp_pool_destroy(packet_pool_no_uarea) != 0 ||
- odp_pool_destroy(packet_pool) != 0)
- return -1;
-
- return 0;
-}
-
-void packet_test_alloc_free(void)
-{
- odp_pool_t pool;
- odp_packet_t packet;
- odp_pool_param_t params;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- odp_pool_param_init(&params);
-
- params.type = ODP_POOL_PACKET;
- params.pkt.seg_len = capa.pkt.min_seg_len;
- params.pkt.len = capa.pkt.min_seg_len;
- params.pkt.num = 1;
-
- pool = odp_pool_create("packet_pool_alloc", &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- /* Allocate the only buffer from the pool */
- packet = odp_packet_alloc(pool, packet_len);
- CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(packet) == packet_len);
- CU_ASSERT(odp_event_type(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET);
- CU_ASSERT(odp_packet_to_u64(packet) !=
- odp_packet_to_u64(ODP_PACKET_INVALID));
-
- /* Pool should have only one packet */
- CU_ASSERT_FATAL(odp_packet_alloc(pool, packet_len)
- == ODP_PACKET_INVALID);
-
- odp_packet_free(packet);
-
- /* Check that the buffer was returned back to the pool */
- packet = odp_packet_alloc(pool, packet_len);
- CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(packet) == packet_len);
-
- odp_packet_free(packet);
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-/* Wrapper to call odp_packet_alloc_multi multiple times until
- * either no mure buffers are returned, or num buffers were alloced */
-static int packet_alloc_multi(odp_pool_t pool, uint32_t pkt_len,
- odp_packet_t pkt[], int num)
-{
- int ret, total = 0;
-
- do {
- ret = odp_packet_alloc_multi(pool, pkt_len, pkt + total,
- num - total);
- CU_ASSERT(ret >= 0);
- CU_ASSERT(ret <= num - total);
- total += ret;
- } while (total < num && ret);
-
- return total;
-}
-
-void packet_test_alloc_free_multi(void)
-{
- const int num_pkt = 2;
- odp_pool_t pool[2];
- int i, ret;
- odp_packet_t packet[2 * num_pkt + 1];
- odp_packet_t inval_pkt[num_pkt];
- odp_pool_param_t params;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- odp_pool_param_init(&params);
-
- params.type = ODP_POOL_PACKET;
- params.pkt.seg_len = capa.pkt.min_seg_len;
- params.pkt.len = capa.pkt.min_seg_len;
- params.pkt.num = num_pkt;
-
- pool[0] = odp_pool_create("packet_pool_alloc_multi_0", &params);
- pool[1] = odp_pool_create("packet_pool_alloc_multi_1", &params);
- CU_ASSERT_FATAL(pool[0] != ODP_POOL_INVALID);
- CU_ASSERT_FATAL(pool[1] != ODP_POOL_INVALID);
-
- /* Allocate all the packets from the pools */
-
- ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt + 1);
- CU_ASSERT_FATAL(ret == num_pkt);
- ret = packet_alloc_multi(pool[1], packet_len,
- &packet[num_pkt], num_pkt + 1);
- CU_ASSERT_FATAL(ret == num_pkt);
-
- for (i = 0; i < 2 * num_pkt; ++i) {
- CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
- CU_ASSERT(odp_event_type(odp_packet_to_event(packet[i])) ==
- ODP_EVENT_PACKET);
- CU_ASSERT(odp_packet_to_u64(packet[i]) !=
- odp_packet_to_u64(ODP_PACKET_INVALID));
- }
-
- /* Pools should have no more packets */
- ret = odp_packet_alloc_multi(pool[0], packet_len, inval_pkt, num_pkt);
- CU_ASSERT(ret == 0);
- ret = odp_packet_alloc_multi(pool[1], packet_len, inval_pkt, num_pkt);
- CU_ASSERT(ret == 0);
-
- /* Free all packets from all pools at once */
- odp_packet_free_multi(packet, 2 * num_pkt);
-
- /* Check that all the packets were returned back to their pools */
- ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt);
- CU_ASSERT(ret);
- ret = packet_alloc_multi(pool[1], packet_len,
- &packet[num_pkt], num_pkt);
- CU_ASSERT(ret);
-
- for (i = 0; i < 2 * num_pkt; ++i) {
- CU_ASSERT_FATAL(packet[i] != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
- }
- odp_packet_free_multi(packet, 2 * num_pkt);
- CU_ASSERT(odp_pool_destroy(pool[0]) == 0);
- CU_ASSERT(odp_pool_destroy(pool[1]) == 0);
-}
-
-void packet_test_alloc_segmented(void)
-{
- const int num = 5;
- odp_packet_t pkts[num];
- odp_packet_t pkt;
- uint32_t max_len;
- odp_pool_t pool;
- odp_pool_param_t params;
- odp_pool_capability_t capa;
- int ret, i, num_alloc;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
- if (capa.pkt.max_segs_per_pkt == 0)
- capa.pkt.max_segs_per_pkt = 10;
-
- if (capa.pkt.max_len)
- max_len = capa.pkt.max_len;
- else
- max_len = capa.pkt.min_seg_len * capa.pkt.max_segs_per_pkt;
-
- odp_pool_param_init(&params);
-
- params.type = ODP_POOL_PACKET;
- params.pkt.seg_len = capa.pkt.min_seg_len;
- params.pkt.len = max_len;
-
- /* Ensure that 'num' segmented packets can be allocated */
- params.pkt.num = num * capa.pkt.max_segs_per_pkt;
-
- pool = odp_pool_create("pool_alloc_segmented", &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- /* Less than max len allocs */
- pkt = odp_packet_alloc(pool, max_len / 2);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(pkt) == max_len / 2);
-
- odp_packet_free(pkt);
-
- num_alloc = 0;
- for (i = 0; i < num; i++) {
- ret = odp_packet_alloc_multi(pool, max_len / 2,
- &pkts[num_alloc], num - num_alloc);
- CU_ASSERT_FATAL(ret >= 0);
- num_alloc += ret;
- if (num_alloc >= num)
- break;
- }
-
- CU_ASSERT(num_alloc == num);
-
- for (i = 0; i < num_alloc; i++)
- CU_ASSERT(odp_packet_len(pkts[i]) == max_len / 2);
-
- odp_packet_free_multi(pkts, num_alloc);
-
- /* Max len allocs */
- pkt = odp_packet_alloc(pool, max_len);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(pkt) == max_len);
-
- odp_packet_free(pkt);
-
- num_alloc = 0;
- for (i = 0; i < num; i++) {
- ret = odp_packet_alloc_multi(pool, max_len,
- &pkts[num_alloc], num - num_alloc);
- CU_ASSERT_FATAL(ret >= 0);
- num_alloc += ret;
- if (num_alloc >= num)
- break;
- }
-
- CU_ASSERT(num_alloc == num);
-
- for (i = 0; i < num_alloc; i++)
- CU_ASSERT(odp_packet_len(pkts[i]) == max_len);
-
- odp_packet_free_multi(pkts, num_alloc);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_event_conversion(void)
-{
- odp_packet_t pkt = test_packet;
- odp_packet_t tmp_pkt;
- odp_event_t ev;
-
- ev = odp_packet_to_event(pkt);
- CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
- CU_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
-
- tmp_pkt = odp_packet_from_event(ev);
- CU_ASSERT_FATAL(tmp_pkt != ODP_PACKET_INVALID);
- CU_ASSERT(tmp_pkt == pkt);
- packet_compare_data(tmp_pkt, pkt);
-}
-
-void packet_test_basic_metadata(void)
-{
- odp_packet_t pkt = test_packet;
- odp_time_t ts;
-
- CU_ASSERT_PTR_NOT_NULL(odp_packet_head(pkt));
- CU_ASSERT_PTR_NOT_NULL(odp_packet_data(pkt));
-
- CU_ASSERT(odp_packet_pool(pkt) != ODP_POOL_INVALID);
- /* Packet was allocated by application so shouldn't have valid pktio. */
- CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
- CU_ASSERT(odp_packet_input_index(pkt) < 0);
-
- odp_packet_flow_hash_set(pkt, UINT32_MAX);
- CU_ASSERT(odp_packet_has_flow_hash(pkt));
- CU_ASSERT(odp_packet_flow_hash(pkt) == UINT32_MAX);
- odp_packet_has_flow_hash_clr(pkt);
- CU_ASSERT(!odp_packet_has_flow_hash(pkt));
-
- ts = odp_time_global();
- odp_packet_ts_set(pkt, ts);
- CU_ASSERT_FATAL(odp_packet_has_ts(pkt));
- CU_ASSERT(!odp_time_cmp(ts, odp_packet_ts(pkt)));
- odp_packet_has_ts_clr(pkt);
- CU_ASSERT(!odp_packet_has_ts(pkt));
-}
-
-void packet_test_length(void)
-{
- odp_packet_t pkt = test_packet;
- uint32_t buf_len, headroom, tailroom;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- buf_len = odp_packet_buf_len(pkt);
- headroom = odp_packet_headroom(pkt);
- tailroom = odp_packet_tailroom(pkt);
-
- CU_ASSERT(odp_packet_len(pkt) == packet_len);
- CU_ASSERT(headroom >= capa.pkt.min_headroom);
- CU_ASSERT(tailroom >= capa.pkt.min_tailroom);
-
- CU_ASSERT(buf_len >= packet_len + headroom + tailroom);
-}
-
-void packet_test_prefetch(void)
-{
- odp_packet_prefetch(test_packet, 0, odp_packet_len(test_packet));
- CU_PASS();
-}
-
-void packet_test_debug(void)
-{
- CU_ASSERT(odp_packet_is_valid(test_packet) == 1);
- odp_packet_print(test_packet);
-}
-
-void packet_test_context(void)
-{
- odp_packet_t pkt = test_packet;
- char ptr_test_value = 2;
- void *prev_ptr;
- struct udata_struct *udat;
-
- prev_ptr = odp_packet_user_ptr(pkt);
- odp_packet_user_ptr_set(pkt, &ptr_test_value);
- CU_ASSERT(odp_packet_user_ptr(pkt) == &ptr_test_value);
- odp_packet_user_ptr_set(pkt, prev_ptr);
-
- udat = odp_packet_user_area(pkt);
- CU_ASSERT_PTR_NOT_NULL(udat);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(udat, &test_packet_udata, sizeof(struct udata_struct))
- == 0);
-
- odp_packet_reset(pkt, packet_len);
-}
-
-void packet_test_layer_offsets(void)
-{
- odp_packet_t pkt = test_packet;
- uint8_t *l2_addr, *l3_addr, *l4_addr;
- uint32_t seg_len;
- const uint32_t l2_off = 2;
- const uint32_t l3_off = l2_off + 14;
- const uint32_t l4_off = l3_off + 14;
- int ret;
-
- /* Set offsets to the same value */
- ret = odp_packet_l2_offset_set(pkt, l2_off);
- CU_ASSERT(ret == 0);
- ret = odp_packet_l3_offset_set(pkt, l2_off);
- CU_ASSERT(ret == 0);
- ret = odp_packet_l4_offset_set(pkt, l2_off);
- CU_ASSERT(ret == 0);
-
- /* Addresses should be the same */
- l2_addr = odp_packet_l2_ptr(pkt, &seg_len);
- CU_ASSERT(seg_len != 0);
- l3_addr = odp_packet_l3_ptr(pkt, &seg_len);
- CU_ASSERT(seg_len != 0);
- l4_addr = odp_packet_l4_ptr(pkt, &seg_len);
- CU_ASSERT(seg_len != 0);
- CU_ASSERT_PTR_NOT_NULL(l2_addr);
- CU_ASSERT(l2_addr == l3_addr);
- CU_ASSERT(l2_addr == l4_addr);
-
- /* Set offsets to the different values */
- odp_packet_l2_offset_set(pkt, l2_off);
- CU_ASSERT(odp_packet_l2_offset(pkt) == l2_off);
- odp_packet_l3_offset_set(pkt, l3_off);
- CU_ASSERT(odp_packet_l3_offset(pkt) == l3_off);
- odp_packet_l4_offset_set(pkt, l4_off);
- CU_ASSERT(odp_packet_l4_offset(pkt) == l4_off);
-
- /* Addresses should not be the same */
- l2_addr = odp_packet_l2_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l2_addr);
- l3_addr = odp_packet_l3_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l3_addr);
- l4_addr = odp_packet_l4_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l4_addr);
-
- CU_ASSERT(l2_addr != l3_addr);
- CU_ASSERT(l2_addr != l4_addr);
- CU_ASSERT(l3_addr != l4_addr);
-}
-
-static void _verify_headroom_shift(odp_packet_t *pkt,
- int shift)
-{
- uint32_t room = odp_packet_headroom(*pkt);
- uint32_t seg_data_len = odp_packet_seg_len(*pkt);
- uint32_t pkt_data_len = odp_packet_len(*pkt);
- void *data;
- char *data_orig = odp_packet_data(*pkt);
- char *head_orig = odp_packet_head(*pkt);
- uint32_t seg_len;
- int extended, rc;
-
- if (shift >= 0) {
- if ((uint32_t)abs(shift) <= room) {
- data = odp_packet_push_head(*pkt, shift);
- extended = 0;
- } else {
- rc = odp_packet_extend_head(pkt, shift,
- &data, &seg_len);
- extended = 1;
- }
- } else {
- if ((uint32_t)abs(shift) <= seg_data_len) {
- data = odp_packet_pull_head(*pkt, -shift);
- extended = 0;
- } else {
- rc = odp_packet_trunc_head(pkt, -shift,
- &data, &seg_len);
- extended = 1;
- }
- }
-
- CU_ASSERT_PTR_NOT_NULL(data);
- if (extended) {
- CU_ASSERT(rc >= 0);
- if (shift >= 0) {
- CU_ASSERT(odp_packet_seg_len(*pkt) == shift - room);
- } else {
- CU_ASSERT(odp_packet_headroom(*pkt) >=
- (uint32_t)abs(shift) - seg_data_len);
- }
- CU_ASSERT(odp_packet_head(*pkt) != head_orig);
- } else {
- CU_ASSERT(odp_packet_headroom(*pkt) == room - shift);
- CU_ASSERT(odp_packet_seg_len(*pkt) == seg_data_len + shift);
- CU_ASSERT(data == data_orig - shift);
- CU_ASSERT(odp_packet_head(*pkt) == head_orig);
- }
-
- CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
- CU_ASSERT(odp_packet_data(*pkt) == data);
-}
-
-void packet_test_headroom(void)
-{
- odp_packet_t pkt = odp_packet_copy(test_packet,
- odp_packet_pool(test_packet));
- uint32_t room;
- uint32_t seg_data_len;
- uint32_t push_val, pull_val;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- room = odp_packet_headroom(pkt);
-
- CU_ASSERT(room >= capa.pkt.min_headroom);
-
- seg_data_len = odp_packet_seg_len(pkt);
- CU_ASSERT(seg_data_len >= 1);
-
- pull_val = seg_data_len / 2;
- push_val = room;
-
- _verify_headroom_shift(&pkt, -pull_val);
- _verify_headroom_shift(&pkt, push_val + pull_val);
- _verify_headroom_shift(&pkt, -push_val);
- _verify_headroom_shift(&pkt, 0);
-
- if (segmentation_supported) {
- push_val = room * 2;
- _verify_headroom_shift(&pkt, push_val);
- _verify_headroom_shift(&pkt, 0);
- _verify_headroom_shift(&pkt, -push_val);
- }
-
- odp_packet_free(pkt);
-}
-
-static void _verify_tailroom_shift(odp_packet_t *pkt,
- int shift)
-{
- odp_packet_seg_t seg;
- uint32_t room;
- uint32_t seg_data_len, pkt_data_len, seg_len;
- void *tail;
- char *tail_orig;
- int extended, rc;
-
- room = odp_packet_tailroom(*pkt);
- pkt_data_len = odp_packet_len(*pkt);
- tail_orig = odp_packet_tail(*pkt);
-
- seg = odp_packet_last_seg(*pkt);
- CU_ASSERT(seg != ODP_PACKET_SEG_INVALID);
- seg_data_len = odp_packet_seg_data_len(*pkt, seg);
-
- if (shift >= 0) {
- uint32_t l2_off, l3_off, l4_off;
-
- l2_off = odp_packet_l2_offset(*pkt);
- l3_off = odp_packet_l3_offset(*pkt);
- l4_off = odp_packet_l4_offset(*pkt);
-
- if ((uint32_t)abs(shift) <= room) {
- tail = odp_packet_push_tail(*pkt, shift);
- extended = 0;
- } else {
- rc = odp_packet_extend_tail(pkt, shift,
- &tail, &seg_len);
- extended = 1;
- }
-
- CU_ASSERT(l2_off == odp_packet_l2_offset(*pkt));
- CU_ASSERT(l3_off == odp_packet_l3_offset(*pkt));
- CU_ASSERT(l4_off == odp_packet_l4_offset(*pkt));
- } else {
- if ((uint32_t)abs(shift) <= seg_data_len) {
- tail = odp_packet_pull_tail(*pkt, -shift);
- extended = 0;
- } else {
- rc = odp_packet_trunc_tail(pkt, -shift,
- &tail, &seg_len);
- extended = 1;
- }
- }
-
- CU_ASSERT_PTR_NOT_NULL(tail);
- if (extended) {
- CU_ASSERT(rc >= 0);
- CU_ASSERT(odp_packet_last_seg(*pkt) != seg);
- seg = odp_packet_last_seg(*pkt);
- if (shift > 0) {
- CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
- shift - room);
- } else {
- CU_ASSERT(odp_packet_tailroom(*pkt) >=
- (uint32_t)abs(shift) - seg_data_len);
- CU_ASSERT(seg_len == odp_packet_tailroom(*pkt));
- }
- } else {
- CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
- seg_data_len + shift);
- CU_ASSERT(odp_packet_tailroom(*pkt) == room - shift);
- if (room == 0 || (room - shift) == 0)
- return;
- if (shift >= 0) {
- CU_ASSERT(odp_packet_tail(*pkt) ==
- tail_orig + shift);
- } else {
- CU_ASSERT(tail == tail_orig + shift);
- }
- }
-
- CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
- if (shift >= 0) {
- CU_ASSERT(tail == tail_orig);
- } else {
- CU_ASSERT(odp_packet_tail(*pkt) == tail);
- }
-}
-
-void packet_test_tailroom(void)
-{
- odp_packet_t pkt = odp_packet_copy(test_packet,
- odp_packet_pool(test_packet));
- odp_packet_seg_t segment;
- uint32_t room;
- uint32_t seg_data_len;
- uint32_t push_val, pull_val;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- segment = odp_packet_last_seg(pkt);
- CU_ASSERT(segment != ODP_PACKET_SEG_INVALID);
- room = odp_packet_tailroom(pkt);
- CU_ASSERT(room >= capa.pkt.min_tailroom);
-
- seg_data_len = odp_packet_seg_data_len(pkt, segment);
- CU_ASSERT(seg_data_len >= 1);
-
- pull_val = seg_data_len / 2;
- /* Leave one byte in a tailroom for odp_packet_tail() to succeed */
- push_val = (room > 0) ? room - 1 : room;
-
- _verify_tailroom_shift(&pkt, -pull_val);
- _verify_tailroom_shift(&pkt, push_val + pull_val);
- _verify_tailroom_shift(&pkt, -push_val);
- _verify_tailroom_shift(&pkt, 0);
-
- if (segmentation_supported) {
- push_val = room + 100;
- _verify_tailroom_shift(&pkt, push_val);
- _verify_tailroom_shift(&pkt, 0);
- _verify_tailroom_shift(&pkt, -push_val);
- }
-
- odp_packet_free(pkt);
-}
-
-void packet_test_segments(void)
-{
- int num_segs, seg_index;
- uint32_t data_len;
- odp_packet_seg_t seg;
- odp_packet_t pkt = test_packet;
- odp_packet_t seg_pkt = segmented_test_packet;
-
- CU_ASSERT(odp_packet_is_valid(pkt) == 1);
-
- num_segs = odp_packet_num_segs(pkt);
- CU_ASSERT(num_segs != 0);
-
- if (odp_packet_is_segmented(pkt)) {
- CU_ASSERT(num_segs > 1);
- } else {
- CU_ASSERT(num_segs == 1);
- }
-
- CU_ASSERT(odp_packet_is_segmented(pkt) == 0);
- if (segmentation_supported)
- CU_ASSERT(odp_packet_is_segmented(seg_pkt) == 1);
-
- seg = odp_packet_first_seg(pkt);
- data_len = 0;
- seg_index = 0;
- while (seg_index < num_segs && seg != ODP_PACKET_SEG_INVALID) {
- uint32_t seg_data_len;
- void *seg_data;
-
- seg_data_len = odp_packet_seg_data_len(pkt, seg);
- seg_data = odp_packet_seg_data(pkt, seg);
-
- CU_ASSERT(seg_data_len > 0);
- CU_ASSERT_PTR_NOT_NULL(seg_data);
- CU_ASSERT(odp_packet_seg_to_u64(seg) !=
- odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
- CU_ASSERT(odp_memcmp(seg_data, seg_data, seg_data_len) == 0);
-
- data_len += seg_data_len;
-
- seg_index++;
- seg = odp_packet_next_seg(pkt, seg);
- }
-
- CU_ASSERT(seg_index == num_segs);
- CU_ASSERT(data_len <= odp_packet_buf_len(pkt));
- CU_ASSERT(data_len == odp_packet_len(pkt));
-
- if (seg_index == num_segs)
- CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
-
- seg = odp_packet_first_seg(seg_pkt);
- num_segs = odp_packet_num_segs(seg_pkt);
-
- data_len = 0;
- seg_index = 0;
-
- while (seg_index < num_segs && seg != ODP_PACKET_SEG_INVALID) {
- uint32_t seg_data_len;
- void *seg_data;
-
- seg_data_len = odp_packet_seg_data_len(seg_pkt, seg);
- seg_data = odp_packet_seg_data(seg_pkt, seg);
-
- CU_ASSERT(seg_data_len > 0);
- CU_ASSERT(seg_data != NULL);
- CU_ASSERT(odp_packet_seg_to_u64(seg) !=
- odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
- CU_ASSERT(odp_memcmp(seg_data, seg_data, seg_data_len) == 0);
-
- data_len += seg_data_len;
-
- seg_index++;
- seg = odp_packet_next_seg(seg_pkt, seg);
- }
-
- CU_ASSERT(seg_index == num_segs);
- CU_ASSERT(data_len <= odp_packet_buf_len(seg_pkt));
- CU_ASSERT(data_len == odp_packet_len(seg_pkt));
-
- if (seg_index == num_segs)
- CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
-}
-
-void packet_test_segment_last(void)
-{
- odp_packet_t pkt = test_packet;
- odp_packet_seg_t seg;
-
- seg = odp_packet_last_seg(pkt);
- CU_ASSERT_FATAL(seg != ODP_PACKET_SEG_INVALID);
-
- seg = odp_packet_next_seg(pkt, seg);
- CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
-}
-
-#define TEST_INFLAG(packet, flag) \
-do { \
- odp_packet_has_##flag##_set(packet, 0); \
- CU_ASSERT(odp_packet_has_##flag(packet) == 0); \
- odp_packet_has_##flag##_set(packet, 1); \
- CU_ASSERT(odp_packet_has_##flag(packet) != 0); \
-} while (0)
-
-void packet_test_in_flags(void)
-{
- odp_packet_t pkt = test_packet;
-
- TEST_INFLAG(pkt, l2);
- TEST_INFLAG(pkt, l3);
- TEST_INFLAG(pkt, l4);
- TEST_INFLAG(pkt, eth);
- TEST_INFLAG(pkt, eth_bcast);
- TEST_INFLAG(pkt, eth_mcast);
- TEST_INFLAG(pkt, jumbo);
- TEST_INFLAG(pkt, vlan);
- TEST_INFLAG(pkt, vlan_qinq);
- TEST_INFLAG(pkt, arp);
- TEST_INFLAG(pkt, ipv4);
- TEST_INFLAG(pkt, ipv6);
- TEST_INFLAG(pkt, ip_bcast);
- TEST_INFLAG(pkt, ip_mcast);
- TEST_INFLAG(pkt, ipfrag);
- TEST_INFLAG(pkt, ipopt);
- TEST_INFLAG(pkt, ipsec);
- TEST_INFLAG(pkt, udp);
- TEST_INFLAG(pkt, tcp);
- TEST_INFLAG(pkt, sctp);
- TEST_INFLAG(pkt, icmp);
-}
-
-void packet_test_error_flags(void)
-{
- odp_packet_t pkt = test_packet;
- int err;
-
- /**
- * The packet have not been classified so it doesn't have error flags
- * properly set. Just check that functions return one of allowed values.
- */
- err = odp_packet_has_error(pkt);
- CU_ASSERT(err == 0 || err == 1);
-
- err = odp_packet_has_l2_error(pkt);
- CU_ASSERT(err == 0 || err == 1);
-
- err = odp_packet_has_l3_error(pkt);
- CU_ASSERT(err == 0 || err == 1);
-
- err = odp_packet_has_l4_error(pkt);
- CU_ASSERT(err == 0 || err == 1);
-}
-
-struct packet_metadata {
- uint32_t l2_off;
- uint32_t l3_off;
- uint32_t l4_off;
- void *usr_ptr;
- uint64_t usr_u64;
-};
-
-void packet_test_add_rem_data(void)
-{
- odp_packet_t pkt, new_pkt;
- uint32_t pkt_len, offset, add_len;
- void *usr_ptr;
- struct udata_struct *udat, *new_udat;
- int ret;
- odp_pool_capability_t capa;
- uint32_t min_seg_len;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- min_seg_len = capa.pkt.min_seg_len;
-
- pkt = odp_packet_alloc(packet_pool, packet_len);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- pkt_len = odp_packet_len(pkt);
- usr_ptr = odp_packet_user_ptr(pkt);
- udat = odp_packet_user_area(pkt);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- sizeof(struct udata_struct));
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
-
- offset = pkt_len / 2;
-
- if (segmentation_supported) {
- /* Insert one more packet length in the middle of a packet */
- add_len = min_seg_len;
- } else {
- /* Add diff between largest and smaller packets
- * which is at least tailroom */
- add_len = segmented_packet_len - packet_len;
- }
-
- new_pkt = pkt;
- ret = odp_packet_add_data(&new_pkt, offset, add_len);
- CU_ASSERT(ret >= 0);
- if (ret < 0)
- goto free_packet;
- CU_ASSERT(odp_packet_len(new_pkt) == pkt_len + add_len);
- /* Verify that user metadata is preserved */
- CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
-
- /* Verify that user metadata has been preserved */
- new_udat = odp_packet_user_area(new_pkt);
- CU_ASSERT_PTR_NOT_NULL(new_udat);
- CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(new_udat, &test_packet_udata,
- sizeof(struct udata_struct)) == 0);
-
- pkt = new_pkt;
-
- pkt_len = odp_packet_len(pkt);
- usr_ptr = odp_packet_user_ptr(pkt);
-
- ret = odp_packet_rem_data(&new_pkt, offset, add_len);
- CU_ASSERT(ret >= 0);
- if (ret < 0)
- goto free_packet;
- CU_ASSERT(odp_packet_len(new_pkt) == pkt_len - add_len);
- CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
-
- /* Verify that user metadata has been preserved */
- new_udat = odp_packet_user_area(new_pkt);
- CU_ASSERT_PTR_NOT_NULL(new_udat);
- CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(new_udat, &test_packet_udata,
- sizeof(struct udata_struct)) == 0);
-
- pkt = new_pkt;
-
-free_packet:
- odp_packet_free(pkt);
-}
-
-#define COMPARE_HAS_INFLAG(p1, p2, flag) \
- CU_ASSERT(odp_packet_has_##flag(p1) == odp_packet_has_##flag(p2))
-
-#define COMPARE_INFLAG(p1, p2, flag) \
- CU_ASSERT(odp_packet_##flag(p1) == odp_packet_##flag(p2))
-
-static void _packet_compare_inflags(odp_packet_t pkt1, odp_packet_t pkt2)
-{
- COMPARE_HAS_INFLAG(pkt1, pkt2, l2);
- COMPARE_HAS_INFLAG(pkt1, pkt2, l3);
- COMPARE_HAS_INFLAG(pkt1, pkt2, l4);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth_bcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth_mcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, jumbo);
- COMPARE_HAS_INFLAG(pkt1, pkt2, vlan);
- COMPARE_HAS_INFLAG(pkt1, pkt2, vlan_qinq);
- COMPARE_HAS_INFLAG(pkt1, pkt2, arp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipv4);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipv6);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ip_bcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ip_mcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipfrag);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipopt);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipsec);
- COMPARE_HAS_INFLAG(pkt1, pkt2, udp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, tcp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, sctp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, icmp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, flow_hash);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ts);
-
- COMPARE_INFLAG(pkt1, pkt2, color);
- COMPARE_INFLAG(pkt1, pkt2, drop_eligible);
- COMPARE_INFLAG(pkt1, pkt2, shaper_len_adjust);
-}
-
-static void _packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
-{
- uint32_t usize1 = odp_packet_user_area_size(pkt1);
- uint32_t usize2 = odp_packet_user_area_size(pkt2);
-
- void *uaddr1 = odp_packet_user_area(pkt1);
- void *uaddr2 = odp_packet_user_area(pkt2);
-
- uint32_t cmplen = usize1 <= usize2 ? usize1 : usize2;
-
- if (cmplen)
- CU_ASSERT(!memcmp(uaddr1, uaddr2, cmplen));
-}
-
-static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
- odp_packet_t pkt2, uint32_t off2,
- uint32_t len, int line)
-{
- uint32_t seglen1, seglen2, cmplen;
- int ret;
-
- if (off1 + len > odp_packet_len(pkt1) ||
- off2 + len > odp_packet_len(pkt2))
- return;
-
- while (len > 0) {
- void *pkt1map = odp_packet_offset(pkt1, off1, &seglen1, NULL);
- void *pkt2map = odp_packet_offset(pkt2, off2, &seglen2, NULL);
-
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
- cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
- if (len < cmplen)
- cmplen = len;
-
- ret = memcmp(pkt1map, pkt2map, cmplen);
-
- if (ret) {
- printf("\ncompare_offset failed: line %i, off1 %"
- PRIu32 ", off2 %" PRIu32 "\n", line, off1, off2);
- }
-
- CU_ASSERT(ret == 0);
-
- off1 += cmplen;
- off2 += cmplen;
- len -= cmplen;
- }
-}
-
-void packet_test_copy(void)
-{
- odp_packet_t pkt;
- odp_packet_t pkt_copy, pkt_part;
- odp_pool_t pool;
- uint32_t i, plen, seg_len, src_offset, dst_offset;
- void *pkt_data;
-
- pkt = odp_packet_copy(test_packet, packet_pool_no_uarea);
- CU_ASSERT(pkt == ODP_PACKET_INVALID);
- if (pkt != ODP_PACKET_INVALID)
- odp_packet_free(pkt);
-
- pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- packet_compare_data(pkt, test_packet);
- pool = odp_packet_pool(pkt);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pkt_copy = odp_packet_copy(pkt, pool);
- CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
-
- CU_ASSERT(pkt != pkt_copy);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
-
- _packet_compare_inflags(pkt, pkt_copy);
- packet_compare_data(pkt, pkt_copy);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- odp_packet_user_area_size(test_packet));
- _packet_compare_udata(pkt, pkt_copy);
- odp_packet_free(pkt_copy);
- odp_packet_free(pkt);
-
- pkt = odp_packet_copy(test_packet, packet_pool_double_uarea);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- packet_compare_data(pkt, test_packet);
- pool = odp_packet_pool(pkt);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pkt_copy = odp_packet_copy(pkt, pool);
- CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
-
- CU_ASSERT(pkt != pkt_copy);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
-
- _packet_compare_inflags(pkt, pkt_copy);
- packet_compare_data(pkt, pkt_copy);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- 2 * odp_packet_user_area_size(test_packet));
- _packet_compare_udata(pkt, pkt_copy);
- _packet_compare_udata(pkt, test_packet);
- odp_packet_free(pkt_copy);
-
- /* Now test copy_part */
- pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt) + 1, pool);
- CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
- pkt_part = odp_packet_copy_part(pkt, odp_packet_len(pkt), 1, pool);
- CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
-
- pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt), pool);
- CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
- CU_ASSERT(pkt != pkt_part);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_part));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_part));
-
- packet_compare_data(pkt, pkt_part);
- odp_packet_free(pkt_part);
-
- plen = odp_packet_len(pkt);
- for (i = 0; i < plen / 2; i += 5) {
- pkt_part = odp_packet_copy_part(pkt, i, plen / 4, pool);
- CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(pkt_part) == plen / 4);
- packet_compare_offset(pkt_part, 0, pkt, i, plen / 4);
- odp_packet_free(pkt_part);
- }
-
- /* Test copy and move apis */
- CU_ASSERT(odp_packet_copy_data(pkt, 0, plen - plen / 8, plen / 8) == 0);
- packet_compare_offset(pkt, 0, pkt, plen - plen / 8, plen / 8);
- packet_compare_offset(pkt, 0, test_packet, plen - plen / 8, plen / 8);
-
- /* Test segment crossing if we support segments */
- pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
- CU_ASSERT(pkt_data != NULL);
-
- if (seg_len < plen) {
- src_offset = seg_len - 15;
- dst_offset = seg_len - 5;
- } else {
- src_offset = seg_len - 40;
- dst_offset = seg_len - 25;
- }
-
- pkt_part = odp_packet_copy_part(pkt, src_offset, 20, pool);
- CU_ASSERT(odp_packet_move_data(pkt, dst_offset, src_offset, 20) == 0);
- packet_compare_offset(pkt, dst_offset, pkt_part, 0, 20);
-
- odp_packet_free(pkt_part);
- odp_packet_free(pkt);
-}
-
-void packet_test_copydata(void)
-{
- odp_packet_t pkt = test_packet;
- uint32_t pkt_len = odp_packet_len(pkt);
- uint8_t *data_buf;
- uint32_t i;
- int correct_memory;
-
- CU_ASSERT_FATAL(pkt_len > 0);
-
- data_buf = malloc(pkt_len);
- CU_ASSERT_PTR_NOT_NULL_FATAL(data_buf);
-
- for (i = 0; i < pkt_len; i++)
- data_buf[i] = (uint8_t)i;
-
- CU_ASSERT(!odp_packet_copy_from_mem(pkt, 0, pkt_len, data_buf));
- memset(data_buf, 0, pkt_len);
- CU_ASSERT(!odp_packet_copy_to_mem(pkt, 0, pkt_len, data_buf));
-
- correct_memory = 1;
- for (i = 0; i < pkt_len; i++)
- if (data_buf[i] != (uint8_t)i) {
- correct_memory = 0;
- break;
- }
- CU_ASSERT(correct_memory);
-
- free(data_buf);
-
- pkt = odp_packet_alloc(odp_packet_pool(test_packet), pkt_len / 2);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, test_packet, 0,
- pkt_len) < 0);
- CU_ASSERT(odp_packet_copy_from_pkt(pkt, pkt_len, test_packet, 0,
- 1) < 0);
-
- for (i = 0; i < pkt_len / 2; i++) {
- CU_ASSERT(odp_packet_copy_from_pkt(pkt, i, test_packet, i,
- 1) == 0);
- }
-
- packet_compare_offset(pkt, 0, test_packet, 0, pkt_len / 2);
- odp_packet_free(pkt);
-
- pkt = odp_packet_alloc(odp_packet_pool(segmented_test_packet),
- odp_packet_len(segmented_test_packet) / 2);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, segmented_test_packet,
- odp_packet_len(pkt) / 4,
- odp_packet_len(pkt)) == 0);
- packet_compare_offset(pkt, 0, segmented_test_packet,
- odp_packet_len(pkt) / 4,
- odp_packet_len(pkt));
- odp_packet_free(pkt);
-}
-
-void packet_test_concatsplit(void)
-{
- odp_packet_t pkt, pkt2;
- uint32_t pkt_len;
- odp_packet_t splits[4];
- odp_pool_t pool;
-
- pool = odp_packet_pool(test_packet);
- pkt = odp_packet_copy(test_packet, pool);
- pkt2 = odp_packet_copy(test_packet, pool);
- pkt_len = odp_packet_len(test_packet);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
- CU_ASSERT(pkt_len == odp_packet_len(pkt));
- CU_ASSERT(pkt_len == odp_packet_len(pkt2));
-
- CU_ASSERT(odp_packet_concat(&pkt, pkt2) >= 0);
- CU_ASSERT(odp_packet_len(pkt) == pkt_len * 2);
- packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
-
- CU_ASSERT(odp_packet_split(&pkt, pkt_len, &pkt2) == 0);
- CU_ASSERT(pkt != pkt2);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt2));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt2));
- packet_compare_data(pkt, pkt2);
- packet_compare_data(pkt, test_packet);
-
- odp_packet_free(pkt);
- odp_packet_free(pkt2);
-
- pkt = odp_packet_copy(segmented_test_packet,
- odp_packet_pool(segmented_test_packet));
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- pkt_len = odp_packet_len(pkt);
-
- packet_compare_data(pkt, segmented_test_packet);
- CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
- CU_ASSERT(pkt != splits[0]);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(splits[0]));
- CU_ASSERT(odp_packet_len(pkt) == pkt_len / 2);
- CU_ASSERT(odp_packet_len(pkt) + odp_packet_len(splits[0]) == pkt_len);
-
- packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
- packet_compare_offset(splits[0], 0, segmented_test_packet,
- pkt_len / 2, odp_packet_len(splits[0]));
-
- CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
- packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
- packet_compare_offset(pkt, pkt_len / 2, segmented_test_packet,
- pkt_len / 2, pkt_len / 2);
- packet_compare_offset(pkt, 0, segmented_test_packet, 0,
- pkt_len);
-
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
- packet_compare_data(pkt, segmented_test_packet);
-
- CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
- CU_ASSERT(odp_packet_split(&pkt, pkt_len / 4, &splits[1]) == 0);
- CU_ASSERT(odp_packet_split(&pkt, pkt_len / 8, &splits[2]) == 0);
-
- CU_ASSERT(odp_packet_len(splits[0]) + odp_packet_len(splits[1]) +
- odp_packet_len(splits[2]) + odp_packet_len(pkt) == pkt_len);
-
- CU_ASSERT(odp_packet_concat(&pkt, splits[2]) >= 0);
- CU_ASSERT(odp_packet_concat(&pkt, splits[1]) >= 0);
- CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
-
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
- packet_compare_data(pkt, segmented_test_packet);
-
- odp_packet_free(pkt);
-}
-
-void packet_test_concat_small(void)
-{
- odp_pool_capability_t capa;
- odp_pool_t pool;
- odp_pool_param_t param;
- odp_packet_t pkt, pkt2;
- int ret;
- uint8_t *data;
- uint32_t i;
- uint32_t len = 32000;
- uint8_t buf[len];
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- if (capa.pkt.max_len && capa.pkt.max_len < len)
- len = capa.pkt.max_len;
-
- odp_pool_param_init(&param);
-
- param.type = ODP_POOL_PACKET;
- param.pkt.len = len;
- param.pkt.num = PACKET_POOL_NUM;
-
- pool = odp_pool_create("packet_pool_concat", &param);
- CU_ASSERT(packet_pool != ODP_POOL_INVALID);
-
- pkt = odp_packet_alloc(pool, 1);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- data = odp_packet_data(pkt);
- *data = 0;
-
- for (i = 0; i < len - 1; i++) {
- pkt2 = odp_packet_alloc(pool, 1);
- CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
-
- data = odp_packet_data(pkt2);
- *data = i + 1;
-
- ret = odp_packet_concat(&pkt, pkt2);
- CU_ASSERT(ret >= 0);
-
- if (ret < 0) {
- odp_packet_free(pkt2);
- break;
- }
- }
-
- CU_ASSERT(odp_packet_len(pkt) == len);
-
- len = odp_packet_len(pkt);
-
- memset(buf, 0, len);
- CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
-
- for (i = 0; i < len; i++)
- CU_ASSERT(buf[i] == (i % 256));
-
- odp_packet_free(pkt);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_concat_extend_trunc(void)
-{
- odp_pool_capability_t capa;
- odp_pool_t pool;
- odp_pool_param_t param;
- odp_packet_t pkt, pkt2;
- int i, ret;
- uint32_t alloc_len, ext_len, trunc_len, cur_len;
- uint32_t len = 1900;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- if (capa.pkt.max_len && capa.pkt.max_len < len)
- len = capa.pkt.max_len;
-
- alloc_len = len / 8;
- ext_len = len / 4;
- trunc_len = len / 3;
-
- odp_pool_param_init(&param);
-
- param.type = ODP_POOL_PACKET;
- param.pkt.len = len;
- param.pkt.num = PACKET_POOL_NUM;
-
- pool = odp_pool_create("packet_pool_concat", &param);
- CU_ASSERT_FATAL(packet_pool != ODP_POOL_INVALID);
-
- pkt = odp_packet_alloc(pool, alloc_len);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- cur_len = odp_packet_len(pkt);
-
- for (i = 0; i < 2; i++) {
- pkt2 = odp_packet_alloc(pool, alloc_len);
- CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
-
- ret = odp_packet_concat(&pkt, pkt2);
- CU_ASSERT(ret >= 0);
-
- if (ret < 0)
- odp_packet_free(pkt2);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
- }
-
- ret = odp_packet_extend_tail(&pkt, ext_len, NULL, NULL);
- CU_ASSERT(ret >= 0);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
-
- ret = odp_packet_extend_head(&pkt, ext_len, NULL, NULL);
- CU_ASSERT(ret >= 0);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
-
- pkt2 = odp_packet_alloc(pool, alloc_len);
- CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
-
- ret = odp_packet_concat(&pkt, pkt2);
- CU_ASSERT(ret >= 0);
-
- if (ret < 0)
- odp_packet_free(pkt2);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
-
- ret = odp_packet_trunc_head(&pkt, trunc_len, NULL, NULL);
- CU_ASSERT(ret >= 0);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
-
- ret = odp_packet_trunc_tail(&pkt, trunc_len, NULL, NULL);
- CU_ASSERT(ret >= 0);
-
- CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
- cur_len = odp_packet_len(pkt);
- CU_ASSERT(cur_len == odp_packet_unshared_len(pkt));
-
- odp_packet_free(pkt);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_extend_small(void)
-{
- odp_pool_capability_t capa;
- odp_pool_t pool;
- odp_pool_param_t param;
- odp_packet_t pkt;
- int ret, round;
- uint8_t *data;
- uint32_t i, seg_len;
- int tail = 1;
- uint32_t len = 32000;
- uint8_t buf[len];
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- if (capa.pkt.max_len && capa.pkt.max_len < len)
- len = capa.pkt.max_len;
-
- odp_pool_param_init(&param);
-
- param.type = ODP_POOL_PACKET;
- param.pkt.len = len;
- param.pkt.num = PACKET_POOL_NUM;
-
- pool = odp_pool_create("packet_pool_extend", &param);
- CU_ASSERT_FATAL(packet_pool != ODP_POOL_INVALID);
-
- for (round = 0; round < 2; round++) {
- pkt = odp_packet_alloc(pool, 1);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- data = odp_packet_data(pkt);
- *data = 0;
-
- for (i = 0; i < len - 1; i++) {
- if (tail) {
- ret = odp_packet_extend_tail(&pkt, 1,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- } else {
- ret = odp_packet_extend_head(&pkt, 1,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- }
-
- if (ret < 0)
- break;
-
- if (tail) {
- /* assert needs brackets */
- CU_ASSERT(seg_len == 1);
- } else {
- CU_ASSERT(seg_len > 0);
- }
-
- *data = i + 1;
- }
-
- CU_ASSERT(odp_packet_len(pkt) == len);
- CU_ASSERT(odp_packet_unshared_len(pkt) == len);
-
- len = odp_packet_len(pkt);
-
- memset(buf, 0, len);
- CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
-
- for (i = 0; i < len; i++) {
- int match;
-
- if (tail) {
- match = (buf[i] == (i % 256));
- CU_ASSERT(match);
- } else {
- match = (buf[len - 1 - i] == (i % 256));
- CU_ASSERT(match);
- }
-
- /* Limit the number of failed asserts to
- one per packet */
- if (!match)
- break;
- }
-
- odp_packet_free(pkt);
-
- tail = 0;
- }
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_extend_large(void)
-{
- odp_pool_capability_t capa;
- odp_pool_t pool;
- odp_pool_param_t param;
- odp_packet_t pkt;
- int ret, round;
- uint8_t *data;
- uint32_t i, seg_len, ext_len, cur_len, cur_data;
- int tail = 1;
- int num_div = 16;
- int div = 1;
- uint32_t len = 32000;
- uint8_t buf[len];
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- if (capa.pkt.max_len && capa.pkt.max_len < len)
- len = capa.pkt.max_len;
-
- odp_pool_param_init(&param);
-
- param.type = ODP_POOL_PACKET;
- param.pkt.len = len;
- param.pkt.num = PACKET_POOL_NUM;
-
- pool = odp_pool_create("packet_pool_extend", &param);
- CU_ASSERT_FATAL(packet_pool != ODP_POOL_INVALID);
-
- for (round = 0; round < 2 * num_div; round++) {
- ext_len = len / div;
- cur_len = ext_len;
-
- pkt = odp_packet_alloc(pool, ext_len);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- cur_data = 0;
-
- if (tail) {
- ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
- CU_ASSERT(ret == 0);
- } else {
- ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
- CU_ASSERT(ret == 0);
- }
-
- while (cur_len < len) {
- if ((len - cur_len) < ext_len)
- ext_len = len - cur_len;
-
- if (tail) {
- ret = odp_packet_extend_tail(&pkt, ext_len,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- } else {
- ret = odp_packet_extend_head(&pkt, ext_len,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- }
-
- if (ret < 0)
- break;
-
- if (tail) {
- /* assert needs brackets */
- CU_ASSERT((seg_len > 0) &&
- (seg_len <= ext_len));
- ret = fill_data_forward(pkt, cur_len, ext_len,
- &cur_data);
- CU_ASSERT(ret == 0);
- } else {
- CU_ASSERT(seg_len > 0);
- CU_ASSERT(data == odp_packet_data(pkt));
- ret = fill_data_backward(pkt, 0, ext_len,
- &cur_data);
- CU_ASSERT(ret == 0);
- }
-
- cur_len += ext_len;
- }
-
- CU_ASSERT(odp_packet_len(pkt) == len);
-
- len = odp_packet_len(pkt);
-
- memset(buf, 0, len);
- CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
-
- for (i = 0; i < len; i++) {
- int match;
-
- if (tail) {
- match = (buf[i] == (i % 256));
- CU_ASSERT(match);
- } else {
- match = (buf[len - 1 - i] == (i % 256));
- CU_ASSERT(match);
- }
-
- /* Limit the number of failed asserts to
- one per packet */
- if (!match)
- break;
- }
-
- odp_packet_free(pkt);
-
- div++;
- if (div > num_div) {
- /* test extend head */
- div = 1;
- tail = 0;
- }
- }
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_extend_mix(void)
-{
- odp_pool_capability_t capa;
- odp_pool_t pool;
- odp_pool_param_t param;
- odp_packet_t pkt;
- int ret, round;
- uint8_t *data;
- uint32_t i, seg_len, ext_len, cur_len, cur_data;
- int small_count;
- int tail = 1;
- uint32_t len = 32000;
- uint8_t buf[len];
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- if (capa.pkt.max_len && capa.pkt.max_len < len)
- len = capa.pkt.max_len;
-
- odp_pool_param_init(&param);
-
- param.type = ODP_POOL_PACKET;
- param.pkt.len = len;
- param.pkt.num = PACKET_POOL_NUM;
-
- pool = odp_pool_create("packet_pool_extend", &param);
- CU_ASSERT_FATAL(packet_pool != ODP_POOL_INVALID);
-
- for (round = 0; round < 2; round++) {
- small_count = 30;
- ext_len = len / 10;
- cur_len = ext_len;
-
- pkt = odp_packet_alloc(pool, ext_len);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- cur_data = 0;
-
- if (tail) {
- ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
- CU_ASSERT(ret == 0);
- } else {
- ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
- CU_ASSERT(ret == 0);
- }
-
- while (cur_len < len) {
- if (small_count) {
- small_count--;
- ext_len = len / 100;
- } else {
- ext_len = len / 4;
- }
-
- if ((len - cur_len) < ext_len)
- ext_len = len - cur_len;
-
- if (tail) {
- ret = odp_packet_extend_tail(&pkt, ext_len,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- CU_ASSERT((seg_len > 0) &&
- (seg_len <= ext_len));
- ret = fill_data_forward(pkt, cur_len, ext_len,
- &cur_data);
- CU_ASSERT(ret == 0);
- } else {
- ret = odp_packet_extend_head(&pkt, ext_len,
- (void **)&data,
- &seg_len);
- CU_ASSERT(ret >= 0);
- CU_ASSERT(seg_len > 0);
- CU_ASSERT(data == odp_packet_data(pkt));
- ret = fill_data_backward(pkt, 0, ext_len,
- &cur_data);
- CU_ASSERT(ret == 0);
- }
-
- cur_len += ext_len;
- }
-
- CU_ASSERT(odp_packet_len(pkt) == len);
-
- len = odp_packet_len(pkt);
-
- memset(buf, 0, len);
- CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
-
- for (i = 0; i < len; i++) {
- int match;
-
- if (tail) {
- match = (buf[i] == (i % 256));
- CU_ASSERT(match);
- } else {
- match = (buf[len - 1 - i] == (i % 256));
- CU_ASSERT(match);
- }
-
- /* Limit the number of failed asserts to
- one per packet */
- if (!match)
- break;
- }
-
- odp_packet_free(pkt);
-
- tail = 0;
- }
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void packet_test_extend_ref(void)
-{
- odp_packet_t max_pkt, ref;
- uint32_t hr, tr, max_len;
- odp_pool_capability_t capa;
-
- CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
-
- max_pkt = odp_packet_copy(segmented_test_packet,
- odp_packet_pool(segmented_test_packet));
- CU_ASSERT_FATAL(max_pkt != ODP_PACKET_INVALID);
- max_len = odp_packet_len(max_pkt);
-
- /* Maximize the max pkt */
- hr = odp_packet_headroom(max_pkt);
- tr = odp_packet_tailroom(max_pkt);
- odp_packet_push_head(max_pkt, hr);
- odp_packet_push_tail(max_pkt, tr);
-
- /* Max packet should not be extendable at either end */
- if (max_len == capa.pkt.max_len) {
- CU_ASSERT(odp_packet_extend_tail(&max_pkt, 1, NULL, NULL) < 0);
- CU_ASSERT(odp_packet_extend_head(&max_pkt, 1, NULL, NULL) < 0);
- }
-
- /* See if we can trunc and extend anyway */
- CU_ASSERT(odp_packet_trunc_tail(&max_pkt, hr + tr + 1,
- NULL, NULL) >= 0);
- CU_ASSERT(odp_packet_extend_head(&max_pkt, 1, NULL, NULL) >= 0);
- CU_ASSERT(odp_packet_len(max_pkt) == max_len);
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == max_len);
-
- /* Now try with a reference in place */
- CU_ASSERT(odp_packet_trunc_tail(&max_pkt, 100, NULL, NULL) >= 0);
- ref = odp_packet_ref(max_pkt, 100);
-
- /* Verify ref lengths */
- CU_ASSERT(ref != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(ref) == max_len - 200);
- if (odp_packet_has_ref(ref) == 1) {
- CU_ASSERT(odp_packet_unshared_len(ref) == 0);
-
- /* And ref's affect on max_pkt */
- CU_ASSERT(odp_packet_has_ref(max_pkt) == 1);
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == 100);
- } else {
- CU_ASSERT(odp_packet_unshared_len(ref) == odp_packet_len(ref));
- CU_ASSERT(odp_packet_unshared_len(max_pkt) ==
- odp_packet_len(max_pkt));
- }
-
- /* Now extend max_pkt and verify effect */
- CU_ASSERT(odp_packet_extend_head(&max_pkt, 10, NULL, NULL) >= 0);
- CU_ASSERT(odp_packet_len(max_pkt) == max_len - 90);
-
- if (odp_packet_has_ref(max_pkt) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == 110);
- }
-
- /* Extend on max_pkt should not affect ref */
- CU_ASSERT(odp_packet_len(ref) == max_len - 200);
-
- if (odp_packet_has_ref(ref) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref) == 0);
- }
-
- /* Now extend ref and verify effect*/
- CU_ASSERT(odp_packet_extend_head(&ref, 20, NULL, NULL) >= 0);
- CU_ASSERT(odp_packet_len(ref) == max_len - 180);
-
- if (odp_packet_has_ref(ref) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref) == 20);
- }
-
- /* Extend on ref should not affect max_pkt */
- CU_ASSERT(odp_packet_len(max_pkt) == max_len - 90);
-
- if (odp_packet_has_ref(max_pkt) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == 110);
- }
-
- /* Trunc max_pkt of all unshared len */
- CU_ASSERT(odp_packet_trunc_head(&max_pkt, 110, NULL, NULL) >= 0);
-
- /* Verify effect on max_pkt */
- CU_ASSERT(odp_packet_len(max_pkt) == max_len - 200);
-
- if (odp_packet_has_ref(max_pkt) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == 0);
- }
-
- /* Verify that ref is unchanged */
- CU_ASSERT(odp_packet_len(ref) == max_len - 180);
-
- if (odp_packet_has_ref(ref) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref) == 20);
- }
-
- /* Free ref and verify that max_pkt is back to being unreferenced */
- odp_packet_free(ref);
- CU_ASSERT(odp_packet_has_ref(max_pkt) == 0);
- CU_ASSERT(odp_packet_len(max_pkt) == max_len - 200);
- CU_ASSERT(odp_packet_unshared_len(max_pkt) == max_len - 200);
-
- odp_packet_free(max_pkt);
-}
-
-void packet_test_align(void)
-{
- odp_packet_t pkt;
- uint32_t pkt_len, seg_len, offset, aligned_seglen;
- void *pkt_data, *aligned_data;
- const uint32_t max_align = 32;
-
- pkt = odp_packet_copy_part(segmented_test_packet, 0,
- odp_packet_len(segmented_test_packet) / 2,
- odp_packet_pool(segmented_test_packet));
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
-
- pkt_len = odp_packet_len(pkt);
- seg_len = odp_packet_seg_len(pkt);
-
- if (odp_packet_is_segmented(pkt)) {
- /* Can't address across segment boundaries */
- CU_ASSERT(odp_packet_align(&pkt, 0, pkt_len, 0) < 0);
-
- offset = seg_len - 5;
- (void)odp_packet_offset(pkt, offset, &seg_len, NULL);
-
- /* Realign for addressability */
- CU_ASSERT(odp_packet_align(&pkt, offset,
- seg_len + 2, 0) >= 0);
-
- /* Alignment doesn't change packet length or contents */
- CU_ASSERT(odp_packet_len(pkt) == pkt_len);
- (void)odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
- packet_compare_offset(pkt, offset,
- segmented_test_packet, offset,
- aligned_seglen);
-
- /* Verify requested contiguous addressabilty */
- CU_ASSERT(aligned_seglen >= seg_len + 2);
- }
-
- /* Get a misaligned address */
- pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
- offset = seg_len - 5;
- pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
- if ((uintptr_t)pkt_data % max_align == 0) {
- offset--;
- pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
- }
-
- /* Realign for alignment */
- CU_ASSERT(odp_packet_align(&pkt, offset, 1, max_align) >= 0);
- aligned_data = odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
-
- CU_ASSERT(odp_packet_len(pkt) == pkt_len);
- packet_compare_offset(pkt, offset, segmented_test_packet, offset,
- aligned_seglen);
- CU_ASSERT((uintptr_t)aligned_data % max_align == 0);
-
- odp_packet_free(pkt);
-}
-
-void packet_test_offset(void)
-{
- odp_packet_t pkt = test_packet;
- uint32_t seg_len, full_seg_len;
- odp_packet_seg_t seg;
- uint8_t *ptr, *start_ptr;
- uint32_t offset;
-
- ptr = odp_packet_offset(pkt, 0, &seg_len, &seg);
- CU_ASSERT(seg != ODP_PACKET_SEG_INVALID);
- CU_ASSERT(seg_len > 1);
- CU_ASSERT(seg_len == odp_packet_seg_len(pkt));
- CU_ASSERT(seg_len == odp_packet_seg_data_len(pkt, seg));
- CU_ASSERT_PTR_NOT_NULL(ptr);
- CU_ASSERT(ptr == odp_packet_data(pkt));
- CU_ASSERT(ptr == odp_packet_seg_data(pkt, seg));
-
- /* Query a second byte */
- start_ptr = ptr;
- full_seg_len = seg_len;
- offset = 1;
-
- ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
- CU_ASSERT(ptr == start_ptr + offset);
- CU_ASSERT(seg_len == full_seg_len - offset);
-
- /* Query the last byte in a segment */
- offset = full_seg_len - 1;
-
- ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
- CU_ASSERT(ptr == start_ptr + offset);
- CU_ASSERT(seg_len == full_seg_len - offset);
-
- /* Query the last byte in a packet */
- offset = odp_packet_len(pkt) - 1;
- ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
- CU_ASSERT(seg_len == 1);
-
- /* Pass NULL to [out] arguments */
- ptr = odp_packet_offset(pkt, 0, NULL, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
-}
-
-void packet_test_ref(void)
-{
- odp_packet_t base_pkt, segmented_base_pkt, hdr_pkt[4],
- ref_pkt[4], refhdr_pkt[4], hdr_cpy;
- uint32_t pkt_len, segmented_pkt_len, hdr_len[4], offset[4], hr[4],
- base_hr, ref_len[4];
- int i;
-
- base_pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
- base_hr = odp_packet_headroom(base_pkt);
- pkt_len = odp_packet_len(test_packet);
- CU_ASSERT_FATAL(base_pkt != ODP_PACKET_INVALID);
-
- segmented_base_pkt =
- odp_packet_copy(segmented_test_packet,
- odp_packet_pool(segmented_test_packet));
- segmented_pkt_len = odp_packet_len(segmented_test_packet);
- CU_ASSERT_FATAL(segmented_base_pkt != ODP_PACKET_INVALID);
-
- CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
-
- hdr_pkt[0] =
- odp_packet_copy_part(segmented_test_packet, 0,
- odp_packet_len(segmented_test_packet) / 4,
- odp_packet_pool(segmented_test_packet));
- CU_ASSERT_FATAL(hdr_pkt[0] != ODP_PACKET_INVALID);
- hdr_len[0] = odp_packet_len(hdr_pkt[0]);
- offset[0] = 0;
-
- hdr_pkt[1] =
- odp_packet_copy_part(segmented_test_packet, 10,
- odp_packet_len(segmented_test_packet) / 8,
- odp_packet_pool(segmented_test_packet));
- CU_ASSERT_FATAL(hdr_pkt[1] != ODP_PACKET_INVALID);
- hdr_len[1] = odp_packet_len(hdr_pkt[1]);
- offset[1] = 5;
-
- hdr_pkt[2] = odp_packet_copy_part(test_packet, 0,
- odp_packet_len(test_packet) / 4,
- odp_packet_pool(test_packet));
- CU_ASSERT_FATAL(hdr_pkt[2] != ODP_PACKET_INVALID);
- hdr_len[2] = odp_packet_len(hdr_pkt[2]);
- offset[2] = 64;
-
- hdr_pkt[3] = odp_packet_copy_part(test_packet, 0,
- odp_packet_len(test_packet) / 4,
- odp_packet_pool(test_packet));
- CU_ASSERT_FATAL(hdr_pkt[3] != ODP_PACKET_INVALID);
- hdr_len[3] = odp_packet_len(hdr_pkt[3]);
- offset[3] = 64;
-
- /* Nothing is a ref or has a ref before we start */
- for (i = 0; i < 4; i++) {
- CU_ASSERT(odp_packet_has_ref(hdr_pkt[i]) == 0);
- CU_ASSERT(odp_packet_len(hdr_pkt[i]) ==
- odp_packet_unshared_len(hdr_pkt[i]));
- }
-
- /* Create a couple of refs */
- refhdr_pkt[0] = odp_packet_ref_pkt(base_pkt, offset[0], hdr_pkt[0]);
- refhdr_pkt[1] = odp_packet_ref_pkt(base_pkt, offset[1], hdr_pkt[1]);
-
- CU_ASSERT(refhdr_pkt[0] != ODP_PACKET_INVALID);
- CU_ASSERT(refhdr_pkt[1] != ODP_PACKET_INVALID);
-
- /* If base packet has now references, ref packet should be also
- * references. */
- if (odp_packet_has_ref(base_pkt) == 1) {
- CU_ASSERT(odp_packet_has_ref(refhdr_pkt[0]) == 1);
- CU_ASSERT(odp_packet_has_ref(refhdr_pkt[1]) == 1);
-
- CU_ASSERT(odp_packet_unshared_len(base_pkt) == 0);
- } else {
- CU_ASSERT(odp_packet_unshared_len(base_pkt) == pkt_len);
- }
-
- CU_ASSERT(odp_packet_len(refhdr_pkt[0]) ==
- hdr_len[0] + pkt_len - offset[0]);
- CU_ASSERT(odp_packet_len(refhdr_pkt[1]) ==
- hdr_len[1] + pkt_len - offset[1]);
-
- if (odp_packet_has_ref(refhdr_pkt[0]) == 1) {
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[0]) == hdr_len[0]);
- } else {
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[0]) ==
- odp_packet_len(refhdr_pkt[0]));
- }
-
- if (odp_packet_has_ref(refhdr_pkt[1]) == 1) {
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[1]) == hdr_len[1]);
- } else {
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[1]) ==
- odp_packet_len(refhdr_pkt[1]));
- }
-
- packet_compare_offset(refhdr_pkt[0], hdr_len[0],
- base_pkt, offset[0],
- pkt_len - offset[0]);
-
- packet_compare_offset(refhdr_pkt[1], hdr_len[1],
- base_pkt, offset[1],
- pkt_len - offset[1]);
-
- /* See if compound references are supported and if so that they
- * operate properly */
- hdr_cpy = odp_packet_copy(hdr_pkt[2], odp_packet_pool(hdr_pkt[2]));
- CU_ASSERT_FATAL(hdr_cpy != ODP_PACKET_INVALID);
-
- refhdr_pkt[2] = odp_packet_ref_pkt(refhdr_pkt[0], 2, hdr_cpy);
- CU_ASSERT(refhdr_pkt[2] != ODP_PACKET_INVALID);
-
- if (odp_packet_has_ref(refhdr_pkt[2]) == 1) {
- CU_ASSERT(odp_packet_has_ref(refhdr_pkt[0]) == 1);
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[2]) == hdr_len[2]);
- CU_ASSERT(odp_packet_unshared_len(refhdr_pkt[0]) == 2);
- }
-
- /* Delete the refs */
- odp_packet_free(refhdr_pkt[0]);
- odp_packet_free(refhdr_pkt[1]);
- odp_packet_free(refhdr_pkt[2]);
-
- /* Verify that base_pkt no longer has a ref */
- CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
-
- /* Now create a two more shared refs */
- refhdr_pkt[2] = odp_packet_ref_pkt(base_pkt, offset[2], hdr_pkt[2]);
- refhdr_pkt[3] = odp_packet_ref_pkt(base_pkt, offset[3], hdr_pkt[3]);
-
- CU_ASSERT(hdr_pkt[2] != ODP_PACKET_INVALID);
- CU_ASSERT(hdr_pkt[3] != ODP_PACKET_INVALID);
-
- if (odp_packet_has_ref(base_pkt) == 1) {
- CU_ASSERT(odp_packet_has_ref(refhdr_pkt[2]) == 1);
- CU_ASSERT(odp_packet_has_ref(refhdr_pkt[3]) == 1);
- }
-
- CU_ASSERT(odp_packet_len(refhdr_pkt[2]) ==
- odp_packet_len(refhdr_pkt[3]));
-
- packet_compare_offset(refhdr_pkt[2], 0,
- refhdr_pkt[3], 0,
- odp_packet_len(hdr_pkt[2]));
-
- /* Delete the headers */
- odp_packet_free(refhdr_pkt[2]);
- odp_packet_free(refhdr_pkt[3]);
-
- /* Verify that base_pkt is no longer ref'd */
- CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
-
- /* Create a static reference */
- ref_pkt[0] = odp_packet_ref_static(base_pkt);
- CU_ASSERT(ref_pkt[0] != ODP_PACKET_INVALID);
-
- if (odp_packet_has_ref(base_pkt) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_has_ref(ref_pkt[0]) == 1);
- }
-
- CU_ASSERT(odp_packet_len(ref_pkt[0]) == odp_packet_len(base_pkt));
- packet_compare_offset(ref_pkt[0], 0, base_pkt, 0,
- odp_packet_len(base_pkt));
-
- /* Now delete it */
- odp_packet_free(ref_pkt[0]);
- CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
-
- /* Create references */
- ref_pkt[0] = odp_packet_ref(segmented_base_pkt, offset[0]);
- CU_ASSERT_FATAL(ref_pkt[0] != ODP_PACKET_INVALID);
-
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_has_ref(segmented_base_pkt) == 1);
- }
-
- ref_pkt[1] = odp_packet_ref(segmented_base_pkt, offset[1]);
- CU_ASSERT_FATAL(ref_pkt[1] != ODP_PACKET_INVALID);
-
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_has_ref(segmented_base_pkt) == 1);
- }
-
- /* Verify reference lengths */
- CU_ASSERT(odp_packet_len(ref_pkt[0]) == segmented_pkt_len - offset[0]);
- CU_ASSERT(odp_packet_len(ref_pkt[1]) == segmented_pkt_len - offset[1]);
-
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) == 0);
- }
-
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 0);
- }
-
- /* Free the base pkts -- references should still be valid */
- odp_packet_free(base_pkt);
- odp_packet_free(segmented_base_pkt);
-
- packet_compare_offset(ref_pkt[0], 0,
- segmented_test_packet, offset[0],
- segmented_pkt_len - offset[0]);
- packet_compare_offset(ref_pkt[1], 0,
- segmented_test_packet, offset[1],
- segmented_pkt_len - offset[1]);
-
- /* Verify we can modify the refs */
- hr[0] = odp_packet_headroom(ref_pkt[0]);
- hr[1] = odp_packet_headroom(ref_pkt[1]);
-
- CU_ASSERT(odp_packet_push_head(ref_pkt[0], hr[0]) != NULL);
-
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) == hr[0]);
- }
-
- CU_ASSERT(odp_packet_len(ref_pkt[0]) ==
- hr[0] + segmented_pkt_len - offset[0]);
-
- CU_ASSERT(odp_packet_pull_head(ref_pkt[0], hr[0] / 2) != NULL);
-
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) ==
- hr[0] - (hr[0] / 2));
- }
-
- if (hr[1] > 0) {
- CU_ASSERT(odp_packet_push_head(ref_pkt[1], 1) != NULL);
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 1);
- }
- CU_ASSERT(odp_packet_len(ref_pkt[1]) ==
- 1 + segmented_pkt_len - offset[1]);
- CU_ASSERT(odp_packet_pull_head(ref_pkt[1], 1) != NULL);
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 0);
- }
- CU_ASSERT(odp_packet_len(ref_pkt[1]) ==
- segmented_pkt_len - offset[1]);
- }
-
- odp_packet_free(ref_pkt[0]);
- odp_packet_free(ref_pkt[1]);
-
- /* Verify we can modify base packet after reference is created */
- base_pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
-
- ref_pkt[1] = odp_packet_ref(base_pkt, offset[1]);
- CU_ASSERT_FATAL(ref_pkt[1] != ODP_PACKET_INVALID);
- ref_len[1] = odp_packet_len(ref_pkt[1]);
- CU_ASSERT(ref_len[1] == odp_packet_len(base_pkt) - offset[1]);
-
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 0);
- }
-
- CU_ASSERT(odp_packet_push_head(base_pkt, base_hr / 2) != NULL);
-
- if (odp_packet_has_ref(base_pkt) == 1) {
- CU_ASSERT(odp_packet_unshared_len(base_pkt) ==
- base_hr / 2 + offset[1]);
- }
- CU_ASSERT(odp_packet_len(ref_pkt[1]) == ref_len[1]);
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 0);
- }
-
- ref_pkt[0] = odp_packet_ref(base_pkt, offset[0]);
- CU_ASSERT_FATAL(ref_pkt[0] != ODP_PACKET_INVALID);
- ref_len[0] = odp_packet_len(ref_pkt[0]);
- CU_ASSERT(ref_len[0] = odp_packet_len(base_pkt) - offset[0]);
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) == 0);
- }
-
- CU_ASSERT(odp_packet_push_head(base_pkt,
- base_hr - base_hr / 2) != NULL);
- if (odp_packet_has_ref(base_pkt) == 1) {
- CU_ASSERT(odp_packet_unshared_len(base_pkt) ==
- base_hr - base_hr / 2 + offset[0]);
- }
- CU_ASSERT(odp_packet_len(ref_pkt[1]) == ref_len[1]);
- CU_ASSERT(odp_packet_len(ref_pkt[0]) == ref_len[0]);
-
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == 0);
- }
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) == 0);
- }
-
- hr[0] = odp_packet_headroom(ref_pkt[0]);
- hr[1] = odp_packet_headroom(ref_pkt[1]);
- CU_ASSERT(odp_packet_push_head(ref_pkt[0], hr[0]) != NULL);
- CU_ASSERT(odp_packet_push_head(ref_pkt[1], hr[1]) != NULL);
- if (odp_packet_has_ref(ref_pkt[0]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[0]) == hr[0]);
- }
- if (odp_packet_has_ref(ref_pkt[1]) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(ref_pkt[1]) == hr[1]);
- }
- if (odp_packet_has_ref(base_pkt) == 1) {
- /* CU_ASSERT needs braces */
- CU_ASSERT(odp_packet_unshared_len(base_pkt) ==
- base_hr - base_hr / 2 + offset[0]);
- }
-
- odp_packet_free(base_pkt);
- odp_packet_free(ref_pkt[0]);
- odp_packet_free(ref_pkt[1]);
-}
-
-odp_testinfo_t packet_suite[] = {
- ODP_TEST_INFO(packet_test_alloc_free),
- ODP_TEST_INFO(packet_test_alloc_free_multi),
- ODP_TEST_INFO(packet_test_alloc_segmented),
- ODP_TEST_INFO(packet_test_basic_metadata),
- ODP_TEST_INFO(packet_test_debug),
- ODP_TEST_INFO(packet_test_segments),
- ODP_TEST_INFO(packet_test_length),
- ODP_TEST_INFO(packet_test_prefetch),
- ODP_TEST_INFO(packet_test_headroom),
- ODP_TEST_INFO(packet_test_tailroom),
- ODP_TEST_INFO(packet_test_context),
- ODP_TEST_INFO(packet_test_event_conversion),
- ODP_TEST_INFO(packet_test_layer_offsets),
- ODP_TEST_INFO(packet_test_segment_last),
- ODP_TEST_INFO(packet_test_in_flags),
- ODP_TEST_INFO(packet_test_error_flags),
- ODP_TEST_INFO(packet_test_add_rem_data),
- ODP_TEST_INFO(packet_test_copy),
- ODP_TEST_INFO(packet_test_copydata),
- ODP_TEST_INFO(packet_test_concatsplit),
- ODP_TEST_INFO(packet_test_concat_small),
- ODP_TEST_INFO(packet_test_concat_extend_trunc),
- ODP_TEST_INFO(packet_test_extend_small),
- ODP_TEST_INFO(packet_test_extend_large),
- ODP_TEST_INFO(packet_test_extend_mix),
- ODP_TEST_INFO(packet_test_extend_ref),
- ODP_TEST_INFO(packet_test_align),
- ODP_TEST_INFO(packet_test_offset),
- ODP_TEST_INFO(packet_test_ref),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t packet_suites[] = {
- { .pName = "packet tests",
- .pTests = packet_suite,
- .pInitFunc = packet_suite_init,
- .pCleanupFunc = packet_suite_term,
- },
- ODP_SUITE_INFO_NULL,
-};
-
-int packet_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(packet_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/packet/packet.h b/test/common_plat/validation/api/packet/packet.h
deleted file mode 100644
index 783b7a117..000000000
--- a/test/common_plat/validation/api/packet/packet.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_PACKET_H_
-#define _ODP_TEST_PACKET_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void packet_test_alloc_free(void);
-void packet_test_alloc_free_multi(void);
-void packet_test_alloc_segmented(void);
-void packet_test_event_conversion(void);
-void packet_test_basic_metadata(void);
-void packet_test_length(void);
-void packet_test_prefetch(void);
-void packet_test_debug(void);
-void packet_test_context(void);
-void packet_test_layer_offsets(void);
-void packet_test_headroom(void);
-void packet_test_tailroom(void);
-void packet_test_segments(void);
-void packet_test_segment_last(void);
-void packet_test_in_flags(void);
-void packet_test_error_flags(void);
-void packet_test_add_rem_data(void);
-void packet_test_copy(void);
-void packet_test_copydata(void);
-void packet_test_concatsplit(void);
-void packet_test_concat_small(void);
-void packet_test_concat_extend_trunc(void);
-void packet_test_extend_small(void);
-void packet_test_extend_large(void);
-void packet_test_extend_mix(void);
-void packet_test_extend_ref(void);
-void packet_test_align(void);
-void packet_test_offset(void);
-void packet_test_ref(void);
-
-/* test arrays: */
-extern odp_testinfo_t packet_suite[];
-
-/* test array init/term functions: */
-int packet_suite_init(void);
-int packet_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t packet_suites[];
-
-/* main test program: */
-int packet_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/packet/packet_main.c b/test/common_plat/validation/api/packet/packet_main.c
deleted file mode 100644
index 511bb104b..000000000
--- a/test/common_plat/validation/api/packet/packet_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "packet.h"
-
-int main(int argc, char *argv[])
-{
- return packet_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/pktio/Makefile.am b/test/common_plat/validation/api/pktio/Makefile.am
deleted file mode 100644
index 466d690dc..000000000
--- a/test/common_plat/validation/api/pktio/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestpktio.la
-libtestpktio_la_SOURCES = pktio.c
-
-test_PROGRAMS = pktio_main$(EXEEXT)
-dist_pktio_main_SOURCES = pktio_main.c
-pktio_main_LDADD = libtestpktio.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = pktio.h
diff --git a/test/common_plat/validation/api/pktio/pktio.c b/test/common_plat/validation/api/pktio/pktio.c
deleted file mode 100644
index 54f206efc..000000000
--- a/test/common_plat/validation/api/pktio/pktio.c
+++ /dev/null
@@ -1,2205 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-
-#include <odp/helper/odph_api.h>
-
-#include <stdlib.h>
-#include "pktio.h"
-
-#define PKT_BUF_NUM 32
-#define PKT_BUF_SIZE (9 * 1024)
-#define PKT_LEN_NORMAL 64
-#define PKT_LEN_MAX (PKT_BUF_SIZE - ODPH_ETHHDR_LEN - \
- ODPH_IPV4HDR_LEN - ODPH_UDPHDR_LEN)
-
-#define USE_MTU 0
-#define MAX_NUM_IFACES 2
-#define TEST_SEQ_INVALID ((uint32_t)~0)
-#define TEST_SEQ_MAGIC 0x92749451
-#define TX_BATCH_LEN 4
-#define MAX_QUEUES 128
-
-#define PKTIN_TS_INTERVAL (50 * ODP_TIME_MSEC_IN_NS)
-#define PKTIN_TS_MIN_RES 1000
-#define PKTIN_TS_MAX_RES 10000000000
-#define PKTIN_TS_CMP_RES 1
-
-#define PKTIO_SRC_MAC {1, 2, 3, 4, 5, 6}
-#define PKTIO_DST_MAC {6, 5, 4, 3, 2, 1}
-#undef DEBUG_STATS
-
-/** interface names used for testing */
-static const char *iface_name[MAX_NUM_IFACES];
-
-/** number of interfaces being used (1=loopback, 2=pair) */
-static int num_ifaces;
-
-/** while testing real-world interfaces additional time may be
- needed for external network to enable link to pktio
- interface that just become up.*/
-static bool wait_for_network;
-
-/** local container for pktio attributes */
-typedef struct {
- const char *name;
- odp_pktio_t id;
- odp_pktout_queue_t pktout;
- odp_queue_t queue_out;
- odp_queue_t inq;
- odp_pktin_mode_t in_mode;
-} pktio_info_t;
-
-/** magic number and sequence at start of UDP payload */
-typedef struct ODP_PACKED {
- odp_u32be_t magic;
- odp_u32be_t seq;
-} pkt_head_t;
-
-/** magic number at end of UDP payload */
-typedef struct ODP_PACKED {
- odp_u32be_t magic;
-} pkt_tail_t;
-
-/** Run mode */
-typedef enum {
- PKT_POOL_UNSEGMENTED,
- PKT_POOL_SEGMENTED,
-} pkt_segmented_e;
-
-typedef enum {
- TXRX_MODE_SINGLE,
- TXRX_MODE_MULTI,
- TXRX_MODE_MULTI_EVENT
-} txrx_mode_e;
-
-typedef enum {
- RECV_TMO,
- RECV_MQ_TMO,
- RECV_MQ_TMO_NO_IDX,
-} recv_tmo_mode_e;
-
-/** size of transmitted packets */
-static uint32_t packet_len = PKT_LEN_NORMAL;
-
-/** default packet pool */
-odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
-
-/** sequence number of IP packets */
-odp_atomic_u32_t ip_seq;
-
-/** Type of pool segmentation */
-pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
-
-odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
-
-static inline void _pktio_wait_linkup(odp_pktio_t pktio)
-{
- /* wait 1 second for link up */
- uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
- int wait_num = 100;
- int i;
- int ret = -1;
-
- for (i = 0; i < wait_num; i++) {
- ret = odp_pktio_link_status(pktio);
- if (ret < 0 || ret == 1)
- break;
- /* link is down, call status again after delay */
- odp_time_wait_ns(wait_ns);
- }
-
- if (ret != -1) {
- /* assert only if link state supported and
- * it's down. */
- CU_ASSERT_FATAL(ret == 1);
- }
-}
-
-static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa)
-{
- uint32_t seg_len;
-
- seg_len = capa->pkt.max_seg_len ? capa->pkt.max_seg_len : PKT_BUF_SIZE;
-
- switch (pool_segmentation) {
- case PKT_POOL_SEGMENTED:
- /* Force segment to minimum size */
- params->pkt.seg_len = 0;
- params->pkt.len = PKT_BUF_SIZE;
- break;
- case PKT_POOL_UNSEGMENTED:
- default:
- params->pkt.seg_len = seg_len;
- params->pkt.len = PKT_BUF_SIZE;
- break;
- }
-}
-
-static void pktio_pkt_set_macs(odp_packet_t pkt,
- odp_pktio_t src, odp_pktio_t dst)
-{
- uint32_t len;
- odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &len);
- int ret;
-
- ret = odp_pktio_mac_addr(src, &eth->src, ODP_PKTIO_MACADDR_MAXSIZE);
- CU_ASSERT(ret == ODPH_ETHADDR_LEN);
- CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
-
- ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
- CU_ASSERT(ret == ODPH_ETHADDR_LEN);
- CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
-}
-
-static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
-{
- static uint32_t tstseq;
- size_t off;
- pkt_head_t head;
- pkt_tail_t tail;
-
- off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID) {
- CU_FAIL("packet L4 offset not set");
- return TEST_SEQ_INVALID;
- }
-
- head.magic = TEST_SEQ_MAGIC;
- head.seq = tstseq;
-
- off += ODPH_UDPHDR_LEN;
- if (odp_packet_copy_from_mem(pkt, off, sizeof(head), &head) != 0)
- return TEST_SEQ_INVALID;
-
- tail.magic = TEST_SEQ_MAGIC;
- off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
- if (odp_packet_copy_from_mem(pkt, off, sizeof(tail), &tail) != 0)
- return TEST_SEQ_INVALID;
-
- tstseq++;
-
- return head.seq;
-}
-
-static uint32_t pktio_pkt_seq(odp_packet_t pkt)
-{
- size_t off;
- uint32_t seq = TEST_SEQ_INVALID;
- pkt_head_t head;
- pkt_tail_t tail;
-
- if (pkt == ODP_PACKET_INVALID) {
- fprintf(stderr, "error: pkt invalid\n");
- return TEST_SEQ_INVALID;
- }
-
- off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID) {
- fprintf(stderr, "error: offset invalid\n");
- return TEST_SEQ_INVALID;
- }
-
- off += ODPH_UDPHDR_LEN;
- if (odp_packet_copy_to_mem(pkt, off, sizeof(head), &head) != 0) {
- fprintf(stderr, "error: header copy failed\n");
- return TEST_SEQ_INVALID;
- }
-
- if (head.magic != TEST_SEQ_MAGIC) {
- fprintf(stderr, "error: header magic invalid %" PRIu32 "\n",
- head.magic);
- return TEST_SEQ_INVALID;
- }
-
- if (odp_packet_len(pkt) == packet_len) {
- off = packet_len - sizeof(tail);
- if (odp_packet_copy_to_mem(pkt, off, sizeof(tail),
- &tail) != 0) {
- fprintf(stderr, "error: header copy failed\n");
- return TEST_SEQ_INVALID;
- }
-
- if (tail.magic == TEST_SEQ_MAGIC) {
- seq = head.seq;
- CU_ASSERT(seq != TEST_SEQ_INVALID);
- } else {
- fprintf(stderr,
- "error: tail magic invalid %" PRIu32 "\n",
- tail.magic);
- }
- } else {
- fprintf(stderr,
- "error: packet length invalid: "
- "%" PRIu32 "(%" PRIu32 ")\n",
- odp_packet_len(pkt), packet_len);
- }
-
- return seq;
-}
-
-static uint32_t pktio_init_packet(odp_packet_t pkt)
-{
- odph_ethhdr_t *eth;
- odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
- char *buf;
- uint16_t seq;
- uint8_t src_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_SRC_MAC;
- uint8_t dst_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_DST_MAC;
- int pkt_len = odp_packet_len(pkt);
-
- buf = odp_packet_data(pkt);
-
- /* Ethernet */
- odp_packet_l2_offset_set(pkt, 0);
- eth = (odph_ethhdr_t *)buf;
- memcpy(eth->src.addr, src_mac, ODPH_ETHADDR_LEN);
- memcpy(eth->dst.addr, dst_mac, ODPH_ETHADDR_LEN);
- eth->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
-
- /* IP */
- odp_packet_l3_offset_set(pkt, ODPH_ETHHDR_LEN);
- ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);
- ip->dst_addr = odp_cpu_to_be_32(0x0a000064);
- ip->src_addr = odp_cpu_to_be_32(0x0a000001);
- ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
- ip->tot_len = odp_cpu_to_be_16(pkt_len - ODPH_ETHHDR_LEN);
- ip->ttl = 128;
- ip->proto = ODPH_IPPROTO_UDP;
- seq = odp_atomic_fetch_inc_u32(&ip_seq);
- ip->id = odp_cpu_to_be_16(seq);
- ip->chksum = 0;
- odph_ipv4_csum_update(pkt);
-
- /* UDP */
- odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
- udp = (odph_udphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
- udp->src_port = odp_cpu_to_be_16(12049);
- udp->dst_port = odp_cpu_to_be_16(12050);
- udp->length = odp_cpu_to_be_16(pkt_len -
- ODPH_ETHHDR_LEN - ODPH_IPV4HDR_LEN);
- udp->chksum = 0;
-
- return pktio_pkt_set_seq(pkt);
-}
-
-static int pktio_fixup_checksums(odp_packet_t pkt)
-{
- odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
- uint32_t len;
-
- ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, &len);
-
- if (ip->proto != ODPH_IPPROTO_UDP) {
- CU_FAIL("unexpected L4 protocol");
- return -1;
- }
-
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, &len);
-
- ip->chksum = 0;
- odph_ipv4_csum_update(pkt);
- udp->chksum = 0;
- udp->chksum = odph_ipv4_udp_chksum(pkt);
-
- return 0;
-}
-
-static int default_pool_create(void)
-{
- odp_pool_param_t params;
- odp_pool_capability_t pool_capa;
- char pool_name[ODP_POOL_NAME_LEN];
-
- if (odp_pool_capability(&pool_capa) != 0)
- return -1;
-
- if (default_pkt_pool != ODP_POOL_INVALID)
- return -1;
-
- odp_pool_param_init(&params);
- set_pool_len(&params, &pool_capa);
- params.pkt.num = PKT_BUF_NUM;
- params.type = ODP_POOL_PACKET;
-
- snprintf(pool_name, sizeof(pool_name),
- "pkt_pool_default_%d", pool_segmentation);
- default_pkt_pool = odp_pool_create(pool_name, &params);
- if (default_pkt_pool == ODP_POOL_INVALID)
- return -1;
-
- return 0;
-}
-
-static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
- odp_pktout_mode_t omode)
-{
- odp_pktio_t pktio;
- odp_pktio_param_t pktio_param;
- odp_pktin_queue_param_t pktin_param;
- const char *iface = iface_name[iface_idx];
-
- odp_pktio_param_init(&pktio_param);
-
- pktio_param.in_mode = imode;
- pktio_param.out_mode = omode;
-
- pktio = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(odp_pktio_to_u64(pktio) !=
- odp_pktio_to_u64(ODP_PKTIO_INVALID));
-
- odp_pktin_queue_param_init(&pktin_param);
-
- /* Atomic queue when in scheduled mode */
- pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
-
- /* By default, single input and output queue in all modes. Config can
- * be overridden before starting the interface. */
- CU_ASSERT(odp_pktin_queue_config(pktio, &pktin_param) == 0);
- CU_ASSERT(odp_pktout_queue_config(pktio, NULL) == 0);
-
- if (wait_for_network)
- odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
-
- return pktio;
-}
-
-static int flush_input_queue(odp_pktio_t pktio, odp_pktin_mode_t imode)
-{
- odp_event_t ev;
- odp_queue_t queue = ODP_QUEUE_INVALID;
-
- if (imode == ODP_PKTIN_MODE_QUEUE) {
- /* Assert breaks else-if without brackets */
- CU_ASSERT_FATAL(odp_pktin_event_queue(pktio, &queue, 1) == 1);
- } else if (imode == ODP_PKTIN_MODE_DIRECT) {
- return 0;
- }
-
- /* flush any pending events */
- while (1) {
- if (queue != ODP_QUEUE_INVALID)
- ev = odp_queue_deq(queue);
- else
- ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
-
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
- else
- break;
- }
-
- return 0;
-}
-
-static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
- odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
- if (pkt_tbl[i] == ODP_PACKET_INVALID)
- break;
-
- pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
- if (pkt_seq[i] == TEST_SEQ_INVALID) {
- odp_packet_free(pkt_tbl[i]);
- break;
- }
-
- pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
-
- if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
- odp_packet_free(pkt_tbl[i]);
- break;
- }
- }
-
- return i;
-}
-
-static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
- int num, txrx_mode_e mode)
-{
- odp_event_t evt_tbl[num];
- int num_evts = 0;
- int num_pkts = 0;
- int i;
-
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_DIRECT) {
- odp_pktin_queue_t pktin;
-
- CU_ASSERT_FATAL(odp_pktin_queue(pktio_rx->id, &pktin, 1) == 1);
- return odp_pktin_recv(pktin, pkt_tbl, num);
- }
-
- if (mode == TXRX_MODE_MULTI) {
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
- num_evts = odp_queue_deq_multi(pktio_rx->inq, evt_tbl,
- num);
- else
- num_evts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
- evt_tbl, num);
- } else {
- odp_event_t evt_tmp;
-
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
- evt_tmp = odp_queue_deq(pktio_rx->inq);
- else
- evt_tmp = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
-
- if (evt_tmp != ODP_EVENT_INVALID)
- evt_tbl[num_evts++] = evt_tmp;
- }
-
- /* convert events to packets, discarding any non-packet events */
- for (i = 0; i < num_evts; ++i) {
- if (odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET)
- pkt_tbl[num_pkts++] = odp_packet_from_event(evt_tbl[i]);
- else
- odp_event_free(evt_tbl[i]);
- }
-
- return num_pkts;
-}
-
-static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
- uint32_t seq_tbl[], int num, txrx_mode_e mode,
- uint64_t ns)
-{
- odp_time_t wait_time, end;
- int num_rx = 0;
- int i;
- odp_packet_t pkt_tmp[num];
-
- wait_time = odp_time_local_from_ns(ns);
- end = odp_time_sum(odp_time_local(), wait_time);
-
- do {
- int n = get_packets(pktio_rx, pkt_tmp, num - num_rx, mode);
-
- if (n < 0)
- break;
-
- for (i = 0; i < n; ++i) {
- if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
- pkt_tbl[num_rx++] = pkt_tmp[i];
- else
- odp_packet_free(pkt_tmp[i]);
- }
- } while (num_rx < num && odp_time_cmp(end, odp_time_local()) > 0);
-
- return num_rx;
-}
-
-static int recv_packets_tmo(odp_pktio_t pktio, odp_packet_t pkt_tbl[],
- uint32_t seq_tbl[], int num, recv_tmo_mode_e mode,
- uint64_t tmo, uint64_t ns)
-{
- odp_packet_t pkt_tmp[num];
- odp_pktin_queue_t pktin[MAX_QUEUES];
- odp_time_t ts1, ts2;
- int num_rx = 0;
- int num_q;
- int i;
- int n;
- unsigned from_val;
- unsigned *from = NULL;
-
- if (mode == RECV_MQ_TMO)
- from = &from_val;
-
- num_q = odp_pktin_queue(pktio, pktin, MAX_QUEUES);
- CU_ASSERT_FATAL(num_q > 0);
-
- /** Multiple odp_pktin_recv_tmo()/odp_pktin_recv_mq_tmo() calls may be
- * required to discard possible non-test packets. */
- do {
- ts1 = odp_time_global();
- if (mode == RECV_TMO)
- n = odp_pktin_recv_tmo(pktin[0], pkt_tmp, num - num_rx,
- tmo);
- else
- n = odp_pktin_recv_mq_tmo(pktin, (unsigned)num_q,
- from, pkt_tmp,
- num - num_rx, tmo);
- ts2 = odp_time_global();
-
- if (n <= 0)
- break;
- for (i = 0; i < n; i++) {
- if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
- pkt_tbl[num_rx++] = pkt_tmp[i];
- else
- odp_packet_free(pkt_tmp[i]);
- }
- if (mode == RECV_MQ_TMO)
- CU_ASSERT(from_val < (unsigned)num_q);
- } while (num_rx < num);
-
- if (tmo == ODP_PKTIN_WAIT)
- CU_ASSERT(num_rx == num);
- if (num_rx < num)
- CU_ASSERT(odp_time_to_ns(odp_time_diff(ts2, ts1)) >= ns);
-
- return num_rx;
-}
-
-static int send_packets(odp_pktout_queue_t pktout,
- odp_packet_t *pkt_tbl, unsigned pkts)
-{
- int ret;
- unsigned sent = 0;
-
- while (sent < pkts) {
- ret = odp_pktout_send(pktout, &pkt_tbl[sent], pkts - sent);
-
- if (ret < 0) {
- CU_FAIL_FATAL("failed to send test packet");
- return -1;
- }
-
- sent += ret;
- }
-
- return 0;
-}
-
-static int send_packet_events(odp_queue_t queue,
- odp_packet_t *pkt_tbl, unsigned pkts)
-{
- int ret;
- unsigned i;
- unsigned sent = 0;
- odp_event_t ev_tbl[pkts];
-
- for (i = 0; i < pkts; i++)
- ev_tbl[i] = odp_packet_to_event(pkt_tbl[i]);
-
- while (sent < pkts) {
- ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
-
- if (ret < 0) {
- CU_FAIL_FATAL("failed to send test packet as events");
- return -1;
- }
-
- sent += ret;
- }
-
- return 0;
-}
-
-static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
- int num_pkts, txrx_mode_e mode)
-{
- odp_packet_t tx_pkt[num_pkts];
- odp_packet_t rx_pkt[num_pkts];
- uint32_t tx_seq[num_pkts];
- int i, ret, num_rx;
-
- if (packet_len == USE_MTU) {
- odp_pool_capability_t pool_capa;
- uint32_t mtu;
-
- mtu = odp_pktio_mtu(pktio_a->id);
- if (odp_pktio_mtu(pktio_b->id) < mtu)
- mtu = odp_pktio_mtu(pktio_b->id);
- CU_ASSERT_FATAL(mtu > 0);
- packet_len = mtu;
- if (packet_len > PKT_LEN_MAX)
- packet_len = PKT_LEN_MAX;
-
- CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
-
- if (pool_capa.pkt.max_len &&
- packet_len > pool_capa.pkt.max_len)
- packet_len = pool_capa.pkt.max_len;
- }
-
- /* generate test packets to send */
- ret = create_packets(tx_pkt, tx_seq, num_pkts, pktio_a->id,
- pktio_b->id);
- if (ret != num_pkts) {
- CU_FAIL("failed to generate test packets");
- return;
- }
-
- /* send packet(s) out */
- if (mode == TXRX_MODE_SINGLE) {
- for (i = 0; i < num_pkts; ++i) {
- ret = odp_pktout_send(pktio_a->pktout, &tx_pkt[i], 1);
- if (ret != 1) {
- CU_FAIL_FATAL("failed to send test packet");
- odp_packet_free(tx_pkt[i]);
- return;
- }
- }
- } else if (mode == TXRX_MODE_MULTI) {
- send_packets(pktio_a->pktout, tx_pkt, num_pkts);
- } else {
- send_packet_events(pktio_a->queue_out, tx_pkt, num_pkts);
- }
-
- /* and wait for them to arrive back */
- num_rx = wait_for_packets(pktio_b, rx_pkt, tx_seq,
- num_pkts, mode, ODP_TIME_SEC_IN_NS);
- CU_ASSERT(num_rx == num_pkts);
-
- for (i = 0; i < num_rx; ++i) {
- CU_ASSERT_FATAL(rx_pkt[i] != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_input(rx_pkt[i]) == pktio_b->id);
- CU_ASSERT(odp_packet_has_error(rx_pkt[i]) == 0);
- odp_packet_free(rx_pkt[i]);
- }
-}
-
-static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
- txrx_mode_e mode)
-{
- int ret, i, if_b;
- pktio_info_t pktios[MAX_NUM_IFACES];
- pktio_info_t *io;
-
- /* create pktios and associate input/output queues */
- for (i = 0; i < num_ifaces; ++i) {
- odp_pktout_queue_t pktout;
- odp_queue_t queue;
- odp_pktout_mode_t out_mode = ODP_PKTOUT_MODE_DIRECT;
-
- if (mode == TXRX_MODE_MULTI_EVENT)
- out_mode = ODP_PKTOUT_MODE_QUEUE;
-
- io = &pktios[i];
-
- io->name = iface_name[i];
- io->id = create_pktio(i, in_mode, out_mode);
- if (io->id == ODP_PKTIO_INVALID) {
- CU_FAIL("failed to open iface");
- return;
- }
-
- if (mode == TXRX_MODE_MULTI_EVENT) {
- CU_ASSERT_FATAL(odp_pktout_event_queue(io->id,
- &queue, 1) == 1);
- } else {
- CU_ASSERT_FATAL(odp_pktout_queue(io->id,
- &pktout, 1) == 1);
- io->pktout = pktout;
- queue = ODP_QUEUE_INVALID;
- }
-
- io->queue_out = queue;
- io->in_mode = in_mode;
-
- if (in_mode == ODP_PKTIN_MODE_QUEUE) {
- CU_ASSERT_FATAL(odp_pktin_event_queue(io->id, &queue, 1)
- == 1);
- io->inq = queue;
- } else {
- io->inq = ODP_QUEUE_INVALID;
- }
-
- ret = odp_pktio_start(io->id);
- CU_ASSERT(ret == 0);
-
- _pktio_wait_linkup(io->id);
- }
-
- /* if we have two interfaces then send through one and receive on
- * another but if there's only one assume it's a loopback */
- if_b = (num_ifaces == 1) ? 0 : 1;
- pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts, mode);
-
- for (i = 0; i < num_ifaces; ++i) {
- ret = odp_pktio_stop(pktios[i].id);
- CU_ASSERT_FATAL(ret == 0);
- flush_input_queue(pktios[i].id, in_mode);
- ret = odp_pktio_close(pktios[i].id);
- CU_ASSERT(ret == 0);
- }
-}
-
-void pktio_test_plain_queue(void)
-{
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE);
-}
-
-void pktio_test_plain_multi(void)
-{
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI);
-}
-
-void pktio_test_plain_multi_event(void)
-{
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
-}
-
-void pktio_test_sched_queue(void)
-{
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_SINGLE);
-}
-
-void pktio_test_sched_multi(void)
-{
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI);
-}
-
-void pktio_test_sched_multi_event(void)
-{
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
-}
-
-void pktio_test_recv(void)
-{
- test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE);
-}
-
-void pktio_test_recv_multi(void)
-{
- test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI);
-}
-
-void pktio_test_recv_multi_event(void)
-{
- test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
-}
-
-void pktio_test_recv_queue(void)
-{
- odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t in_queue_param;
- odp_pktout_queue_param_t out_queue_param;
- odp_pktout_queue_t pktout_queue[MAX_QUEUES];
- odp_pktin_queue_t pktin_queue[MAX_QUEUES];
- odp_packet_t pkt_tbl[TX_BATCH_LEN];
- odp_packet_t tmp_pkt[TX_BATCH_LEN];
- uint32_t pkt_seq[TX_BATCH_LEN];
- odp_time_t wait_time, end;
- int num_rx = 0;
- int num_queues;
- int ret;
- int i;
-
- CU_ASSERT_FATAL(num_ifaces >= 1);
-
- /* Open and configure interfaces */
- for (i = 0; i < num_ifaces; ++i) {
- pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
-
- odp_pktin_queue_param_init(&in_queue_param);
- num_queues = capa.max_input_queues;
- in_queue_param.num_queues = num_queues;
- in_queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
- in_queue_param.hash_proto.proto.ipv4_udp = 1;
-
- ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
- CU_ASSERT_FATAL(ret == 0);
-
- odp_pktout_queue_param_init(&out_queue_param);
- out_queue_param.num_queues = capa.max_output_queues;
-
- ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
- CU_ASSERT_FATAL(ret == 0);
-
- CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
- }
-
- for (i = 0; i < num_ifaces; ++i)
- _pktio_wait_linkup(pktio[i]);
-
- pktio_tx = pktio[0];
- if (num_ifaces > 1)
- pktio_rx = pktio[1];
- else
- pktio_rx = pktio_tx;
-
- /* Allocate and initialize test packets */
- ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
- pktio_rx);
- if (ret != TX_BATCH_LEN) {
- CU_FAIL("Failed to generate test packets");
- return;
- }
-
- /* Send packets */
- num_queues = odp_pktout_queue(pktio_tx, pktout_queue, MAX_QUEUES);
- CU_ASSERT_FATAL(num_queues > 0);
- if (num_queues > MAX_QUEUES)
- num_queues = MAX_QUEUES;
-
- ret = odp_pktout_send(pktout_queue[num_queues - 1], pkt_tbl,
- TX_BATCH_LEN);
- CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
-
- /* Receive packets */
- num_queues = odp_pktin_queue(pktio_rx, pktin_queue, MAX_QUEUES);
- CU_ASSERT_FATAL(num_queues > 0);
- if (num_queues > MAX_QUEUES)
- num_queues = MAX_QUEUES;
-
- wait_time = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
- end = odp_time_sum(odp_time_local(), wait_time);
- do {
- int n = 0;
-
- for (i = 0; i < num_queues; i++) {
- n = odp_pktin_recv(pktin_queue[i], tmp_pkt,
- TX_BATCH_LEN);
- if (n != 0)
- break;
- }
- if (n < 0)
- break;
- for (i = 0; i < n; i++) {
- if (pktio_pkt_seq(tmp_pkt[i]) == pkt_seq[num_rx])
- pkt_tbl[num_rx++] = tmp_pkt[i];
- else
- odp_packet_free(tmp_pkt[i]);
- }
- } while (num_rx < TX_BATCH_LEN &&
- odp_time_cmp(end, odp_time_local()) > 0);
-
- CU_ASSERT(num_rx == TX_BATCH_LEN);
-
- for (i = 0; i < num_rx; i++)
- odp_packet_free(pkt_tbl[i]);
-
- for (i = 0; i < num_ifaces; i++) {
- CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
- CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
- }
-}
-
-static void test_recv_tmo(recv_tmo_mode_e mode)
-{
- odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t in_queue_param;
- odp_pktout_queue_t pktout_queue;
- int test_pkt_count = 6;
- odp_packet_t pkt_tbl[test_pkt_count];
- uint32_t pkt_seq[test_pkt_count];
- uint64_t ns;
- unsigned num_q;
- int ret;
- int i;
-
- CU_ASSERT_FATAL(num_ifaces >= 1);
-
- /* Open and configure interfaces */
- for (i = 0; i < num_ifaces; ++i) {
- pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
-
- odp_pktin_queue_param_init(&in_queue_param);
- if (mode == RECV_TMO)
- num_q = 1;
- else
- num_q = (capa.max_input_queues < MAX_QUEUES) ?
- capa.max_input_queues : MAX_QUEUES;
- in_queue_param.num_queues = num_q;
- in_queue_param.hash_enable = (num_q > 1) ? 1 : 0;
- in_queue_param.hash_proto.proto.ipv4_udp = 1;
-
- ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
- CU_ASSERT_FATAL(ret == 0);
-
- CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
- }
-
- for (i = 0; i < num_ifaces; i++)
- _pktio_wait_linkup(pktio[i]);
-
- pktio_tx = pktio[0];
- pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
-
- ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
- CU_ASSERT_FATAL(ret > 0);
-
- memset(pkt_seq, 0, sizeof(pkt_seq));
-
- /* No packets sent yet, so should wait */
- ns = 100 * ODP_TIME_MSEC_IN_NS;
- ret = recv_packets_tmo(pktio_rx, &pkt_tbl[0], &pkt_seq[0], 1, mode,
- odp_pktin_wait_time(ns), ns);
- CU_ASSERT(ret == 0);
-
- ret = create_packets(pkt_tbl, pkt_seq, test_pkt_count, pktio_tx,
- pktio_rx);
- CU_ASSERT_FATAL(ret == test_pkt_count);
-
- ret = odp_pktout_send(pktout_queue, pkt_tbl, test_pkt_count);
- CU_ASSERT_FATAL(ret == test_pkt_count);
-
- ret = recv_packets_tmo(pktio_rx, &pkt_tbl[0], &pkt_seq[0], 1, mode,
- ODP_PKTIN_WAIT, 0);
- CU_ASSERT_FATAL(ret == 1);
-
- ret = recv_packets_tmo(pktio_rx, &pkt_tbl[1], &pkt_seq[1], 1, mode,
- ODP_PKTIN_NO_WAIT, 0);
- CU_ASSERT_FATAL(ret == 1);
-
- ret = recv_packets_tmo(pktio_rx, &pkt_tbl[2], &pkt_seq[2], 1, mode,
- odp_pktin_wait_time(0), 0);
- CU_ASSERT_FATAL(ret == 1);
-
- ret = recv_packets_tmo(pktio_rx, &pkt_tbl[3], &pkt_seq[3], 3, mode,
- odp_pktin_wait_time(ns), ns);
- CU_ASSERT_FATAL(ret == 3);
-
- for (i = 0; i < test_pkt_count; i++)
- odp_packet_free(pkt_tbl[i]);
-
- for (i = 0; i < num_ifaces; i++) {
- CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
- CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
- }
-}
-
-void pktio_test_recv_tmo(void)
-{
- test_recv_tmo(RECV_TMO);
-}
-
-void pktio_test_recv_mq_tmo(void)
-{
- test_recv_tmo(RECV_MQ_TMO);
- test_recv_tmo(RECV_MQ_TMO_NO_IDX);
-}
-
-void pktio_test_recv_mtu(void)
-{
- packet_len = USE_MTU;
- pktio_test_sched_multi();
- packet_len = PKT_LEN_NORMAL;
-}
-
-void pktio_test_mtu(void)
-{
- int ret;
- uint32_t mtu;
-
- odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- mtu = odp_pktio_mtu(pktio);
- CU_ASSERT(mtu > 0);
-
- printf(" %" PRIu32 " ", mtu);
-
- ret = odp_pktio_close(pktio);
- CU_ASSERT(ret == 0);
-}
-
-void pktio_test_promisc(void)
-{
- int ret;
- odp_pktio_capability_t capa;
-
- odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- ret = odp_pktio_promisc_mode(pktio);
- CU_ASSERT(ret >= 0);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
- if (!capa.set_op.op.promisc_mode) {
- printf("promiscuous mode not supported\n");
- ret = odp_pktio_close(pktio);
- CU_ASSERT(ret == 0);
- return;
- }
-
- ret = odp_pktio_promisc_mode_set(pktio, 1);
- CU_ASSERT(0 == ret);
-
- /* Verify that promisc mode set */
- ret = odp_pktio_promisc_mode(pktio);
- CU_ASSERT(1 == ret);
-
- ret = odp_pktio_promisc_mode_set(pktio, 0);
- CU_ASSERT(0 == ret);
-
- /* Verify that promisc mode is not set */
- ret = odp_pktio_promisc_mode(pktio);
- CU_ASSERT(0 == ret);
-
- ret = odp_pktio_close(pktio);
- CU_ASSERT(ret == 0);
-}
-
-void pktio_test_mac(void)
-{
- unsigned char mac_addr[ODP_PKTIO_MACADDR_MAXSIZE];
- int mac_len;
- int ret;
- odp_pktio_t pktio;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- printf("testing mac for %s\n", iface_name[0]);
-
- mac_len = odp_pktio_mac_addr(pktio, mac_addr,
- ODP_PKTIO_MACADDR_MAXSIZE);
- CU_ASSERT(ODPH_ETHADDR_LEN == mac_len);
- CU_ASSERT(ODP_PKTIO_MACADDR_MAXSIZE >= mac_len);
-
- printf(" %X:%X:%X:%X:%X:%X ",
- mac_addr[0], mac_addr[1], mac_addr[2],
- mac_addr[3], mac_addr[4], mac_addr[5]);
-
- /* Fail case: wrong addr_size. Expected <0. */
- mac_len = odp_pktio_mac_addr(pktio, mac_addr, 2);
- CU_ASSERT(mac_len < 0);
-
- ret = odp_pktio_close(pktio);
- CU_ASSERT(0 == ret);
-}
-
-void pktio_test_open(void)
-{
- odp_pktio_t pktio;
- odp_pktio_param_t pktio_param;
- int i;
-
- /* test the sequence open->close->open->close() */
- for (i = 0; i < 2; ++i) {
- pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(odp_pktio_close(pktio) == 0);
- }
-
- odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
-
- pktio = odp_pktio_open("nothere", default_pkt_pool, &pktio_param);
- CU_ASSERT(pktio == ODP_PKTIO_INVALID);
-}
-
-void pktio_test_lookup(void)
-{
- odp_pktio_t pktio, pktio_inval;
- odp_pktio_param_t pktio_param;
-
- odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
-
- pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
- CU_ASSERT(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT(odp_pktio_lookup(iface_name[0]) == pktio);
-
- pktio_inval = odp_pktio_open(iface_name[0], default_pkt_pool,
- &pktio_param);
- CU_ASSERT(odp_errno() != 0);
- CU_ASSERT(pktio_inval == ODP_PKTIO_INVALID);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
-
- CU_ASSERT(odp_pktio_lookup(iface_name[0]) == ODP_PKTIO_INVALID);
-}
-
-void pktio_test_index(void)
-{
- odp_pktio_t pktio, pktio_inval = ODP_PKTIO_INVALID;
- odp_pktio_param_t pktio_param;
- int ndx;
-
- odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
-
- pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
- CU_ASSERT(pktio != ODP_PKTIO_INVALID);
-
- ndx = odp_pktio_index(pktio);
- CU_ASSERT(ndx >= 0);
- CU_ASSERT(odp_pktio_index(pktio_inval) < 0);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
- CU_ASSERT(odp_pktio_index(pktio) < 0);
-}
-
-static void pktio_test_print(void)
-{
- odp_pktio_t pktio;
- int i;
-
- for (i = 0; i < num_ifaces; ++i) {
- pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- /* Print pktio debug info and test that the
- * odp_pktio_print() function is implemented. */
- odp_pktio_print(pktio);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
- }
-}
-
-void pktio_test_pktio_config(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktio_config_t config;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- odp_pktio_config_init(&config);
-
- CU_ASSERT(odp_pktio_config(pktio, NULL) == 0);
-
- CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
-
- config = capa.config;
- CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
-
- CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
-}
-
-void pktio_test_info(void)
-{
- odp_pktio_t pktio;
- odp_pktio_info_t pktio_info;
- int i;
-
- for (i = 0; i < num_ifaces; i++) {
- pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_info(pktio, &pktio_info) == 0);
-
- printf("pktio %d\n name %s\n driver %s\n", i,
- pktio_info.name, pktio_info.drv_name);
-
- CU_ASSERT(strcmp(pktio_info.name, iface_name[i]) == 0);
- CU_ASSERT(pktio_info.pool == pool[i]);
- CU_ASSERT(pktio_info.param.in_mode == ODP_PKTIN_MODE_QUEUE);
- CU_ASSERT(pktio_info.param.out_mode == ODP_PKTOUT_MODE_DIRECT);
-
- CU_ASSERT(odp_pktio_info(ODP_PKTIO_INVALID, &pktio_info) < 0);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
- }
-}
-
-void pktio_test_pktin_queue_config_direct(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t queue_param;
- odp_pktin_queue_t pktin_queues[MAX_QUEUES];
- odp_queue_t in_queues[MAX_QUEUES];
- int num_queues;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT(odp_pktio_capability(ODP_PKTIO_INVALID, &capa) < 0);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
- capa.max_input_queues > 0);
- num_queues = capa.max_input_queues;
-
- odp_pktin_queue_param_init(&queue_param);
-
- queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
- queue_param.hash_proto.proto.ipv4_udp = 1;
- queue_param.num_queues = num_queues;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES)
- == num_queues);
- CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) < 0);
-
- queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
- queue_param.num_queues = 1;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktin_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
-
- queue_param.num_queues = capa.max_input_queues + 1;
- CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
-
- CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
-}
-
-void pktio_test_pktin_queue_config_sched(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t queue_param;
- odp_pktin_queue_t pktin_queues[MAX_QUEUES];
- odp_queue_t in_queues[MAX_QUEUES];
- int num_queues;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
- capa.max_input_queues > 0);
- num_queues = capa.max_input_queues;
-
- odp_pktin_queue_param_init(&queue_param);
-
- queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
- queue_param.hash_proto.proto.ipv4_udp = 1;
- queue_param.num_queues = num_queues;
- queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
- queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
- == num_queues);
- CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
-
- queue_param.num_queues = 1;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- queue_param.num_queues = capa.max_input_queues + 1;
- CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
-
- CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
-}
-
-void pktio_test_pktin_queue_config_queue(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktin_queue_param_t queue_param;
- odp_pktin_queue_t pktin_queues[MAX_QUEUES];
- odp_queue_t in_queues[MAX_QUEUES];
- int num_queues;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
- capa.max_input_queues > 0);
- num_queues = capa.max_input_queues;
-
- odp_pktin_queue_param_init(&queue_param);
-
- queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
- queue_param.hash_proto.proto.ipv4_udp = 1;
- queue_param.num_queues = num_queues;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
- == num_queues);
- CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
-
- queue_param.num_queues = 1;
- CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
-
- queue_param.num_queues = capa.max_input_queues + 1;
- CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
-}
-
-void pktio_test_pktout_queue_config(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktout_queue_param_t queue_param;
- odp_pktout_queue_t pktout_queues[MAX_QUEUES];
- int num_queues;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
- capa.max_output_queues > 0);
- num_queues = capa.max_output_queues;
-
- odp_pktout_queue_param_init(&queue_param);
-
- queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
- queue_param.num_queues = num_queues;
- CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktout_queue(pktio, pktout_queues, MAX_QUEUES)
- == num_queues);
-
- queue_param.op_mode = ODP_PKTIO_OP_MT;
- queue_param.num_queues = 1;
- CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
-
- CU_ASSERT(odp_pktout_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
-
- queue_param.num_queues = capa.max_output_queues + 1;
- CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) < 0);
-
- CU_ASSERT(odp_pktio_close(pktio) == 0);
-}
-
-#ifdef DEBUG_STATS
-static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
-{
- fprintf(stderr, "\n%s:\n"
- " in_octets %" PRIu64 "\n"
- " in_ucast_pkts %" PRIu64 "\n"
- " in_discards %" PRIu64 "\n"
- " in_errors %" PRIu64 "\n"
- " in_unknown_protos %" PRIu64 "\n"
- " out_octets %" PRIu64 "\n"
- " out_ucast_pkts %" PRIu64 "\n"
- " out_discards %" PRIu64 "\n"
- " out_errors %" PRIu64 "\n",
- name,
- s->in_octets,
- s->in_ucast_pkts,
- s->in_discards,
- s->in_errors,
- s->in_unknown_protos,
- s->out_octets,
- s->out_ucast_pkts,
- s->out_discards,
- s->out_errors);
-}
-#endif
-
-/* some pktio like netmap support various methods to
- * get statistics counters. ethtool strings are not standardised
- * and sysfs may not be supported. skip pktio_stats test until
- * we will solve that.*/
-int pktio_check_statistics_counters(void)
-{
- odp_pktio_t pktio;
- odp_pktio_stats_t stats;
- int ret;
- odp_pktio_param_t pktio_param;
- const char *iface = iface_name[0];
-
- odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
-
- pktio = odp_pktio_open(iface, pool[0], &pktio_param);
- if (pktio == ODP_PKTIO_INVALID)
- return ODP_TEST_INACTIVE;
-
- ret = odp_pktio_stats(pktio, &stats);
- (void)odp_pktio_close(pktio);
-
- if (ret == 0)
- return ODP_TEST_ACTIVE;
-
- return ODP_TEST_INACTIVE;
-}
-
-void pktio_test_statistics_counters(void)
-{
- odp_pktio_t pktio_rx, pktio_tx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
- odp_packet_t pkt;
- odp_packet_t tx_pkt[1000];
- uint32_t pkt_seq[1000];
- odp_event_t ev;
- int i, pkts, tx_pkts, ret, alloc = 0;
- odp_pktout_queue_t pktout;
- uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
- odp_pktio_stats_t stats[2];
-
- for (i = 0; i < num_ifaces; i++) {
- pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
-
- CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
- }
- pktio_tx = pktio[0];
- pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
-
- CU_ASSERT(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
-
- ret = odp_pktio_start(pktio_tx);
- CU_ASSERT(ret == 0);
- if (num_ifaces > 1) {
- ret = odp_pktio_start(pktio_rx);
- CU_ASSERT(ret == 0);
- }
-
- /* flush packets with magic number in pipes */
- for (i = 0; i < 1000; i++) {
- ev = odp_schedule(NULL, wait);
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
- }
-
- alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
-
- ret = odp_pktio_stats_reset(pktio_tx);
- CU_ASSERT(ret == 0);
- if (num_ifaces > 1) {
- ret = odp_pktio_stats_reset(pktio_rx);
- CU_ASSERT(ret == 0);
- }
-
- /* send */
- for (pkts = 0; pkts != alloc; ) {
- ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
- if (ret < 0) {
- CU_FAIL("unable to send packet\n");
- break;
- }
- pkts += ret;
- }
- tx_pkts = pkts;
-
- /* get */
- for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
- ev = odp_schedule(NULL, wait);
- if (ev != ODP_EVENT_INVALID) {
- if (odp_event_type(ev) == ODP_EVENT_PACKET) {
- pkt = odp_packet_from_event(ev);
- if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
- pkts++;
- }
- odp_event_free(ev);
- }
- }
-
- CU_ASSERT(pkts == tx_pkts);
-
- ret = odp_pktio_stats(pktio_tx, &stats[0]);
- CU_ASSERT(ret == 0);
-
- if (num_ifaces > 1) {
- ret = odp_pktio_stats(pktio_rx, &stats[1]);
- CU_ASSERT(ret == 0);
- CU_ASSERT((stats[1].in_ucast_pkts == 0) ||
- (stats[1].in_ucast_pkts >= (uint64_t)pkts));
- CU_ASSERT((stats[0].out_octets == 0) ||
- (stats[0].out_octets >=
- (PKT_LEN_NORMAL * (uint64_t)pkts)));
- } else {
- CU_ASSERT((stats[0].in_ucast_pkts == 0) ||
- (stats[0].in_ucast_pkts == (uint64_t)pkts));
- CU_ASSERT((stats[0].in_octets == 0) ||
- (stats[0].in_octets ==
- (PKT_LEN_NORMAL * (uint64_t)pkts)));
- }
-
- CU_ASSERT(0 == stats[0].in_discards);
- CU_ASSERT(0 == stats[0].in_errors);
- CU_ASSERT(0 == stats[0].in_unknown_protos);
- CU_ASSERT(0 == stats[0].out_discards);
- CU_ASSERT(0 == stats[0].out_errors);
-
- for (i = 0; i < num_ifaces; i++) {
- CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
-#ifdef DEBUG_STATS
- _print_pktio_stats(&stats[i], iface_name[i]);
-#endif
- flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
- CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
- }
-}
-
-void pktio_test_start_stop(void)
-{
- odp_pktio_t pktio[MAX_NUM_IFACES];
- odp_pktio_t pktio_in;
- odp_packet_t pkt;
- odp_packet_t tx_pkt[1000];
- uint32_t pkt_seq[1000];
- odp_event_t ev;
- int i, pkts, ret, alloc = 0;
- odp_pktout_queue_t pktout;
- uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
-
- for (i = 0; i < num_ifaces; i++) {
- pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
- }
-
- CU_ASSERT(odp_pktout_queue(pktio[0], &pktout, 1) == 1);
-
- /* Interfaces are stopped by default,
- * Check that stop when stopped generates an error */
- ret = odp_pktio_stop(pktio[0]);
- CU_ASSERT(ret < 0);
-
- /* start first */
- ret = odp_pktio_start(pktio[0]);
- CU_ASSERT(ret == 0);
- /* Check that start when started generates an error */
- ret = odp_pktio_start(pktio[0]);
- CU_ASSERT(ret < 0);
-
- _pktio_wait_linkup(pktio[0]);
-
- /* Test Rx on a stopped interface. Only works if there are 2 */
- if (num_ifaces > 1) {
- alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0],
- pktio[1]);
-
- for (pkts = 0; pkts != alloc; ) {
- ret = odp_pktout_send(pktout, &tx_pkt[pkts],
- alloc - pkts);
- if (ret < 0) {
- CU_FAIL("unable to enqueue packet\n");
- break;
- }
- pkts += ret;
- }
- /* check that packets did not arrive */
- for (i = 0, pkts = 0; i < 1000; i++) {
- ev = odp_schedule(NULL, wait);
- if (ev == ODP_EVENT_INVALID)
- continue;
-
- if (odp_event_type(ev) == ODP_EVENT_PACKET) {
- pkt = odp_packet_from_event(ev);
- if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
- pkts++;
- }
- odp_event_free(ev);
- }
- if (pkts)
- CU_FAIL("pktio stopped, received unexpected events");
-
- /* start both, send and get packets */
- /* 0 already started */
- ret = odp_pktio_start(pktio[1]);
- CU_ASSERT(ret == 0);
-
- _pktio_wait_linkup(pktio[1]);
-
- /* flush packets with magic number in pipes */
- for (i = 0; i < 1000; i++) {
- ev = odp_schedule(NULL, wait);
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
- }
- }
-
- if (num_ifaces > 1)
- pktio_in = pktio[1];
- else
- pktio_in = pktio[0];
-
- alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0], pktio_in);
-
- /* send */
- for (pkts = 0; pkts != alloc; ) {
- ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
- if (ret < 0) {
- CU_FAIL("unable to enqueue packet\n");
- break;
- }
- pkts += ret;
- }
-
- /* get */
- for (i = 0, pkts = 0; i < 1000; i++) {
- ev = odp_schedule(NULL, wait);
- if (ev != ODP_EVENT_INVALID) {
- if (odp_event_type(ev) == ODP_EVENT_PACKET) {
- pkt = odp_packet_from_event(ev);
- if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
- pkts++;
- }
- odp_event_free(ev);
- }
- }
- CU_ASSERT(pkts == alloc);
-
- for (i = 0; i < num_ifaces; i++) {
- CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
- CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
- }
-
- /* Verify that a schedule call after stop and close does not generate
- errors. */
- ev = odp_schedule(NULL, wait);
- CU_ASSERT(ev == ODP_EVENT_INVALID);
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
-}
-
-/*
- * This is a pre-condition check that the pktio_test_send_failure()
- * test case can be run. If the TX interface MTU is larger that the
- * biggest packet we can allocate then the test won't be able to
- * attempt to send packets larger than the MTU, so skip the test.
- */
-int pktio_check_send_failure(void)
-{
- odp_pktio_t pktio_tx;
- uint32_t mtu;
- odp_pktio_param_t pktio_param;
- int iface_idx = 0;
- const char *iface = iface_name[iface_idx];
- odp_pool_capability_t pool_capa;
-
- if (odp_pool_capability(&pool_capa) < 0) {
- fprintf(stderr, "%s: pool capability failed\n", __func__);
- return ODP_TEST_INACTIVE;
- };
-
- memset(&pktio_param, 0, sizeof(pktio_param));
-
- pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
-
- pktio_tx = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
- if (pktio_tx == ODP_PKTIO_INVALID) {
- fprintf(stderr, "%s: failed to open pktio\n", __func__);
- return ODP_TEST_INACTIVE;
- }
-
- /* read the MTU from the transmit interface */
- mtu = odp_pktio_mtu(pktio_tx);
-
- odp_pktio_close(pktio_tx);
-
- /* Failure test supports only single segment */
- if (pool_capa.pkt.max_seg_len &&
- pool_capa.pkt.max_seg_len < mtu + 32)
- return ODP_TEST_INACTIVE;
-
- return ODP_TEST_ACTIVE;
-}
-
-void pktio_test_send_failure(void)
-{
- odp_pktio_t pktio_tx, pktio_rx;
- odp_packet_t pkt_tbl[TX_BATCH_LEN];
- uint32_t pkt_seq[TX_BATCH_LEN];
- int ret, i, alloc_pkts;
- uint32_t mtu;
- odp_pool_param_t pool_params;
- odp_pool_t pkt_pool;
- int long_pkt_idx = TX_BATCH_LEN / 2;
- pktio_info_t info_rx;
- odp_pktout_queue_t pktout;
- odp_pool_capability_t pool_capa;
-
- pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DIRECT);
- if (pktio_tx == ODP_PKTIO_INVALID) {
- CU_FAIL("failed to open pktio");
- return;
- }
-
- CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
-
- /* read the MTU from the transmit interface */
- mtu = odp_pktio_mtu(pktio_tx);
-
- ret = odp_pktio_start(pktio_tx);
- CU_ASSERT_FATAL(ret == 0);
-
- _pktio_wait_linkup(pktio_tx);
-
- CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
-
- if (pool_capa.pkt.max_seg_len &&
- pool_capa.pkt.max_seg_len < mtu + 32) {
- CU_FAIL("Max packet seg length is too small.");
- return;
- }
-
- /* configure the pool so that we can generate test packets larger
- * than the interface MTU */
- odp_pool_param_init(&pool_params);
- pool_params.pkt.len = mtu + 32;
- pool_params.pkt.seg_len = pool_params.pkt.len;
- pool_params.pkt.num = TX_BATCH_LEN + 1;
- pool_params.type = ODP_POOL_PACKET;
- pkt_pool = odp_pool_create("pkt_pool_oversize", &pool_params);
- CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
-
- if (num_ifaces > 1) {
- pktio_rx = create_pktio(1, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DIRECT);
- ret = odp_pktio_start(pktio_rx);
- CU_ASSERT_FATAL(ret == 0);
-
- _pktio_wait_linkup(pktio_rx);
- } else {
- pktio_rx = pktio_tx;
- }
-
- /* generate a batch of packets with a single overly long packet
- * in the middle */
- for (i = 0; i < TX_BATCH_LEN; ++i) {
- uint32_t pkt_len;
-
- if (i == long_pkt_idx)
- pkt_len = pool_params.pkt.len;
- else
- pkt_len = PKT_LEN_NORMAL;
-
- pkt_tbl[i] = odp_packet_alloc(pkt_pool, pkt_len);
- if (pkt_tbl[i] == ODP_PACKET_INVALID)
- break;
-
- pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
-
- pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
- if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
- odp_packet_free(pkt_tbl[i]);
- break;
- }
-
- if (pkt_seq[i] == TEST_SEQ_INVALID) {
- odp_packet_free(pkt_tbl[i]);
- break;
- }
- }
- alloc_pkts = i;
-
- if (alloc_pkts == TX_BATCH_LEN) {
- /* try to send the batch with the long packet in the middle,
- * the initial short packets should be sent successfully */
- odp_errno_zero();
- ret = odp_pktout_send(pktout, pkt_tbl, TX_BATCH_LEN);
- CU_ASSERT_FATAL(ret == long_pkt_idx);
- CU_ASSERT(odp_errno() == 0);
-
- info_rx.id = pktio_rx;
- info_rx.inq = ODP_QUEUE_INVALID;
- info_rx.in_mode = ODP_PKTIN_MODE_DIRECT;
-
- i = wait_for_packets(&info_rx, pkt_tbl, pkt_seq, ret,
- TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);
-
- if (i == ret) {
- /* now try to send starting with the too-long packet
- * and verify it fails */
- odp_errno_zero();
- ret = odp_pktout_send(pktout,
- &pkt_tbl[long_pkt_idx],
- TX_BATCH_LEN - long_pkt_idx);
- CU_ASSERT(ret == -1);
- CU_ASSERT(odp_errno() != 0);
- } else {
- CU_FAIL("failed to receive transmitted packets\n");
- }
-
- /* now reduce the size of the long packet and attempt to send
- * again - should work this time */
- i = long_pkt_idx;
- odp_packet_pull_tail(pkt_tbl[i],
- odp_packet_len(pkt_tbl[i]) -
- PKT_LEN_NORMAL);
- pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
-
- pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
- ret = pktio_fixup_checksums(pkt_tbl[i]);
- CU_ASSERT_FATAL(ret == 0);
-
- CU_ASSERT_FATAL(pkt_seq[i] != TEST_SEQ_INVALID);
- ret = odp_pktout_send(pktout, &pkt_tbl[i], TX_BATCH_LEN - i);
- CU_ASSERT_FATAL(ret == (TX_BATCH_LEN - i));
-
- i = wait_for_packets(&info_rx, &pkt_tbl[i], &pkt_seq[i], ret,
- TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);
- CU_ASSERT(i == ret);
- } else {
- CU_FAIL("failed to generate test packets\n");
- }
-
- for (i = 0; i < alloc_pkts; ++i) {
- if (pkt_tbl[i] != ODP_PACKET_INVALID)
- odp_packet_free(pkt_tbl[i]);
- }
-
- if (pktio_rx != pktio_tx) {
- CU_ASSERT(odp_pktio_stop(pktio_rx) == 0);
- CU_ASSERT(odp_pktio_close(pktio_rx) == 0);
- }
- CU_ASSERT(odp_pktio_stop(pktio_tx) == 0);
- CU_ASSERT(odp_pktio_close(pktio_tx) == 0);
- CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
-}
-
-void pktio_test_recv_on_wonly(void)
-{
- odp_pktio_t pktio;
- int ret;
- odp_pktin_queue_t pktin;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_DISABLED,
- ODP_PKTOUT_MODE_DIRECT);
-
- if (pktio == ODP_PKTIO_INVALID) {
- CU_FAIL("failed to open pktio");
- return;
- }
-
- CU_ASSERT(odp_pktin_queue(pktio, &pktin, 1) == 0);
-
- ret = odp_pktio_start(pktio);
- CU_ASSERT_FATAL(ret == 0);
-
- _pktio_wait_linkup(pktio);
-
- ret = odp_pktio_stop(pktio);
- CU_ASSERT_FATAL(ret == 0);
-
- ret = odp_pktio_close(pktio);
- CU_ASSERT_FATAL(ret == 0);
-}
-
-void pktio_test_send_on_ronly(void)
-{
- odp_pktio_t pktio;
- int ret;
- odp_pktout_queue_t pktout;
-
- pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DISABLED);
-
- if (pktio == ODP_PKTIO_INVALID) {
- CU_FAIL("failed to open pktio");
- return;
- }
-
- CU_ASSERT(odp_pktout_queue(pktio, &pktout, 1) == 0);
-
- ret = odp_pktio_start(pktio);
- CU_ASSERT_FATAL(ret == 0);
-
- _pktio_wait_linkup(pktio);
-
- ret = odp_pktio_stop(pktio);
- CU_ASSERT_FATAL(ret == 0);
-
- ret = odp_pktio_close(pktio);
- CU_ASSERT_FATAL(ret == 0);
-}
-
-int pktio_check_pktin_ts(void)
-{
- odp_pktio_t pktio;
- odp_pktio_capability_t capa;
- odp_pktio_param_t pktio_param;
- int ret;
-
- odp_pktio_param_init(&pktio_param);
- pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
-
- pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
- if (pktio == ODP_PKTIO_INVALID)
- return ODP_TEST_INACTIVE;
-
- ret = odp_pktio_capability(pktio, &capa);
- (void)odp_pktio_close(pktio);
-
- if (ret < 0 || !capa.config.pktin.bit.ts_all)
- return ODP_TEST_INACTIVE;
-
- return ODP_TEST_ACTIVE;
-}
-
-void pktio_test_pktin_ts(void)
-{
- odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
- pktio_info_t pktio_rx_info;
- odp_pktio_capability_t capa;
- odp_pktio_config_t config;
- odp_pktout_queue_t pktout_queue;
- odp_packet_t pkt_tbl[TX_BATCH_LEN];
- uint32_t pkt_seq[TX_BATCH_LEN];
- uint64_t ns1, ns2;
- uint64_t res;
- odp_time_t ts_prev;
- odp_time_t ts;
- int num_rx = 0;
- int ret;
- int i;
-
- CU_ASSERT_FATAL(num_ifaces >= 1);
-
- /* Open and configure interfaces */
- for (i = 0; i < num_ifaces; ++i) {
- pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
- ODP_PKTOUT_MODE_DIRECT);
- CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
-
- CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
- CU_ASSERT_FATAL(capa.config.pktin.bit.ts_all);
-
- odp_pktio_config_init(&config);
- config.pktin.bit.ts_all = 1;
- CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
-
- CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
- }
-
- for (i = 0; i < num_ifaces; i++)
- _pktio_wait_linkup(pktio[i]);
-
- pktio_tx = pktio[0];
- pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
- pktio_rx_info.id = pktio_rx;
- pktio_rx_info.inq = ODP_QUEUE_INVALID;
- pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
-
- /* Test odp_pktin_ts_res() and odp_pktin_ts_from_ns() */
- res = odp_pktin_ts_res(pktio_tx);
- CU_ASSERT(res > PKTIN_TS_MIN_RES);
- CU_ASSERT(res < PKTIN_TS_MAX_RES);
- ns1 = 100;
- ts = odp_pktin_ts_from_ns(pktio_tx, ns1);
- ns2 = odp_time_to_ns(ts);
- /* Allow some arithmetic tolerance */
- CU_ASSERT((ns2 <= (ns1 + PKTIN_TS_CMP_RES)) &&
- (ns2 >= (ns1 - PKTIN_TS_CMP_RES)));
-
- ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
- pktio_rx);
- CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
-
- ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
- CU_ASSERT_FATAL(ret > 0);
-
- /* Send packets one at a time and add delay between the packets */
- for (i = 0; i < TX_BATCH_LEN; i++) {
- CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
- &pkt_tbl[i], 1) == 1);
- ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
- 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS);
- if (ret != 1)
- break;
- odp_time_wait_ns(PKTIN_TS_INTERVAL);
- }
- num_rx = i;
- CU_ASSERT(num_rx == TX_BATCH_LEN);
-
- ts_prev = ODP_TIME_NULL;
- for (i = 0; i < num_rx; i++) {
- ts = odp_packet_ts(pkt_tbl[i]);
-
- CU_ASSERT(odp_time_cmp(ts, ts_prev) > 0);
-
- ts_prev = ts;
- odp_packet_free(pkt_tbl[i]);
- }
-
- for (i = 0; i < num_ifaces; i++) {
- CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
- CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
- }
-}
-
-static int create_pool(const char *iface, int num)
-{
- char pool_name[ODP_POOL_NAME_LEN];
- odp_pool_param_t params;
- odp_pool_capability_t pool_capa;
-
- if (odp_pool_capability(&pool_capa) != 0)
- return -1;
-
- odp_pool_param_init(&params);
- set_pool_len(&params, &pool_capa);
- params.pkt.num = PKT_BUF_NUM;
- params.type = ODP_POOL_PACKET;
-
- snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s_%d",
- iface, pool_segmentation);
-
- pool[num] = odp_pool_create(pool_name, &params);
- if (ODP_POOL_INVALID == pool[num]) {
- fprintf(stderr, "%s: failed to create pool: %d",
- __func__, odp_errno());
- return -1;
- }
-
- return 0;
-}
-
-static int pktio_suite_init(void)
-{
- int i;
-
- odp_atomic_init_u32(&ip_seq, 0);
-
- if (getenv("ODP_WAIT_FOR_NETWORK"))
- wait_for_network = true;
-
- iface_name[0] = getenv("ODP_PKTIO_IF0");
- iface_name[1] = getenv("ODP_PKTIO_IF1");
- num_ifaces = 1;
-
- if (!iface_name[0]) {
- printf("No interfaces specified, using default \"loop\".\n");
- iface_name[0] = "loop";
- } else if (!iface_name[1]) {
- printf("Using loopback interface: %s\n", iface_name[0]);
- } else {
- num_ifaces = 2;
- printf("Using paired interfaces: %s %s\n",
- iface_name[0], iface_name[1]);
- }
-
- for (i = 0; i < num_ifaces; i++) {
- if (create_pool(iface_name[i], i) != 0)
- return -1;
- }
-
- if (default_pool_create() != 0) {
- fprintf(stderr, "error: failed to create default pool\n");
- return -1;
- }
-
- return 0;
-}
-
-int pktio_suite_init_unsegmented(void)
-{
- pool_segmentation = PKT_POOL_UNSEGMENTED;
- return pktio_suite_init();
-}
-
-int pktio_suite_init_segmented(void)
-{
- pool_segmentation = PKT_POOL_SEGMENTED;
- return pktio_suite_init();
-}
-
-int pktio_suite_term(void)
-{
- char pool_name[ODP_POOL_NAME_LEN];
- odp_pool_t pool;
- int i;
- int ret = 0;
-
- for (i = 0; i < num_ifaces; ++i) {
- snprintf(pool_name, sizeof(pool_name),
- "pkt_pool_%s_%d", iface_name[i], pool_segmentation);
- pool = odp_pool_lookup(pool_name);
- if (pool == ODP_POOL_INVALID)
- continue;
-
- if (odp_pool_destroy(pool) != 0) {
- fprintf(stderr, "error: failed to destroy pool %s\n",
- pool_name);
- ret = -1;
- }
- }
-
- if (odp_pool_destroy(default_pkt_pool) != 0) {
- fprintf(stderr, "error: failed to destroy default pool\n");
- ret = -1;
- }
- default_pkt_pool = ODP_POOL_INVALID;
-
- return ret;
-}
-
-odp_testinfo_t pktio_suite_unsegmented[] = {
- ODP_TEST_INFO(pktio_test_open),
- ODP_TEST_INFO(pktio_test_lookup),
- ODP_TEST_INFO(pktio_test_index),
- ODP_TEST_INFO(pktio_test_print),
- ODP_TEST_INFO(pktio_test_pktio_config),
- ODP_TEST_INFO(pktio_test_info),
- ODP_TEST_INFO(pktio_test_pktin_queue_config_direct),
- ODP_TEST_INFO(pktio_test_pktin_queue_config_sched),
- ODP_TEST_INFO(pktio_test_pktin_queue_config_queue),
- ODP_TEST_INFO(pktio_test_pktout_queue_config),
- ODP_TEST_INFO(pktio_test_plain_queue),
- ODP_TEST_INFO(pktio_test_plain_multi),
- ODP_TEST_INFO(pktio_test_sched_queue),
- ODP_TEST_INFO(pktio_test_sched_multi),
- ODP_TEST_INFO(pktio_test_recv),
- ODP_TEST_INFO(pktio_test_recv_multi),
- ODP_TEST_INFO(pktio_test_recv_queue),
- ODP_TEST_INFO(pktio_test_recv_tmo),
- ODP_TEST_INFO(pktio_test_recv_mq_tmo),
- ODP_TEST_INFO(pktio_test_recv_mtu),
- ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
- pktio_check_send_failure),
- ODP_TEST_INFO(pktio_test_mtu),
- ODP_TEST_INFO(pktio_test_promisc),
- ODP_TEST_INFO(pktio_test_mac),
- ODP_TEST_INFO(pktio_test_start_stop),
- ODP_TEST_INFO(pktio_test_recv_on_wonly),
- ODP_TEST_INFO(pktio_test_send_on_ronly),
- ODP_TEST_INFO(pktio_test_plain_multi_event),
- ODP_TEST_INFO(pktio_test_sched_multi_event),
- ODP_TEST_INFO(pktio_test_recv_multi_event),
- ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
- pktio_check_statistics_counters),
- ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
- pktio_check_pktin_ts),
- ODP_TEST_INFO_NULL
-};
-
-odp_testinfo_t pktio_suite_segmented[] = {
- ODP_TEST_INFO(pktio_test_plain_queue),
- ODP_TEST_INFO(pktio_test_plain_multi),
- ODP_TEST_INFO(pktio_test_sched_queue),
- ODP_TEST_INFO(pktio_test_sched_multi),
- ODP_TEST_INFO(pktio_test_recv),
- ODP_TEST_INFO(pktio_test_recv_multi),
- ODP_TEST_INFO(pktio_test_recv_mtu),
- ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
- pktio_check_send_failure),
- ODP_TEST_INFO_NULL
-};
-
-odp_suiteinfo_t pktio_suites[] = {
- {"Packet I/O Unsegmented", pktio_suite_init_unsegmented,
- pktio_suite_term, pktio_suite_unsegmented},
- {"Packet I/O Segmented", pktio_suite_init_segmented,
- pktio_suite_term, pktio_suite_segmented},
- ODP_SUITE_INFO_NULL
-};
-
-int pktio_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(pktio_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/pktio/pktio.h b/test/common_plat/validation/api/pktio/pktio.h
deleted file mode 100644
index 8131d05fe..000000000
--- a/test/common_plat/validation/api/pktio/pktio.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_PKTIO_H_
-#define _ODP_TEST_PKTIO_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void pktio_test_plain_queue(void);
-void pktio_test_plain_multi(void);
-void pktio_test_sched_queue(void);
-void pktio_test_sched_multi(void);
-void pktio_test_recv(void);
-void pktio_test_recv_multi(void);
-void pktio_test_recv_queue(void);
-void pktio_test_recv_tmo(void);
-void pktio_test_recv_mq_tmo(void);
-void pktio_test_recv_mtu(void);
-void pktio_test_mtu(void);
-void pktio_test_promisc(void);
-void pktio_test_mac(void);
-void pktio_test_inq_remdef(void);
-void pktio_test_open(void);
-void pktio_test_lookup(void);
-void pktio_test_index(void);
-void pktio_test_info(void);
-void pktio_test_inq(void);
-void pktio_test_pktio_config(void);
-void pktio_test_pktin_queue_config_direct(void);
-void pktio_test_pktin_queue_config_sched(void);
-void pktio_test_pktin_queue_config_queue(void);
-void pktio_test_pktout_queue_config(void);
-void pktio_test_start_stop(void);
-int pktio_check_send_failure(void);
-void pktio_test_send_failure(void);
-void pktio_test_recv_on_wonly(void);
-void pktio_test_send_on_ronly(void);
-void pktio_test_plain_multi_event(void);
-void pktio_test_sched_multi_event(void);
-void pktio_test_recv_multi_event(void);
-int pktio_check_statistics_counters(void);
-void pktio_test_statistics_counters(void);
-int pktio_check_pktin_ts(void);
-void pktio_test_pktin_ts(void);
-
-/* test arrays: */
-extern odp_testinfo_t pktio_suite[];
-
-/* test array init/term functions: */
-int pktio_suite_term(void);
-int pktio_suite_init_segmented(void);
-int pktio_suite_init_unsegmented(void);
-
-/* test registry: */
-extern odp_suiteinfo_t pktio_suites[];
-
-/* main test program: */
-int pktio_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/pktio/pktio_main.c b/test/common_plat/validation/api/pktio/pktio_main.c
deleted file mode 100644
index 2928e1b8a..000000000
--- a/test/common_plat/validation/api/pktio/pktio_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "pktio.h"
-
-int main(int argc, char *argv[])
-{
- return pktio_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/pool/Makefile.am b/test/common_plat/validation/api/pool/Makefile.am
deleted file mode 100644
index 1eb8d714b..000000000
--- a/test/common_plat/validation/api/pool/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestpool.la
-libtestpool_la_SOURCES = pool.c
-
-test_PROGRAMS = pool_main$(EXEEXT)
-dist_pool_main_SOURCES = pool_main.c
-pool_main_LDADD = libtestpool.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = pool.h
diff --git a/test/common_plat/validation/api/pool/pool.c b/test/common_plat/validation/api/pool/pool.c
deleted file mode 100644
index 8687941f7..000000000
--- a/test/common_plat/validation/api/pool/pool.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include "odp_cunit_common.h"
-#include "pool.h"
-
-static const int default_buffer_size = 1500;
-static const int default_buffer_num = 1000;
-
-static void pool_create_destroy(odp_pool_param_t *params)
-{
- odp_pool_t pool;
-
- pool = odp_pool_create(NULL, params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- CU_ASSERT(odp_pool_to_u64(pool) !=
- odp_pool_to_u64(ODP_POOL_INVALID));
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void pool_test_create_destroy_buffer(void)
-{
- odp_pool_param_t params = {
- .buf = {
- .size = default_buffer_size,
- .align = ODP_CACHE_LINE_SIZE,
- .num = default_buffer_num,
- },
- .type = ODP_POOL_BUFFER,
- };
-
- pool_create_destroy(&params);
-}
-
-void pool_test_create_destroy_packet(void)
-{
- odp_pool_param_t params = {
- .pkt = {
- .seg_len = 0,
- .len = default_buffer_size,
- .num = default_buffer_num,
- },
- .type = ODP_POOL_PACKET,
- };
-
- pool_create_destroy(&params);
-}
-
-void pool_test_create_destroy_timeout(void)
-{
- odp_pool_param_t params = {
- .tmo = {
- .num = default_buffer_num,
- },
- .type = ODP_POOL_TIMEOUT,
- };
-
- pool_create_destroy(&params);
-}
-
-void pool_test_lookup_info_print(void)
-{
- odp_pool_t pool;
- const char pool_name[] = "pool_for_lookup_test";
- odp_pool_info_t info;
- odp_pool_param_t params = {
- .buf = {
- .size = default_buffer_size,
- .align = ODP_CACHE_LINE_SIZE,
- .num = default_buffer_num,
- },
- .type = ODP_POOL_BUFFER,
- };
-
- pool = odp_pool_create(pool_name, &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- pool = odp_pool_lookup(pool_name);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
- CU_ASSERT(strncmp(pool_name, info.name, sizeof(pool_name)) == 0);
- CU_ASSERT(params.buf.size <= info.params.buf.size);
- CU_ASSERT(params.buf.align <= info.params.buf.align);
- CU_ASSERT(params.buf.num <= info.params.buf.num);
- CU_ASSERT(params.type == info.params.type);
-
- odp_pool_print(pool);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-odp_testinfo_t pool_suite[] = {
- ODP_TEST_INFO(pool_test_create_destroy_buffer),
- ODP_TEST_INFO(pool_test_create_destroy_packet),
- ODP_TEST_INFO(pool_test_create_destroy_timeout),
- ODP_TEST_INFO(pool_test_lookup_info_print),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t pool_suites[] = {
- { .pName = "Pool tests",
- .pTests = pool_suite,
- },
- ODP_SUITE_INFO_NULL,
-};
-
-int pool_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(pool_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/pool/pool.h b/test/common_plat/validation/api/pool/pool.h
deleted file mode 100644
index 29e517633..000000000
--- a/test/common_plat/validation/api/pool/pool.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_POOL_H_
-#define _ODP_TEST_POOL_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void pool_test_create_destroy_buffer(void);
-void pool_test_create_destroy_packet(void);
-void pool_test_create_destroy_timeout(void);
-void pool_test_create_destroy_buffer_shm(void);
-void pool_test_lookup_info_print(void);
-
-/* test arrays: */
-extern odp_testinfo_t pool_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t pool_suites[];
-
-/* main test program: */
-int pool_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/pool/pool_main.c b/test/common_plat/validation/api/pool/pool_main.c
deleted file mode 100644
index bf06585b5..000000000
--- a/test/common_plat/validation/api/pool/pool_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "pool.h"
-
-int main(int argc, char *argv[])
-{
- return pool_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/queue/Makefile.am b/test/common_plat/validation/api/queue/Makefile.am
deleted file mode 100644
index a477e3c56..000000000
--- a/test/common_plat/validation/api/queue/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestqueue.la
-libtestqueue_la_SOURCES = queue.c
-
-test_PROGRAMS = queue_main$(EXEEXT)
-dist_queue_main_SOURCES = queue_main.c
-queue_main_LDADD = libtestqueue.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = queue.h
diff --git a/test/common_plat/validation/api/queue/queue.c b/test/common_plat/validation/api/queue/queue.c
deleted file mode 100644
index 1f7913a12..000000000
--- a/test/common_plat/validation/api/queue/queue.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "queue.h"
-
-#define MAX_BUFFER_QUEUE (8)
-#define MSG_POOL_SIZE (4 * 1024 * 1024)
-#define CONFIG_MAX_ITERATION (100)
-#define MAX_QUEUES (64 * 1024)
-
-static int queue_context = 0xff;
-static odp_pool_t pool;
-
-static void generate_name(char *name, uint32_t index)
-{
- /* Uniqueue name for up to 300M queues */
- name[0] = 'A' + ((index / (26 * 26 * 26 * 26 * 26)) % 26);
- name[1] = 'A' + ((index / (26 * 26 * 26 * 26)) % 26);
- name[2] = 'A' + ((index / (26 * 26 * 26)) % 26);
- name[3] = 'A' + ((index / (26 * 26)) % 26);
- name[4] = 'A' + ((index / 26) % 26);
- name[5] = 'A' + (index % 26);
-}
-
-int queue_suite_init(void)
-{
- odp_pool_param_t params;
-
- params.buf.size = 0;
- params.buf.align = ODP_CACHE_LINE_SIZE;
- params.buf.num = 1024 * 10;
- params.type = ODP_POOL_BUFFER;
-
- pool = odp_pool_create("msg_pool", &params);
-
- if (ODP_POOL_INVALID == pool) {
- printf("Pool create failed.\n");
- return -1;
- }
- return 0;
-}
-
-int queue_suite_term(void)
-{
- return odp_pool_destroy(pool);
-}
-
-void queue_test_capa(void)
-{
- odp_queue_capability_t capa;
- odp_queue_param_t qparams;
- char name[ODP_QUEUE_NAME_LEN];
- odp_queue_t queue[MAX_QUEUES];
- uint32_t num_queues, i;
-
- memset(&capa, 0, sizeof(odp_queue_capability_t));
- CU_ASSERT(odp_queue_capability(&capa) == 0);
-
- CU_ASSERT(capa.max_queues != 0);
- CU_ASSERT(capa.max_ordered_locks != 0);
- CU_ASSERT(capa.max_sched_groups != 0);
- CU_ASSERT(capa.sched_prios != 0);
-
- for (i = 0; i < ODP_QUEUE_NAME_LEN; i++)
- name[i] = 'A' + (i % 26);
-
- name[ODP_QUEUE_NAME_LEN - 1] = 0;
-
- if (capa.max_queues > MAX_QUEUES)
- num_queues = MAX_QUEUES;
- else
- num_queues = capa.max_queues;
-
- odp_queue_param_init(&qparams);
-
- for (i = 0; i < num_queues; i++) {
- generate_name(name, i);
- queue[i] = odp_queue_create(name, &qparams);
-
- if (queue[i] == ODP_QUEUE_INVALID) {
- CU_FAIL("Queue create failed");
- num_queues = i;
- break;
- }
-
- CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
- }
-
- for (i = 0; i < num_queues; i++)
- CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
-}
-
-void queue_test_mode(void)
-{
- odp_queue_param_t qparams;
- odp_queue_t queue;
- int i, j;
- odp_queue_op_mode_t mode[3] = { ODP_QUEUE_OP_MT,
- ODP_QUEUE_OP_MT_UNSAFE,
- ODP_QUEUE_OP_DISABLED };
-
- odp_queue_param_init(&qparams);
-
- /* Plain queue modes */
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++) {
- /* Should not disable both enq and deq */
- if (i == 2 && j == 2)
- break;
-
- qparams.enq_mode = mode[i];
- qparams.deq_mode = mode[j];
- queue = odp_queue_create("test_queue", &qparams);
- CU_ASSERT(queue != ODP_QUEUE_INVALID);
- if (queue != ODP_QUEUE_INVALID)
- CU_ASSERT(odp_queue_destroy(queue) == 0);
- }
- }
-
- odp_queue_param_init(&qparams);
- qparams.type = ODP_QUEUE_TYPE_SCHED;
-
- /* Scheduled queue modes. Dequeue mode is fixed. */
- for (i = 0; i < 3; i++) {
- qparams.enq_mode = mode[i];
- queue = odp_queue_create("test_queue", &qparams);
- CU_ASSERT(queue != ODP_QUEUE_INVALID);
- if (queue != ODP_QUEUE_INVALID)
- CU_ASSERT(odp_queue_destroy(queue) == 0);
- }
-}
-
-void queue_test_param(void)
-{
- odp_queue_t queue, null_queue;
- odp_event_t enev[MAX_BUFFER_QUEUE];
- odp_event_t deev[MAX_BUFFER_QUEUE];
- odp_buffer_t buf;
- odp_event_t ev;
- odp_pool_t msg_pool;
- odp_event_t *pev_tmp;
- int i, deq_ret, ret;
- int nr_deq_entries = 0;
- int max_iteration = CONFIG_MAX_ITERATION;
- odp_queue_param_t qparams;
- odp_buffer_t enbuf;
-
- /* Schedule type queue */
- odp_queue_param_init(&qparams);
- qparams.type = ODP_QUEUE_TYPE_SCHED;
- qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qparams.sched.group = ODP_SCHED_GROUP_WORKER;
-
- queue = odp_queue_create("test_queue", &qparams);
- CU_ASSERT(ODP_QUEUE_INVALID != queue);
- CU_ASSERT(odp_queue_to_u64(queue) !=
- odp_queue_to_u64(ODP_QUEUE_INVALID));
- CU_ASSERT(queue == odp_queue_lookup("test_queue"));
- CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
- CU_ASSERT(ODP_SCHED_PRIO_LOWEST == odp_queue_sched_prio(queue));
- CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
- CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));
-
- CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
- sizeof(queue_context)));
-
- CU_ASSERT(&queue_context == odp_queue_context(queue));
- CU_ASSERT(odp_queue_destroy(queue) == 0);
-
- /* Create queue with no name */
- odp_queue_param_init(&qparams);
- null_queue = odp_queue_create(NULL, &qparams);
- CU_ASSERT(ODP_QUEUE_INVALID != null_queue);
-
- /* Plain type queue */
- odp_queue_param_init(&qparams);
- qparams.type = ODP_QUEUE_TYPE_PLAIN;
- qparams.context = &queue_context;
- qparams.context_len = sizeof(queue_context);
-
- queue = odp_queue_create("test_queue", &qparams);
- CU_ASSERT(ODP_QUEUE_INVALID != queue);
- CU_ASSERT(queue == odp_queue_lookup("test_queue"));
- CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
- CU_ASSERT(&queue_context == odp_queue_context(queue));
-
- /* Destroy queue with no name */
- CU_ASSERT(odp_queue_destroy(null_queue) == 0);
-
- msg_pool = odp_pool_lookup("msg_pool");
- buf = odp_buffer_alloc(msg_pool);
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
- ev = odp_buffer_to_event(buf);
-
- if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
- odp_buffer_free(buf);
- } else {
- CU_ASSERT(ev == odp_queue_deq(queue));
- odp_buffer_free(buf);
- }
-
- for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
- buf = odp_buffer_alloc(msg_pool);
- enev[i] = odp_buffer_to_event(buf);
- }
-
- /*
- * odp_queue_enq_multi may return 0..n buffers due to the resource
- * constraints in the implementation at that given point of time.
- * But here we assume that we succeed in enqueuing all buffers.
- */
- ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
- CU_ASSERT(MAX_BUFFER_QUEUE == ret);
- i = ret < 0 ? 0 : ret;
- for ( ; i < MAX_BUFFER_QUEUE; i++)
- odp_event_free(enev[i]);
-
- pev_tmp = deev;
- do {
- deq_ret = odp_queue_deq_multi(queue, pev_tmp,
- MAX_BUFFER_QUEUE);
- nr_deq_entries += deq_ret;
- max_iteration--;
- pev_tmp += deq_ret;
- CU_ASSERT(max_iteration >= 0);
- } while (nr_deq_entries < MAX_BUFFER_QUEUE);
-
- for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
- enbuf = odp_buffer_from_event(enev[i]);
- CU_ASSERT(enev[i] == deev[i]);
- odp_buffer_free(enbuf);
- }
-
- CU_ASSERT(odp_queue_destroy(queue) == 0);
-}
-
-void queue_test_info(void)
-{
- odp_queue_t q_plain, q_order;
- const char *const nq_plain = "test_q_plain";
- const char *const nq_order = "test_q_order";
- odp_queue_info_t info;
- odp_queue_param_t param;
- char q_plain_ctx[] = "test_q_plain context data";
- char q_order_ctx[] = "test_q_order context data";
- unsigned lock_count;
- char *ctx;
- int ret;
-
- /* Create a plain queue and set context */
- q_plain = odp_queue_create(nq_plain, NULL);
- CU_ASSERT(ODP_QUEUE_INVALID != q_plain);
- CU_ASSERT(odp_queue_context_set(q_plain, q_plain_ctx,
- sizeof(q_plain_ctx)) == 0);
-
- /* Create a scheduled ordered queue with explicitly set params */
- odp_queue_param_init(&param);
- param.type = ODP_QUEUE_TYPE_SCHED;
- param.sched.prio = ODP_SCHED_PRIO_NORMAL;
- param.sched.sync = ODP_SCHED_SYNC_ORDERED;
- param.sched.group = ODP_SCHED_GROUP_ALL;
- param.sched.lock_count = 1;
- param.context = q_order_ctx;
- q_order = odp_queue_create(nq_order, &param);
- CU_ASSERT(ODP_QUEUE_INVALID != q_order);
-
- /* Check info for the plain queue */
- CU_ASSERT(odp_queue_info(q_plain, &info) == 0);
- CU_ASSERT(strcmp(nq_plain, info.name) == 0);
- CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_PLAIN);
- CU_ASSERT(info.param.type == odp_queue_type(q_plain));
- ctx = info.param.context; /* 'char' context ptr */
- CU_ASSERT(ctx == q_plain_ctx);
- CU_ASSERT(info.param.context == odp_queue_context(q_plain));
-
- /* Check info for the scheduled ordered queue */
- CU_ASSERT(odp_queue_info(q_order, &info) == 0);
- CU_ASSERT(strcmp(nq_order, info.name) == 0);
- CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_SCHED);
- CU_ASSERT(info.param.type == odp_queue_type(q_order));
- ctx = info.param.context; /* 'char' context ptr */
- CU_ASSERT(ctx == q_order_ctx);
- CU_ASSERT(info.param.context == odp_queue_context(q_order));
- CU_ASSERT(info.param.sched.prio == odp_queue_sched_prio(q_order));
- CU_ASSERT(info.param.sched.sync == odp_queue_sched_type(q_order));
- CU_ASSERT(info.param.sched.group == odp_queue_sched_group(q_order));
- ret = odp_queue_lock_count(q_order);
- CU_ASSERT(ret >= 0);
- lock_count = (unsigned)ret;
- CU_ASSERT(info.param.sched.lock_count == lock_count);
-
- CU_ASSERT(odp_queue_destroy(q_plain) == 0);
- CU_ASSERT(odp_queue_destroy(q_order) == 0);
-}
-
-odp_testinfo_t queue_suite[] = {
- ODP_TEST_INFO(queue_test_capa),
- ODP_TEST_INFO(queue_test_mode),
- ODP_TEST_INFO(queue_test_param),
- ODP_TEST_INFO(queue_test_info),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t queue_suites[] = {
- {"Queue", queue_suite_init, queue_suite_term, queue_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int queue_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(queue_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/queue/queue.h b/test/common_plat/validation/api/queue/queue.h
deleted file mode 100644
index 6b787b1d6..000000000
--- a/test/common_plat/validation/api/queue/queue.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_QUEUE_H_
-#define _ODP_TEST_QUEUE_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void queue_test_capa(void);
-void queue_test_mode(void);
-void queue_test_param(void);
-void queue_test_info(void);
-
-/* test arrays: */
-extern odp_testinfo_t queue_suite[];
-
-/* test array init/term functions: */
-int queue_suite_init(void);
-int queue_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t queue_suites[];
-
-/* main test program: */
-int queue_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/queue/queue_main.c b/test/common_plat/validation/api/queue/queue_main.c
deleted file mode 100644
index b461b860a..000000000
--- a/test/common_plat/validation/api/queue/queue_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "queue.h"
-
-int main(int argc, char *argv[])
-{
- return queue_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/random/Makefile.am b/test/common_plat/validation/api/random/Makefile.am
deleted file mode 100644
index 69259a4db..000000000
--- a/test/common_plat/validation/api/random/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestrandom.la
-libtestrandom_la_SOURCES = random.c
-
-test_PROGRAMS = random_main$(EXEEXT)
-dist_random_main_SOURCES = random_main.c
-random_main_LDADD = libtestrandom.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = random.h
diff --git a/test/common_plat/validation/api/random/random.c b/test/common_plat/validation/api/random/random.c
deleted file mode 100644
index a0e2ef72f..000000000
--- a/test/common_plat/validation/api/random/random.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include "random.h"
-
-void random_test_get_size(void)
-{
- int32_t ret;
- uint8_t buf[32];
-
- ret = odp_random_data(buf, sizeof(buf), ODP_RANDOM_BASIC);
- CU_ASSERT(ret == sizeof(buf));
-}
-
-void random_test_kind(void)
-{
- int32_t rc;
- uint8_t buf[4096];
- uint32_t buf_size = sizeof(buf);
- odp_random_kind_t max_kind = odp_random_max_kind();
-
- rc = odp_random_data(buf, buf_size, max_kind);
- CU_ASSERT(rc > 0);
-
- switch (max_kind) {
- case ODP_RANDOM_BASIC:
- rc = odp_random_data(buf, 4, ODP_RANDOM_CRYPTO);
- CU_ASSERT(rc < 0);
- /* Fall through */
-
- case ODP_RANDOM_CRYPTO:
- rc = odp_random_data(buf, 4, ODP_RANDOM_TRUE);
- CU_ASSERT(rc < 0);
- break;
-
- default:
- break;
- }
-}
-
-void random_test_repeat(void)
-{
- uint8_t buf1[1024];
- uint8_t buf2[1024];
- int32_t rc;
- uint64_t seed1 = 12345897;
- uint64_t seed2 = seed1;
-
- rc = odp_random_test_data(buf1, sizeof(buf1), &seed1);
- CU_ASSERT(rc == sizeof(buf1));
-
- rc = odp_random_test_data(buf2, sizeof(buf2), &seed2);
- CU_ASSERT(rc == sizeof(buf2));
-
- CU_ASSERT(seed1 == seed2);
- CU_ASSERT(memcmp(buf1, buf2, sizeof(buf1)) == 0);
-}
-
-odp_testinfo_t random_suite[] = {
- ODP_TEST_INFO(random_test_get_size),
- ODP_TEST_INFO(random_test_kind),
- ODP_TEST_INFO(random_test_repeat),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t random_suites[] = {
- {"Random", NULL, NULL, random_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int random_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(random_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/random/random.h b/test/common_plat/validation/api/random/random.h
deleted file mode 100644
index c4bca7827..000000000
--- a/test/common_plat/validation/api/random/random.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_RANDOM_H_
-#define _ODP_TEST_RANDOM_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void random_test_get_size(void);
-void random_test_kind(void);
-void random_test_repeat(void);
-
-/* test arrays: */
-extern odp_testinfo_t random_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t random_suites[];
-
-/* main test program: */
-int random_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/random/random_main.c b/test/common_plat/validation/api/random/random_main.c
deleted file mode 100644
index 8f38a84c6..000000000
--- a/test/common_plat/validation/api/random/random_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "random.h"
-
-int main(int argc, char *argv[])
-{
- return random_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/scheduler/.gitignore b/test/common_plat/validation/api/scheduler/.gitignore
deleted file mode 100644
index b4eb30091..000000000
--- a/test/common_plat/validation/api/scheduler/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-scheduler_main
diff --git a/test/common_plat/validation/api/scheduler/Makefile.am b/test/common_plat/validation/api/scheduler/Makefile.am
deleted file mode 100644
index 2555cab81..000000000
--- a/test/common_plat/validation/api/scheduler/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestscheduler.la
-libtestscheduler_la_SOURCES = scheduler.c
-
-test_PROGRAMS = scheduler_main$(EXEEXT)
-dist_scheduler_main_SOURCES = scheduler_main.c
-scheduler_main_LDADD = libtestscheduler.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = scheduler.h
diff --git a/test/common_plat/validation/api/scheduler/scheduler.c b/test/common_plat/validation/api/scheduler/scheduler.c
deleted file mode 100644
index 952561cd3..000000000
--- a/test/common_plat/validation/api/scheduler/scheduler.c
+++ /dev/null
@@ -1,1669 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include "odp_cunit_common.h"
-#include "scheduler.h"
-
-#define MAX_WORKERS_THREADS 32
-#define MAX_ORDERED_LOCKS 2
-#define MSG_POOL_SIZE (64 * 1024)
-#define QUEUES_PER_PRIO 16
-#define BUF_SIZE 64
-#define BUFS_PER_QUEUE 100
-#define BUFS_PER_QUEUE_EXCL 10000
-#define BURST_BUF_SIZE 4
-#define NUM_BUFS_PAUSE 1000
-#define NUM_BUFS_BEFORE_PAUSE 10
-#define NUM_GROUPS 2
-
-#define GLOBALS_SHM_NAME "test_globals"
-#define MSG_POOL_NAME "msg_pool"
-#define QUEUE_CTX_POOL_NAME "queue_ctx_pool"
-#define SHM_THR_ARGS_NAME "shm_thr_args"
-
-#define ONE_Q 1
-#define MANY_QS QUEUES_PER_PRIO
-
-#define ONE_PRIO 1
-
-#define SCHD_ONE 0
-#define SCHD_MULTI 1
-
-#define DISABLE_EXCL_ATOMIC 0
-#define ENABLE_EXCL_ATOMIC 1
-
-#define MAGIC 0xdeadbeef
-#define MAGIC1 0xdeadbeef
-#define MAGIC2 0xcafef00d
-
-#define CHAOS_NUM_QUEUES 6
-#define CHAOS_NUM_BUFS_PER_QUEUE 6
-#define CHAOS_NUM_ROUNDS 1000
-#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE)
-#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000)
-#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uint32_t)(uintptr_t)p)
-#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)n)
-
-#define ODP_WAIT_TOLERANCE (60 * ODP_TIME_MSEC_IN_NS)
-
-/* Test global variables */
-typedef struct {
- int num_workers;
- odp_barrier_t barrier;
- int buf_count;
- int buf_count_cpy;
- odp_ticketlock_t lock;
- odp_spinlock_t atomic_lock;
- struct {
- odp_queue_t handle;
- char name[ODP_QUEUE_NAME_LEN];
- } chaos_q[CHAOS_NUM_QUEUES];
-} test_globals_t;
-
-typedef struct {
- pthrd_arg cu_thr;
- test_globals_t *globals;
- odp_schedule_sync_t sync;
- int num_queues;
- int num_prio;
- int num_bufs;
- int num_workers;
- int enable_schd_multi;
- int enable_excl_atomic;
-} thread_args_t;
-
-typedef struct {
- uint64_t sequence;
- uint64_t lock_sequence[MAX_ORDERED_LOCKS];
- uint64_t output_sequence;
-} buf_contents;
-
-typedef struct {
- odp_buffer_t ctx_handle;
- odp_queue_t pq_handle;
- uint64_t sequence;
- uint64_t lock_sequence[MAX_ORDERED_LOCKS];
-} queue_context;
-
-typedef struct {
- uint64_t evno;
- uint64_t seqno;
-} chaos_buf;
-
-odp_pool_t pool;
-odp_pool_t queue_ctx_pool;
-
-static int drain_queues(void)
-{
- odp_event_t ev;
- uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
- int ret = 0;
-
- while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
- odp_event_free(ev);
- ret++;
- }
-
- return ret;
-}
-
-static int exit_schedule_loop(void)
-{
- odp_event_t ev;
- int ret = 0;
-
- odp_schedule_pause();
-
- while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
- != ODP_EVENT_INVALID) {
- odp_event_free(ev);
- ret++;
- }
-
- odp_schedule_resume();
-
- return ret;
-}
-
-void scheduler_test_wait_time(void)
-{
- int i;
- odp_queue_t queue;
- uint64_t wait_time;
- odp_queue_param_t qp;
- odp_time_t lower_limit, upper_limit;
- odp_time_t start_time, end_time, diff;
-
- /* check on read */
- wait_time = odp_schedule_wait_time(0);
- wait_time = odp_schedule_wait_time(1);
-
- /* check ODP_SCHED_NO_WAIT */
- odp_queue_param_init(&qp);
- qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qp.sched.prio = ODP_SCHED_PRIO_NORMAL;
- qp.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create("dummy_queue", &qp);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
- start_time = odp_time_local();
- odp_schedule(&queue, ODP_SCHED_NO_WAIT);
- end_time = odp_time_local();
-
- diff = odp_time_diff(end_time, start_time);
- lower_limit = ODP_TIME_NULL;
- upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);
-
- CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
- CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
-
- /* check time correctness */
- start_time = odp_time_local();
- for (i = 1; i < 6; i++) {
- odp_schedule(&queue, wait_time);
- printf("%d..", i);
- }
- end_time = odp_time_local();
-
- diff = odp_time_diff(end_time, start_time);
- lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
- ODP_WAIT_TOLERANCE);
- upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
- ODP_WAIT_TOLERANCE);
-
- CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
- CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
-
- CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
-}
-
-void scheduler_test_num_prio(void)
-{
- int prio;
-
- prio = odp_schedule_num_prio();
-
- CU_ASSERT(prio > 0);
- CU_ASSERT(prio == odp_schedule_num_prio());
-}
-
-void scheduler_test_queue_destroy(void)
-{
- odp_pool_t p;
- odp_pool_param_t params;
- odp_queue_param_t qp;
- odp_queue_t queue, from;
- odp_buffer_t buf;
- odp_event_t ev;
- uint32_t *u32;
- int i;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
- ODP_SCHED_SYNC_ATOMIC,
- ODP_SCHED_SYNC_ORDERED};
-
- odp_queue_param_init(&qp);
- odp_pool_param_init(&params);
- params.buf.size = 100;
- params.buf.align = 0;
- params.buf.num = 1;
- params.type = ODP_POOL_BUFFER;
-
- p = odp_pool_create("sched_destroy_pool", &params);
-
- CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
-
- for (i = 0; i < 3; i++) {
- qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qp.sched.sync = sync[i];
- qp.sched.group = ODP_SCHED_GROUP_ALL;
-
- queue = odp_queue_create("sched_destroy_queue", &qp);
-
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- buf = odp_buffer_alloc(p);
-
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
-
- u32 = odp_buffer_addr(buf);
- u32[0] = MAGIC;
-
- ev = odp_buffer_to_event(buf);
- if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
- odp_buffer_free(buf);
-
- ev = odp_schedule(&from, ODP_SCHED_WAIT);
-
- CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
-
- CU_ASSERT_FATAL(from == queue);
-
- buf = odp_buffer_from_event(ev);
- u32 = odp_buffer_addr(buf);
-
- CU_ASSERT_FATAL(u32[0] == MAGIC);
-
- odp_buffer_free(buf);
- odp_schedule_release_ordered();
-
- CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
- }
-
- CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
-}
-
-void scheduler_test_groups(void)
-{
- odp_pool_t p;
- odp_pool_param_t params;
- odp_queue_t queue_grp1, queue_grp2;
- odp_buffer_t buf;
- odp_event_t ev;
- uint32_t *u32;
- int i, j, rc;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
- ODP_SCHED_SYNC_ATOMIC,
- ODP_SCHED_SYNC_ORDERED};
- int thr_id = odp_thread_id();
- odp_thrmask_t zeromask, mymask, testmask;
- odp_schedule_group_t mygrp1, mygrp2, null_grp, lookup;
- odp_schedule_group_info_t info;
-
- odp_thrmask_zero(&zeromask);
- odp_thrmask_zero(&mymask);
- odp_thrmask_set(&mymask, thr_id);
-
- /* Can't find a group before we create it */
- lookup = odp_schedule_group_lookup("Test Group 1");
- CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
-
- /* Now create the group */
- mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
- CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);
-
- /* Verify we can now find it */
- lookup = odp_schedule_group_lookup("Test Group 1");
- CU_ASSERT(lookup == mygrp1);
-
- /* Threadmask should be retrievable and be what we expect */
- rc = odp_schedule_group_thrmask(mygrp1, &testmask);
- CU_ASSERT(rc == 0);
- CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
-
- /* Now join the group and verify we're part of it */
- rc = odp_schedule_group_join(mygrp1, &mymask);
- CU_ASSERT(rc == 0);
-
- rc = odp_schedule_group_thrmask(mygrp1, &testmask);
- CU_ASSERT(rc == 0);
- CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
-
- /* Info struct */
- memset(&info, 0, sizeof(odp_schedule_group_info_t));
- rc = odp_schedule_group_info(mygrp1, &info);
- CU_ASSERT(rc == 0);
- CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
- CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);
-
- /* We can't join or leave an unknown group */
- rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
- CU_ASSERT(rc != 0);
-
- rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
- CU_ASSERT(rc != 0);
-
- /* But we can leave our group */
- rc = odp_schedule_group_leave(mygrp1, &mymask);
- CU_ASSERT(rc == 0);
-
- rc = odp_schedule_group_thrmask(mygrp1, &testmask);
- CU_ASSERT(rc == 0);
- CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
-
- /* Create group with no name */
- null_grp = odp_schedule_group_create(NULL, &zeromask);
- CU_ASSERT(null_grp != ODP_SCHED_GROUP_INVALID);
-
- /* We shouldn't be able to find our second group before creating it */
- lookup = odp_schedule_group_lookup("Test Group 2");
- CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
-
- /* Now create it and verify we can find it */
- mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
- CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);
-
- lookup = odp_schedule_group_lookup("Test Group 2");
- CU_ASSERT(lookup == mygrp2);
-
- /* Destroy group with no name */
- CU_ASSERT_FATAL(odp_schedule_group_destroy(null_grp) == 0);
-
- /* Verify we're not part of it */
- rc = odp_schedule_group_thrmask(mygrp2, &testmask);
- CU_ASSERT(rc == 0);
- CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
-
- /* Now join the group and verify we're part of it */
- rc = odp_schedule_group_join(mygrp2, &mymask);
- CU_ASSERT(rc == 0);
-
- rc = odp_schedule_group_thrmask(mygrp2, &testmask);
- CU_ASSERT(rc == 0);
- CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
-
- /* Now verify scheduler adherence to groups */
- odp_pool_param_init(&params);
- params.buf.size = 100;
- params.buf.align = 0;
- params.buf.num = 2;
- params.type = ODP_POOL_BUFFER;
-
- p = odp_pool_create("sched_group_pool", &params);
-
- CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
-
- for (i = 0; i < 3; i++) {
- odp_queue_param_t qp;
- odp_queue_t queue, from;
- odp_schedule_group_t mygrp[NUM_GROUPS];
- odp_queue_t queue_grp[NUM_GROUPS];
- int num = NUM_GROUPS;
-
- odp_queue_param_init(&qp);
- qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qp.sched.sync = sync[i];
- qp.sched.group = mygrp1;
-
- /* Create and populate a group in group 1 */
- queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
- CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
- CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);
-
- buf = odp_buffer_alloc(p);
-
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
-
- u32 = odp_buffer_addr(buf);
- u32[0] = MAGIC1;
-
- ev = odp_buffer_to_event(buf);
- rc = odp_queue_enq(queue_grp1, ev);
- CU_ASSERT(rc == 0);
- if (rc)
- odp_buffer_free(buf);
-
- /* Now create and populate a queue in group 2 */
- qp.sched.group = mygrp2;
- queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
- CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
- CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);
-
- buf = odp_buffer_alloc(p);
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
-
- u32 = odp_buffer_addr(buf);
- u32[0] = MAGIC2;
-
- ev = odp_buffer_to_event(buf);
- rc = odp_queue_enq(queue_grp2, ev);
- CU_ASSERT(rc == 0);
- if (rc)
- odp_buffer_free(buf);
-
- /* Swap between two groups. Application should serve both
- * groups to avoid potential head of line blocking in
- * scheduler. */
- mygrp[0] = mygrp1;
- mygrp[1] = mygrp2;
- queue_grp[0] = queue_grp1;
- queue_grp[1] = queue_grp2;
- j = 0;
-
- /* Ensure that each test run starts from mygrp1 */
- odp_schedule_group_leave(mygrp1, &mymask);
- odp_schedule_group_leave(mygrp2, &mymask);
- odp_schedule_group_join(mygrp1, &mymask);
-
- while (num) {
- queue = queue_grp[j];
- ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
-
- if (ev == ODP_EVENT_INVALID) {
- /* change group */
- rc = odp_schedule_group_leave(mygrp[j],
- &mymask);
- CU_ASSERT_FATAL(rc == 0);
-
- j = (j + 1) % NUM_GROUPS;
- rc = odp_schedule_group_join(mygrp[j],
- &mymask);
- CU_ASSERT_FATAL(rc == 0);
- continue;
- }
-
- CU_ASSERT_FATAL(from == queue);
-
- buf = odp_buffer_from_event(ev);
- u32 = odp_buffer_addr(buf);
-
- if (from == queue_grp1) {
- /* CU_ASSERT_FATAL needs these brackets */
- CU_ASSERT_FATAL(u32[0] == MAGIC1);
- } else {
- CU_ASSERT_FATAL(u32[0] == MAGIC2);
- }
-
- odp_buffer_free(buf);
-
- /* Tell scheduler we're about to request an event.
- * Not needed, but a convenient place to test this API.
- */
- odp_schedule_prefetch(1);
-
- num--;
- }
-
- /* Release schduler context and leave groups */
- odp_schedule_group_join(mygrp1, &mymask);
- odp_schedule_group_join(mygrp2, &mymask);
- CU_ASSERT(exit_schedule_loop() == 0);
- odp_schedule_group_leave(mygrp1, &mymask);
- odp_schedule_group_leave(mygrp2, &mymask);
-
- /* Done with queues for this round */
- CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
- CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);
-
- /* Verify we can no longer find our queues */
- CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
- ODP_QUEUE_INVALID);
- CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
- ODP_QUEUE_INVALID);
- }
-
- CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
- CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
- CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
-}
-
-static int chaos_thread(void *arg)
-{
- uint64_t i, wait;
- int rc;
- chaos_buf *cbuf;
- odp_event_t ev;
- odp_queue_t from;
- thread_args_t *args = (thread_args_t *)arg;
- test_globals_t *globals = args->globals;
- int me = odp_thread_id();
- odp_time_t start_time, end_time, diff;
-
- if (CHAOS_DEBUG)
- printf("Chaos thread %d starting...\n", me);
-
- /* Wait for all threads to start */
- odp_barrier_wait(&globals->barrier);
- start_time = odp_time_local();
-
- /* Run the test */
- wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
- for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
- ev = odp_schedule(&from, wait);
- if (ev == ODP_EVENT_INVALID)
- continue;
-
- cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
- CU_ASSERT_FATAL(cbuf != NULL);
- if (CHAOS_DEBUG)
- printf("Thread %d received event %" PRIu64
- " seq %" PRIu64
- " from Q %s, sending to Q %s\n",
- me, cbuf->evno, cbuf->seqno,
- globals->
- chaos_q
- [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
- globals->
- chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);
-
- rc = odp_queue_enq(
- globals->
- chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
- ev);
- CU_ASSERT_FATAL(rc == 0);
- }
-
- if (CHAOS_DEBUG)
- printf("Thread %d completed %d rounds...terminating\n",
- odp_thread_id(), CHAOS_NUM_EVENTS);
-
- exit_schedule_loop();
-
- end_time = odp_time_local();
- diff = odp_time_diff(end_time, start_time);
-
- printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
- odp_thread_id(), odp_time_to_ns(diff) / 1000);
-
- return 0;
-}
-
-static void chaos_run(unsigned int qtype)
-{
- odp_pool_t pool;
- odp_pool_param_t params;
- odp_queue_param_t qp;
- odp_buffer_t buf;
- chaos_buf *cbuf;
- test_globals_t *globals;
- thread_args_t *args;
- odp_shm_t shm;
- int i, rc;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
- ODP_SCHED_SYNC_ATOMIC,
- ODP_SCHED_SYNC_ORDERED};
- const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
- const char *const qtypes[] = {"parallel", "atomic", "ordered"};
-
- /* Set up the scheduling environment */
- shm = odp_shm_lookup(GLOBALS_SHM_NAME);
- CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
- globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
-
- shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
- CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
- args = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(args);
-
- args->globals = globals;
- args->cu_thr.numthrds = globals->num_workers;
-
- odp_queue_param_init(&qp);
- odp_pool_param_init(&params);
- params.buf.size = sizeof(chaos_buf);
- params.buf.align = 0;
- params.buf.num = CHAOS_NUM_EVENTS;
- params.type = ODP_POOL_BUFFER;
-
- pool = odp_pool_create("sched_chaos_pool", &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qp.sched.group = ODP_SCHED_GROUP_ALL;
-
- for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
- uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);
-
- qp.sched.sync = sync[ndx];
- snprintf(globals->chaos_q[i].name,
- sizeof(globals->chaos_q[i].name),
- "chaos queue %d - %s", i,
- qtypes[ndx]);
-
- globals->chaos_q[i].handle =
- odp_queue_create(globals->chaos_q[i].name, &qp);
- CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
- ODP_QUEUE_INVALID);
- rc = odp_queue_context_set(globals->chaos_q[i].handle,
- CHAOS_NDX_TO_PTR(i), 0);
- CU_ASSERT_FATAL(rc == 0);
- }
-
- /* Now populate the queues with the initial seed elements */
- for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
- buf = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
- cbuf = odp_buffer_addr(buf);
- cbuf->evno = i;
- cbuf->seqno = 0;
- rc = odp_queue_enq(
- globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
- odp_buffer_to_event(buf));
- CU_ASSERT_FATAL(rc == 0);
- }
-
- /* Run the test */
- odp_cunit_thread_create(chaos_thread, &args->cu_thr);
- odp_cunit_thread_exit(&args->cu_thr);
-
- if (CHAOS_DEBUG)
- printf("Thread %d returning from chaos threads..cleaning up\n",
- odp_thread_id());
-
- drain_queues();
- exit_schedule_loop();
-
- for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
- if (CHAOS_DEBUG)
- printf("Destroying queue %s\n",
- globals->chaos_q[i].name);
- rc = odp_queue_destroy(globals->chaos_q[i].handle);
- CU_ASSERT(rc == 0);
- }
-
- rc = odp_pool_destroy(pool);
- CU_ASSERT(rc == 0);
-}
-
-void scheduler_test_parallel(void)
-{
- chaos_run(0);
-}
-
-void scheduler_test_atomic(void)
-{
- chaos_run(1);
-}
-
-void scheduler_test_ordered(void)
-{
- chaos_run(2);
-}
-
-void scheduler_test_chaos(void)
-{
- chaos_run(3);
-}
-
-static int schedule_common_(void *arg)
-{
- thread_args_t *args = (thread_args_t *)arg;
- odp_schedule_sync_t sync;
- test_globals_t *globals;
- queue_context *qctx;
- buf_contents *bctx, *bctx_cpy;
- odp_pool_t pool;
- int locked;
- int num;
- odp_event_t ev;
- odp_buffer_t buf, buf_cpy;
- odp_queue_t from;
-
- globals = args->globals;
- sync = args->sync;
-
- pool = odp_pool_lookup(MSG_POOL_NAME);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- if (args->num_workers > 1)
- odp_barrier_wait(&globals->barrier);
-
- while (1) {
- from = ODP_QUEUE_INVALID;
- num = 0;
-
- odp_ticketlock_lock(&globals->lock);
- if (globals->buf_count == 0) {
- odp_ticketlock_unlock(&globals->lock);
- break;
- }
- odp_ticketlock_unlock(&globals->lock);
-
- if (args->enable_schd_multi) {
- odp_event_t events[BURST_BUF_SIZE],
- ev_cpy[BURST_BUF_SIZE];
- odp_buffer_t buf_cpy[BURST_BUF_SIZE];
- int j;
-
- num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
- events, BURST_BUF_SIZE);
- CU_ASSERT(num >= 0);
- CU_ASSERT(num <= BURST_BUF_SIZE);
- if (num == 0)
- continue;
-
- if (sync == ODP_SCHED_SYNC_ORDERED) {
- int ndx;
- int ndx_max;
- int rc;
-
- ndx_max = odp_queue_lock_count(from);
- CU_ASSERT_FATAL(ndx_max >= 0);
-
- qctx = odp_queue_context(from);
-
- for (j = 0; j < num; j++) {
- bctx = odp_buffer_addr(
- odp_buffer_from_event
- (events[j]));
-
- buf_cpy[j] = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buf_cpy[j] !=
- ODP_BUFFER_INVALID);
- bctx_cpy = odp_buffer_addr(buf_cpy[j]);
- memcpy(bctx_cpy, bctx,
- sizeof(buf_contents));
- bctx_cpy->output_sequence =
- bctx_cpy->sequence;
- ev_cpy[j] =
- odp_buffer_to_event(buf_cpy[j]);
- }
-
- rc = odp_queue_enq_multi(qctx->pq_handle,
- ev_cpy, num);
- CU_ASSERT(rc == num);
-
- bctx = odp_buffer_addr(
- odp_buffer_from_event(events[0]));
- for (ndx = 0; ndx < ndx_max; ndx++) {
- odp_schedule_order_lock(ndx);
- CU_ASSERT(bctx->sequence ==
- qctx->lock_sequence[ndx]);
- qctx->lock_sequence[ndx] += num;
- odp_schedule_order_unlock(ndx);
- }
- }
-
- for (j = 0; j < num; j++)
- odp_event_free(events[j]);
- } else {
- ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
- if (ev == ODP_EVENT_INVALID)
- continue;
-
- buf = odp_buffer_from_event(ev);
- num = 1;
- if (sync == ODP_SCHED_SYNC_ORDERED) {
- int ndx;
- int ndx_max;
- int rc;
-
- ndx_max = odp_queue_lock_count(from);
- CU_ASSERT_FATAL(ndx_max >= 0);
-
- qctx = odp_queue_context(from);
- bctx = odp_buffer_addr(buf);
- buf_cpy = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
- bctx_cpy = odp_buffer_addr(buf_cpy);
- memcpy(bctx_cpy, bctx, sizeof(buf_contents));
- bctx_cpy->output_sequence = bctx_cpy->sequence;
-
- rc = odp_queue_enq(qctx->pq_handle,
- odp_buffer_to_event
- (buf_cpy));
- CU_ASSERT(rc == 0);
-
- for (ndx = 0; ndx < ndx_max; ndx++) {
- odp_schedule_order_lock(ndx);
- CU_ASSERT(bctx->sequence ==
- qctx->lock_sequence[ndx]);
- qctx->lock_sequence[ndx] += num;
- odp_schedule_order_unlock(ndx);
- }
- }
-
- odp_buffer_free(buf);
- }
-
- if (args->enable_excl_atomic) {
- locked = odp_spinlock_trylock(&globals->atomic_lock);
- CU_ASSERT(locked != 0);
- CU_ASSERT(from != ODP_QUEUE_INVALID);
- if (locked) {
- int cnt;
- odp_time_t time = ODP_TIME_NULL;
- /* Do some work here to keep the thread busy */
- for (cnt = 0; cnt < 1000; cnt++)
- time = odp_time_sum(time,
- odp_time_local());
-
- odp_spinlock_unlock(&globals->atomic_lock);
- }
- }
-
- if (sync == ODP_SCHED_SYNC_ATOMIC)
- odp_schedule_release_atomic();
-
- if (sync == ODP_SCHED_SYNC_ORDERED)
- odp_schedule_release_ordered();
-
- odp_ticketlock_lock(&globals->lock);
-
- globals->buf_count -= num;
-
- if (globals->buf_count < 0) {
- odp_ticketlock_unlock(&globals->lock);
- CU_FAIL_FATAL("Buffer counting failed");
- }
-
- odp_ticketlock_unlock(&globals->lock);
- }
-
- if (args->num_workers > 1)
- odp_barrier_wait(&globals->barrier);
-
- if (sync == ODP_SCHED_SYNC_ORDERED)
- locked = odp_ticketlock_trylock(&globals->lock);
- else
- locked = 0;
-
- if (locked && globals->buf_count_cpy > 0) {
- odp_event_t ev;
- odp_queue_t pq;
- uint64_t seq;
- uint64_t bcount = 0;
- int i, j;
- char name[32];
- uint64_t num_bufs = args->num_bufs;
- uint64_t buf_count = globals->buf_count_cpy;
-
- for (i = 0; i < args->num_prio; i++) {
- for (j = 0; j < args->num_queues; j++) {
- snprintf(name, sizeof(name),
- "plain_%d_%d_o", i, j);
- pq = odp_queue_lookup(name);
- CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);
-
- seq = 0;
- while (1) {
- ev = odp_queue_deq(pq);
-
- if (ev == ODP_EVENT_INVALID) {
- CU_ASSERT(seq == num_bufs);
- break;
- }
-
- bctx = odp_buffer_addr(
- odp_buffer_from_event(ev));
-
- CU_ASSERT(bctx->sequence == seq);
- seq++;
- bcount++;
- odp_event_free(ev);
- }
- }
- }
- CU_ASSERT(bcount == buf_count);
- globals->buf_count_cpy = 0;
- }
-
- if (locked)
- odp_ticketlock_unlock(&globals->lock);
-
- /* Clear scheduler atomic / ordered context between tests */
- num = exit_schedule_loop();
-
- CU_ASSERT(num == 0);
-
- if (num)
- printf("\nDROPPED %i events\n\n", num);
-
- return 0;
-}
-
-static void fill_queues(thread_args_t *args)
-{
- odp_schedule_sync_t sync;
- int num_queues, num_prio;
- odp_pool_t pool;
- int i, j, k;
- int buf_count = 0;
- test_globals_t *globals;
- char name[32];
- int ret;
- odp_buffer_t buf;
- odp_event_t ev;
-
- globals = args->globals;
- sync = args->sync;
- num_queues = args->num_queues;
- num_prio = args->num_prio;
-
- pool = odp_pool_lookup(MSG_POOL_NAME);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- for (i = 0; i < num_prio; i++) {
- for (j = 0; j < num_queues; j++) {
- odp_queue_t queue;
-
- switch (sync) {
- case ODP_SCHED_SYNC_PARALLEL:
- snprintf(name, sizeof(name),
- "sched_%d_%d_n", i, j);
- break;
- case ODP_SCHED_SYNC_ATOMIC:
- snprintf(name, sizeof(name),
- "sched_%d_%d_a", i, j);
- break;
- case ODP_SCHED_SYNC_ORDERED:
- snprintf(name, sizeof(name),
- "sched_%d_%d_o", i, j);
- break;
- default:
- CU_ASSERT_FATAL(0);
- break;
- }
-
- queue = odp_queue_lookup(name);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- for (k = 0; k < args->num_bufs; k++) {
- buf = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
- ev = odp_buffer_to_event(buf);
- if (sync == ODP_SCHED_SYNC_ORDERED) {
- queue_context *qctx =
- odp_queue_context(queue);
- buf_contents *bctx =
- odp_buffer_addr(buf);
- bctx->sequence = qctx->sequence++;
- }
-
- ret = odp_queue_enq(queue, ev);
- CU_ASSERT_FATAL(ret == 0);
-
- if (ret)
- odp_buffer_free(buf);
- else
- buf_count++;
- }
- }
- }
-
- globals->buf_count = buf_count;
- globals->buf_count_cpy = buf_count;
-}
-
-static void reset_queues(thread_args_t *args)
-{
- int i, j, k;
- int num_prio = args->num_prio;
- int num_queues = args->num_queues;
- char name[32];
-
- for (i = 0; i < num_prio; i++) {
- for (j = 0; j < num_queues; j++) {
- odp_queue_t queue;
-
- snprintf(name, sizeof(name),
- "sched_%d_%d_o", i, j);
- queue = odp_queue_lookup(name);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- for (k = 0; k < args->num_bufs; k++) {
- queue_context *qctx =
- odp_queue_context(queue);
- int ndx;
- int ndx_max;
-
- ndx_max = odp_queue_lock_count(queue);
- CU_ASSERT_FATAL(ndx_max >= 0);
- qctx->sequence = 0;
- for (ndx = 0; ndx < ndx_max; ndx++)
- qctx->lock_sequence[ndx] = 0;
- }
- }
- }
-}
-
-static void schedule_common(odp_schedule_sync_t sync, int num_queues,
- int num_prio, int enable_schd_multi)
-{
- thread_args_t args;
- odp_shm_t shm;
- test_globals_t *globals;
-
- shm = odp_shm_lookup(GLOBALS_SHM_NAME);
- CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
- globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
-
- memset(&args, 0, sizeof(thread_args_t));
- args.globals = globals;
- args.sync = sync;
- args.num_queues = num_queues;
- args.num_prio = num_prio;
- args.num_bufs = BUFS_PER_QUEUE;
- args.num_workers = 1;
- args.enable_schd_multi = enable_schd_multi;
- args.enable_excl_atomic = 0; /* Not needed with a single CPU */
-
- fill_queues(&args);
-
- schedule_common_(&args);
- if (sync == ODP_SCHED_SYNC_ORDERED)
- reset_queues(&args);
-}
-
-static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
- int num_prio, int enable_schd_multi,
- int enable_excl_atomic)
-{
- odp_shm_t shm;
- test_globals_t *globals;
- thread_args_t *args;
-
- shm = odp_shm_lookup(GLOBALS_SHM_NAME);
- CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
- globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
-
- shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
- CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
- args = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(args);
-
- args->globals = globals;
- args->sync = sync;
- args->num_queues = num_queues;
- args->num_prio = num_prio;
- if (enable_excl_atomic)
- args->num_bufs = BUFS_PER_QUEUE_EXCL;
- else
- args->num_bufs = BUFS_PER_QUEUE;
- args->num_workers = globals->num_workers;
- args->enable_schd_multi = enable_schd_multi;
- args->enable_excl_atomic = enable_excl_atomic;
-
- fill_queues(args);
-
- /* Create and launch worker threads */
- args->cu_thr.numthrds = globals->num_workers;
- odp_cunit_thread_create(schedule_common_, &args->cu_thr);
-
- /* Wait for worker threads to terminate */
- odp_cunit_thread_exit(&args->cu_thr);
-
- /* Cleanup ordered queues for next pass */
- if (sync == ODP_SCHED_SYNC_ORDERED)
- reset_queues(args);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL */
-void scheduler_test_1q_1t_n(void)
-{
- schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_ONE);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
-void scheduler_test_1q_1t_a(void)
-{
- schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
-void scheduler_test_1q_1t_o(void)
-{
- schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL */
-void scheduler_test_mq_1t_n(void)
-{
- /* Only one priority involved in these tests, but use
- the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_ONE);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
-void scheduler_test_mq_1t_a(void)
-{
- schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
-void scheduler_test_mq_1t_o(void)
-{
- schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL */
-void scheduler_test_mq_1t_prio_n(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
-void scheduler_test_mq_1t_prio_a(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
-void scheduler_test_mq_1t_prio_o(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL */
-void scheduler_test_mq_mt_prio_n(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE,
- DISABLE_EXCL_ATOMIC);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
-void scheduler_test_mq_mt_prio_a(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
- DISABLE_EXCL_ATOMIC);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
-void scheduler_test_mq_mt_prio_o(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
- DISABLE_EXCL_ATOMIC);
-}
-
-/* 1 queue many threads check exclusive access on ATOMIC queues */
-void scheduler_test_1q_mt_a_excl(void)
-{
- parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
- ENABLE_EXCL_ATOMIC);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL multi */
-void scheduler_test_multi_1q_1t_n(void)
-{
- schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_MULTI);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
-void scheduler_test_multi_1q_1t_a(void)
-{
- schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
-}
-
-/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
-void scheduler_test_multi_1q_1t_o(void)
-{
- schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL multi */
-void scheduler_test_multi_mq_1t_n(void)
-{
- /* Only one priority involved in these tests, but use
- the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_MULTI);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
-void scheduler_test_multi_mq_1t_a(void)
-{
- schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
-}
-
-/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
-void scheduler_test_multi_mq_1t_o(void)
-{
- schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL multi */
-void scheduler_test_multi_mq_1t_prio_n(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
-void scheduler_test_multi_mq_1t_prio_a(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
-}
-
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
-void scheduler_test_multi_mq_1t_prio_o(void)
-{
- int prio = odp_schedule_num_prio();
-
- schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL multi */
-void scheduler_test_multi_mq_mt_prio_n(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI, 0);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
-void scheduler_test_multi_mq_mt_prio_a(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
-}
-
-/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
-void scheduler_test_multi_mq_mt_prio_o(void)
-{
- int prio = odp_schedule_num_prio();
-
- parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
-}
-
-/* 1 queue many threads check exclusive access on ATOMIC queues multi */
-void scheduler_test_multi_1q_mt_a_excl(void)
-{
- parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
- ENABLE_EXCL_ATOMIC);
-}
-
-void scheduler_test_pause_resume(void)
-{
- odp_queue_t queue;
- odp_buffer_t buf;
- odp_event_t ev;
- odp_queue_t from;
- int i;
- int local_bufs = 0;
- int ret;
-
- queue = odp_queue_lookup("sched_0_0_n");
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = odp_pool_lookup(MSG_POOL_NAME);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- for (i = 0; i < NUM_BUFS_PAUSE; i++) {
- buf = odp_buffer_alloc(pool);
- CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
- ev = odp_buffer_to_event(buf);
- ret = odp_queue_enq(queue, ev);
- CU_ASSERT(ret == 0);
-
- if (ret)
- odp_buffer_free(buf);
- }
-
- for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
- from = ODP_QUEUE_INVALID;
- ev = odp_schedule(&from, ODP_SCHED_WAIT);
- CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
- CU_ASSERT(from == queue);
- buf = odp_buffer_from_event(ev);
- odp_buffer_free(buf);
- }
-
- odp_schedule_pause();
-
- while (1) {
- ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
- if (ev == ODP_EVENT_INVALID)
- break;
-
- CU_ASSERT(from == queue);
- buf = odp_buffer_from_event(ev);
- odp_buffer_free(buf);
- local_bufs++;
- }
-
- CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
-
- odp_schedule_resume();
-
- for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
- ev = odp_schedule(&from, ODP_SCHED_WAIT);
- CU_ASSERT(from == queue);
- buf = odp_buffer_from_event(ev);
- odp_buffer_free(buf);
- }
-
- ret = exit_schedule_loop();
-
- CU_ASSERT(ret == 0);
-}
-
-static int create_queues(void)
-{
- int i, j, prios, rc;
- odp_queue_capability_t capa;
- odp_pool_param_t params;
- odp_buffer_t queue_ctx_buf;
- queue_context *qctx, *pqctx;
- uint32_t ndx;
- odp_queue_param_t p;
-
- if (odp_queue_capability(&capa) < 0) {
- printf("Queue capability query failed\n");
- return -1;
- }
-
- /* Limit to test maximum */
- if (capa.max_ordered_locks > MAX_ORDERED_LOCKS) {
- capa.max_ordered_locks = MAX_ORDERED_LOCKS;
- printf("Testing only %u ordered locks\n",
- capa.max_ordered_locks);
- }
-
- prios = odp_schedule_num_prio();
- odp_pool_param_init(&params);
- params.buf.size = sizeof(queue_context);
- params.buf.num = prios * QUEUES_PER_PRIO * 2;
- params.type = ODP_POOL_BUFFER;
-
- queue_ctx_pool = odp_pool_create(QUEUE_CTX_POOL_NAME, &params);
-
- if (queue_ctx_pool == ODP_POOL_INVALID) {
- printf("Pool creation failed (queue ctx).\n");
- return -1;
- }
-
- for (i = 0; i < prios; i++) {
- odp_queue_param_init(&p);
- p.type = ODP_QUEUE_TYPE_SCHED;
- p.sched.prio = i;
-
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
- /* Per sched sync type */
- char name[32];
- odp_queue_t q, pq;
-
- snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
- p.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- q = odp_queue_create(name, &p);
-
- if (q == ODP_QUEUE_INVALID) {
- printf("Schedule queue create failed.\n");
- return -1;
- }
-
- snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
- p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- q = odp_queue_create(name, &p);
-
- if (q == ODP_QUEUE_INVALID) {
- printf("Schedule queue create failed.\n");
- return -1;
- }
-
- snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
- pq = odp_queue_create(name, NULL);
- if (pq == ODP_QUEUE_INVALID) {
- printf("Plain queue create failed.\n");
- return -1;
- }
-
- queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
-
- if (queue_ctx_buf == ODP_BUFFER_INVALID) {
- printf("Cannot allocate plain queue ctx buf\n");
- return -1;
- }
-
- pqctx = odp_buffer_addr(queue_ctx_buf);
- pqctx->ctx_handle = queue_ctx_buf;
- pqctx->sequence = 0;
-
- rc = odp_queue_context_set(pq, pqctx, 0);
-
- if (rc != 0) {
- printf("Cannot set plain queue context\n");
- return -1;
- }
-
- snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
- p.sched.sync = ODP_SCHED_SYNC_ORDERED;
- p.sched.lock_count = capa.max_ordered_locks;
- q = odp_queue_create(name, &p);
-
- if (q == ODP_QUEUE_INVALID) {
- printf("Schedule queue create failed.\n");
- return -1;
- }
- if (odp_queue_lock_count(q) !=
- (int)capa.max_ordered_locks) {
- printf("Queue %" PRIu64 " created with "
- "%d locks instead of expected %d\n",
- odp_queue_to_u64(q),
- odp_queue_lock_count(q),
- capa.max_ordered_locks);
- return -1;
- }
-
- queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
-
- if (queue_ctx_buf == ODP_BUFFER_INVALID) {
- printf("Cannot allocate queue ctx buf\n");
- return -1;
- }
-
- qctx = odp_buffer_addr(queue_ctx_buf);
- qctx->ctx_handle = queue_ctx_buf;
- qctx->pq_handle = pq;
- qctx->sequence = 0;
-
- for (ndx = 0;
- ndx < capa.max_ordered_locks;
- ndx++) {
- qctx->lock_sequence[ndx] = 0;
- }
-
- rc = odp_queue_context_set(q, qctx, 0);
-
- if (rc != 0) {
- printf("Cannot set queue context\n");
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-int scheduler_suite_init(void)
-{
- odp_cpumask_t mask;
- odp_shm_t shm;
- odp_pool_t pool;
- test_globals_t *globals;
- thread_args_t *args;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.buf.size = BUF_SIZE;
- params.buf.align = 0;
- params.buf.num = MSG_POOL_SIZE;
- params.type = ODP_POOL_BUFFER;
-
- pool = odp_pool_create(MSG_POOL_NAME, &params);
-
- if (pool == ODP_POOL_INVALID) {
- printf("Pool creation failed (msg).\n");
- return -1;
- }
-
- shm = odp_shm_reserve(GLOBALS_SHM_NAME,
- sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
-
- globals = odp_shm_addr(shm);
-
- if (!globals) {
- printf("Shared memory reserve failed (globals).\n");
- return -1;
- }
-
- memset(globals, 0, sizeof(test_globals_t));
-
- globals->num_workers = odp_cpumask_default_worker(&mask, 0);
- if (globals->num_workers > MAX_WORKERS)
- globals->num_workers = MAX_WORKERS;
-
- shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
- ODP_CACHE_LINE_SIZE, 0);
- args = odp_shm_addr(shm);
-
- if (!args) {
- printf("Shared memory reserve failed (args).\n");
- return -1;
- }
-
- memset(args, 0, sizeof(thread_args_t));
-
- /* Barrier to sync test case execution */
- odp_barrier_init(&globals->barrier, globals->num_workers);
- odp_ticketlock_init(&globals->lock);
- odp_spinlock_init(&globals->atomic_lock);
-
- if (create_queues() != 0)
- return -1;
-
- return 0;
-}
-
-static int destroy_queue(const char *name)
-{
- odp_queue_t q;
- queue_context *qctx;
-
- q = odp_queue_lookup(name);
-
- if (q == ODP_QUEUE_INVALID)
- return -1;
- qctx = odp_queue_context(q);
- if (qctx)
- odp_buffer_free(qctx->ctx_handle);
-
- return odp_queue_destroy(q);
-}
-
-static int destroy_queues(void)
-{
- int i, j, prios;
-
- prios = odp_schedule_num_prio();
-
- for (i = 0; i < prios; i++) {
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
- char name[32];
-
- snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
- if (destroy_queue(name) != 0)
- return -1;
-
- snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
- if (destroy_queue(name) != 0)
- return -1;
-
- snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
- if (destroy_queue(name) != 0)
- return -1;
-
- snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
- if (destroy_queue(name) != 0)
- return -1;
- }
- }
-
- if (odp_pool_destroy(queue_ctx_pool) != 0) {
- fprintf(stderr, "error: failed to destroy queue ctx pool\n");
- return -1;
- }
-
- return 0;
-}
-
-int scheduler_suite_term(void)
-{
- odp_pool_t pool;
- odp_shm_t shm;
-
- if (destroy_queues() != 0) {
- fprintf(stderr, "error: failed to destroy queues\n");
- return -1;
- }
-
- pool = odp_pool_lookup(MSG_POOL_NAME);
- if (odp_pool_destroy(pool) != 0)
- fprintf(stderr, "error: failed to destroy pool\n");
-
- shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
- if (odp_shm_free(shm) != 0)
- fprintf(stderr, "error: failed to free shm\n");
-
- shm = odp_shm_lookup(GLOBALS_SHM_NAME);
- if (odp_shm_free(shm) != 0)
- fprintf(stderr, "error: failed to free shm\n");
-
- return 0;
-}
-
-odp_testinfo_t scheduler_suite[] = {
- ODP_TEST_INFO(scheduler_test_wait_time),
- ODP_TEST_INFO(scheduler_test_num_prio),
- ODP_TEST_INFO(scheduler_test_queue_destroy),
- ODP_TEST_INFO(scheduler_test_groups),
- ODP_TEST_INFO(scheduler_test_pause_resume),
- ODP_TEST_INFO(scheduler_test_parallel),
- ODP_TEST_INFO(scheduler_test_atomic),
- ODP_TEST_INFO(scheduler_test_ordered),
- ODP_TEST_INFO(scheduler_test_chaos),
- ODP_TEST_INFO(scheduler_test_1q_1t_n),
- ODP_TEST_INFO(scheduler_test_1q_1t_a),
- ODP_TEST_INFO(scheduler_test_1q_1t_o),
- ODP_TEST_INFO(scheduler_test_mq_1t_n),
- ODP_TEST_INFO(scheduler_test_mq_1t_a),
- ODP_TEST_INFO(scheduler_test_mq_1t_o),
- ODP_TEST_INFO(scheduler_test_mq_1t_prio_n),
- ODP_TEST_INFO(scheduler_test_mq_1t_prio_a),
- ODP_TEST_INFO(scheduler_test_mq_1t_prio_o),
- ODP_TEST_INFO(scheduler_test_mq_mt_prio_n),
- ODP_TEST_INFO(scheduler_test_mq_mt_prio_a),
- ODP_TEST_INFO(scheduler_test_mq_mt_prio_o),
- ODP_TEST_INFO(scheduler_test_1q_mt_a_excl),
- ODP_TEST_INFO(scheduler_test_multi_1q_1t_n),
- ODP_TEST_INFO(scheduler_test_multi_1q_1t_a),
- ODP_TEST_INFO(scheduler_test_multi_1q_1t_o),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_n),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_a),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_o),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_n),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_a),
- ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_o),
- ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_n),
- ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_a),
- ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_o),
- ODP_TEST_INFO(scheduler_test_multi_1q_mt_a_excl),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t scheduler_suites[] = {
- {"Scheduler",
- scheduler_suite_init, scheduler_suite_term, scheduler_suite
- },
- ODP_SUITE_INFO_NULL,
-};
-
-int scheduler_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(scheduler_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/scheduler/scheduler.h b/test/common_plat/validation/api/scheduler/scheduler.h
deleted file mode 100644
index a619d89b2..000000000
--- a/test/common_plat/validation/api/scheduler/scheduler.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_SCHEDULER_H_
-#define _ODP_TEST_SCHEDULER_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void scheduler_test_wait_time(void);
-void scheduler_test_num_prio(void);
-void scheduler_test_queue_destroy(void);
-void scheduler_test_groups(void);
-void scheduler_test_chaos(void);
-void scheduler_test_parallel(void);
-void scheduler_test_atomic(void);
-void scheduler_test_ordered(void);
-void scheduler_test_1q_1t_n(void);
-void scheduler_test_1q_1t_a(void);
-void scheduler_test_1q_1t_o(void);
-void scheduler_test_mq_1t_n(void);
-void scheduler_test_mq_1t_a(void);
-void scheduler_test_mq_1t_o(void);
-void scheduler_test_mq_1t_prio_n(void);
-void scheduler_test_mq_1t_prio_a(void);
-void scheduler_test_mq_1t_prio_o(void);
-void scheduler_test_mq_mt_prio_n(void);
-void scheduler_test_mq_mt_prio_a(void);
-void scheduler_test_mq_mt_prio_o(void);
-void scheduler_test_1q_mt_a_excl(void);
-void scheduler_test_multi_1q_1t_n(void);
-void scheduler_test_multi_1q_1t_a(void);
-void scheduler_test_multi_1q_1t_o(void);
-void scheduler_test_multi_mq_1t_n(void);
-void scheduler_test_multi_mq_1t_a(void);
-void scheduler_test_multi_mq_1t_o(void);
-void scheduler_test_multi_mq_1t_prio_n(void);
-void scheduler_test_multi_mq_1t_prio_a(void);
-void scheduler_test_multi_mq_1t_prio_o(void);
-void scheduler_test_multi_mq_mt_prio_n(void);
-void scheduler_test_multi_mq_mt_prio_a(void);
-void scheduler_test_multi_mq_mt_prio_o(void);
-void scheduler_test_multi_1q_mt_a_excl(void);
-void scheduler_test_pause_resume(void);
-
-/* test arrays: */
-extern odp_testinfo_t scheduler_suite[];
-
-/* test array init/term functions: */
-int scheduler_suite_init(void);
-int scheduler_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t scheduler_suites[];
-
-/* main test program: */
-int scheduler_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/scheduler/scheduler_main.c b/test/common_plat/validation/api/scheduler/scheduler_main.c
deleted file mode 100644
index 57cfa5fc5..000000000
--- a/test/common_plat/validation/api/scheduler/scheduler_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "scheduler.h"
-
-int main(int argc, char *argv[])
-{
- return scheduler_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/shmem/Makefile.am b/test/common_plat/validation/api/shmem/Makefile.am
deleted file mode 100644
index da88af662..000000000
--- a/test/common_plat/validation/api/shmem/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestshmem.la
-libtestshmem_la_SOURCES = shmem.c
-
-test_PROGRAMS = shmem_main$(EXEEXT)
-dist_shmem_main_SOURCES = shmem_main.c
-shmem_main_LDADD = libtestshmem.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = shmem.h
diff --git a/test/common_plat/validation/api/shmem/shmem.h b/test/common_plat/validation/api/shmem/shmem.h
deleted file mode 100644
index 092aa8005..000000000
--- a/test/common_plat/validation/api/shmem/shmem.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_SHMEM_H_
-#define _ODP_TEST_SHMEM_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void shmem_test_basic(void);
-void shmem_test_reserve_after_fork(void);
-void shmem_test_singleva_after_fork(void);
-void shmem_test_stress(void);
-
-/* test arrays: */
-extern odp_testinfo_t shmem_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t shmem_suites[];
-
-/* main test program: */
-int shmem_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/shmem/shmem_main.c b/test/common_plat/validation/api/shmem/shmem_main.c
deleted file mode 100644
index 4c6913051..000000000
--- a/test/common_plat/validation/api/shmem/shmem_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "shmem.h"
-
-int main(int argc, char *argv[])
-{
- return shmem_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/std_clib/.gitignore b/test/common_plat/validation/api/std_clib/.gitignore
deleted file mode 100644
index 37828330a..000000000
--- a/test/common_plat/validation/api/std_clib/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-std_clib_main
diff --git a/test/common_plat/validation/api/std_clib/Makefile.am b/test/common_plat/validation/api/std_clib/Makefile.am
deleted file mode 100644
index e2fc0ccf3..000000000
--- a/test/common_plat/validation/api/std_clib/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libteststd_clib.la
-libteststd_clib_la_SOURCES = std_clib.c
-
-test_PROGRAMS = std_clib_main$(EXEEXT)
-dist_std_clib_main_SOURCES = std_clib_main.c
-std_clib_main_LDADD = libteststd_clib.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = std_clib.h
diff --git a/test/common_plat/validation/api/std_clib/std_clib.h b/test/common_plat/validation/api/std_clib/std_clib.h
deleted file mode 100644
index 2804f27e2..000000000
--- a/test/common_plat/validation/api/std_clib/std_clib.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_STD_CLIB_H_
-#define _ODP_TEST_STD_CLIB_H_
-
-#include <odp_cunit_common.h>
-
-/* test arrays: */
-extern odp_testinfo_t std_clib_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t std_clib_suites[];
-
-/* main test program: */
-int std_clib_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/std_clib/std_clib_main.c b/test/common_plat/validation/api/std_clib/std_clib_main.c
deleted file mode 100644
index ef6f2736f..000000000
--- a/test/common_plat/validation/api/std_clib/std_clib_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "std_clib.h"
-
-int main(int argc, char *argv[])
-{
- return std_clib_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/system/Makefile.am b/test/common_plat/validation/api/system/Makefile.am
deleted file mode 100644
index 3789c36c2..000000000
--- a/test/common_plat/validation/api/system/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestsystem.la
-libtestsystem_la_SOURCES = system.c
-
-test_PROGRAMS = system_main$(EXEEXT)
-dist_system_main_SOURCES = system_main.c
-system_main_LDADD = libtestsystem.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = system.h
diff --git a/test/common_plat/validation/api/system/system.c b/test/common_plat/validation/api/system/system.c
deleted file mode 100644
index 57ff34eb9..000000000
--- a/test/common_plat/validation/api/system/system.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <ctype.h>
-#include <odp_api.h>
-#include <odp/api/cpumask.h>
-#include "odp_cunit_common.h"
-#include "test_debug.h"
-#include "system.h"
-
-#define DIFF_TRY_NUM 160
-#define RES_TRY_NUM 10
-
-void system_test_odp_version_numbers(void)
-{
- int char_ok = 0;
- char version_string[128];
- char *s = version_string;
-
- strncpy(version_string, odp_version_api_str(),
- sizeof(version_string) - 1);
-
- while (*s) {
- if (isdigit((int)*s) || (strncmp(s, ".", 1) == 0)) {
- char_ok = 1;
- s++;
- } else {
- char_ok = 0;
- LOG_DBG("\nBAD VERSION=%s\n", version_string);
- break;
- }
- }
- CU_ASSERT(char_ok);
-}
-
-void system_test_odp_cpu_count(void)
-{
- int cpus;
-
- cpus = odp_cpu_count();
- CU_ASSERT(0 < cpus);
-}
-
-void system_test_odp_cpu_cycles(void)
-{
- uint64_t c2, c1;
-
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(100);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c2 != c1);
-}
-
-void system_test_odp_cpu_cycles_max(void)
-{
- uint64_t c2, c1;
- uint64_t max1, max2;
-
- max1 = odp_cpu_cycles_max();
- odp_time_wait_ns(100);
- max2 = odp_cpu_cycles_max();
-
- CU_ASSERT(max1 >= UINT32_MAX / 2);
- CU_ASSERT(max1 == max2);
-
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(1000);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c1 <= max1 && c2 <= max1);
-}
-
-void system_test_odp_cpu_cycles_resolution(void)
-{
- int i;
- uint64_t res;
- uint64_t c2, c1, max;
-
- max = odp_cpu_cycles_max();
-
- res = odp_cpu_cycles_resolution();
- CU_ASSERT(res != 0);
- CU_ASSERT(res < max / 1024);
-
- for (i = 0; i < RES_TRY_NUM; i++) {
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c1 % res == 0);
- CU_ASSERT(c2 % res == 0);
- }
-}
-
-void system_test_odp_cpu_cycles_diff(void)
-{
- int i;
- uint64_t c2, c1, c3, max;
- uint64_t tmp, diff, res;
-
- res = odp_cpu_cycles_resolution();
- max = odp_cpu_cycles_max();
-
- /* check resolution for wrap */
- c1 = max - 2 * res;
- do
- c2 = odp_cpu_cycles();
- while (c1 < c2);
-
- diff = odp_cpu_cycles_diff(c1, c1);
- CU_ASSERT(diff == 0);
-
- /* wrap */
- tmp = c2 + (max - c1) + res;
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff == tmp);
- CU_ASSERT(diff % res == 0);
-
- /* no wrap, revert args */
- tmp = c1 - c2;
- diff = odp_cpu_cycles_diff(c1, c2);
- CU_ASSERT(diff == tmp);
- CU_ASSERT(diff % res == 0);
-
- c3 = odp_cpu_cycles();
- for (i = 0; i < DIFF_TRY_NUM; i++) {
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c2 != c1);
- CU_ASSERT(c1 % res == 0);
- CU_ASSERT(c2 % res == 0);
- CU_ASSERT(c1 <= max && c2 <= max);
-
- if (c2 > c1)
- tmp = c2 - c1;
- else
- tmp = c2 + (max - c1) + res;
-
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff == tmp);
- CU_ASSERT(diff % res == 0);
-
- /* wrap is detected and verified */
- if (c2 < c1)
- break;
- }
-
- /* wrap was detected, no need to continue */
- if (i < DIFF_TRY_NUM)
- return;
-
- /* wrap has to be detected if possible */
- CU_ASSERT(max > UINT32_MAX);
- CU_ASSERT((max - c3) > UINT32_MAX);
-
- printf("wrap was not detected...");
-}
-
-void system_test_odp_sys_cache_line_size(void)
-{
- uint64_t cache_size;
-
- cache_size = odp_sys_cache_line_size();
- CU_ASSERT(0 < cache_size);
- CU_ASSERT(ODP_CACHE_LINE_SIZE == cache_size);
-}
-
-void system_test_odp_cpu_model_str(void)
-{
- char model[128];
-
- snprintf(model, 128, "%s", odp_cpu_model_str());
- CU_ASSERT(strlen(model) > 0);
- CU_ASSERT(strlen(model) < 127);
-}
-
-void system_test_odp_cpu_model_str_id(void)
-{
- char model[128];
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
- CU_ASSERT(strlen(model) > 0);
- CU_ASSERT(strlen(model) < 127);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
-void system_test_odp_sys_page_size(void)
-{
- uint64_t page;
-
- page = odp_sys_page_size();
- CU_ASSERT(0 < page);
- CU_ASSERT(ODP_PAGE_SIZE == page);
-}
-
-void system_test_odp_sys_huge_page_size(void)
-{
- uint64_t page;
-
- page = odp_sys_huge_page_size();
- CU_ASSERT(0 < page);
-}
-
-int system_check_odp_cpu_hz(void)
-{
- if (odp_cpu_hz() == 0) {
- fprintf(stderr, "odp_cpu_hz is not supported, skipping\n");
- return ODP_TEST_INACTIVE;
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-void system_test_odp_cpu_hz(void)
-{
- uint64_t hz = odp_cpu_hz();
-
- /* Test value sanity: less than 10GHz */
- CU_ASSERT(hz < 10 * GIGA_HZ);
-
- /* larger than 1kHz */
- CU_ASSERT(hz > 1 * KILO_HZ);
-}
-
-int system_check_odp_cpu_hz_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_id(cpu);
- if (hz == 0) {
- fprintf(stderr, "cpu %d does not support"
- " odp_cpu_hz_id(),"
- "skip that test\n", cpu);
- return ODP_TEST_INACTIVE;
- }
- cpu = odp_cpumask_next(&mask, cpu);
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-void system_test_odp_cpu_hz_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_id(cpu);
- /* Test value sanity: less than 10GHz */
- CU_ASSERT(hz < 10 * GIGA_HZ);
- /* larger than 1kHz */
- CU_ASSERT(hz > 1 * KILO_HZ);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
-void system_test_odp_cpu_hz_max(void)
-{
- uint64_t hz;
-
- hz = odp_cpu_hz_max();
- CU_ASSERT(0 < hz);
-}
-
-void system_test_odp_cpu_hz_max_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_max_id(cpu);
- CU_ASSERT(0 < hz);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
-odp_testinfo_t system_suite[] = {
- ODP_TEST_INFO(system_test_odp_version_numbers),
- ODP_TEST_INFO(system_test_odp_cpu_count),
- ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
- ODP_TEST_INFO(system_test_odp_cpu_model_str),
- ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
- ODP_TEST_INFO(system_test_odp_sys_page_size),
- ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
- system_check_odp_cpu_hz),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
- system_check_odp_cpu_hz_id),
- ODP_TEST_INFO(system_test_odp_cpu_hz_max),
- ODP_TEST_INFO(system_test_odp_cpu_hz_max_id),
- ODP_TEST_INFO(system_test_odp_cpu_cycles),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_max),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_resolution),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_diff),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t system_suites[] = {
- {"System Info", NULL, NULL, system_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int system_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(system_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/system/system.h b/test/common_plat/validation/api/system/system.h
deleted file mode 100644
index cbb994eb0..000000000
--- a/test/common_plat/validation/api/system/system.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_SYSTEM_H_
-#define _ODP_TEST_SYSTEM_H_
-
-#include <odp_cunit_common.h>
-
-#define GIGA_HZ 1000000000ULL
-#define KILO_HZ 1000ULL
-
-/* test functions: */
-void system_test_odp_version_numbers(void);
-void system_test_odp_cpu_count(void);
-void system_test_odp_sys_cache_line_size(void);
-void system_test_odp_cpu_model_str(void);
-void system_test_odp_cpu_model_str_id(void);
-void system_test_odp_sys_page_size(void);
-void system_test_odp_sys_huge_page_size(void);
-int system_check_odp_cpu_hz(void);
-void system_test_odp_cpu_hz(void);
-int system_check_odp_cpu_hz_id(void);
-void system_test_odp_cpu_hz_id(void);
-void system_test_odp_cpu_hz_max(void);
-void system_test_odp_cpu_hz_max_id(void);
-void system_test_odp_cpu_cycles_max(void);
-void system_test_odp_cpu_cycles(void);
-void system_test_odp_cpu_cycles_diff(void);
-void system_test_odp_cpu_cycles_resolution(void);
-
-/* test arrays: */
-extern odp_testinfo_t system_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t system_suites[];
-
-/* main test program: */
-int system_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/system/system_main.c b/test/common_plat/validation/api/system/system_main.c
deleted file mode 100644
index 50d202a84..000000000
--- a/test/common_plat/validation/api/system/system_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "system.h"
-
-int main(int argc, char *argv[])
-{
- return system_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/thread/Makefile.am b/test/common_plat/validation/api/thread/Makefile.am
deleted file mode 100644
index eaf680cf5..000000000
--- a/test/common_plat/validation/api/thread/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestthread.la
-libtestthread_la_SOURCES = thread.c
-libtestthread_la_CFLAGS = $(AM_CFLAGS) -DTEST_THRMASK
-libtestthread_la_LIBADD = $(LIBTHRMASK_COMMON)
-
-test_PROGRAMS = thread_main$(EXEEXT)
-dist_thread_main_SOURCES = thread_main.c
-thread_main_LDADD = libtestthread.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = thread.h
diff --git a/test/common_plat/validation/api/thread/thread.c b/test/common_plat/validation/api/thread/thread.c
deleted file mode 100644
index 24f1c4580..000000000
--- a/test/common_plat/validation/api/thread/thread.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-#include <mask_common.h>
-#include <test_debug.h>
-#include "thread.h"
-
-/* Test thread entry and exit synchronization barriers */
-odp_barrier_t bar_entry;
-odp_barrier_t bar_exit;
-
-void thread_test_odp_cpu_id(void)
-{
- (void)odp_cpu_id();
- CU_PASS();
-}
-
-void thread_test_odp_thread_id(void)
-{
- (void)odp_thread_id();
- CU_PASS();
-}
-
-void thread_test_odp_thread_count(void)
-{
- (void)odp_thread_count();
- CU_PASS();
-}
-
-static int thread_func(void *arg TEST_UNUSED)
-{
- /* indicate that thread has started */
- odp_barrier_wait(&bar_entry);
-
- CU_ASSERT(odp_thread_type() == ODP_THREAD_WORKER);
-
- /* wait for indication that we can exit */
- odp_barrier_wait(&bar_exit);
-
- return CU_get_number_of_failures();
-}
-
-void thread_test_odp_thrmask_worker(void)
-{
- odp_thrmask_t mask;
- int ret;
- pthrd_arg args = { .testcase = 0, .numthrds = 1 };
-
- CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);
-
- odp_barrier_init(&bar_entry, args.numthrds + 1);
- odp_barrier_init(&bar_exit, args.numthrds + 1);
-
- /* should start out with 0 worker threads */
- ret = odp_thrmask_worker(&mask);
- CU_ASSERT(ret == odp_thrmask_count(&mask));
- CU_ASSERT(ret == 0);
-
- /* start the test thread(s) */
- ret = odp_cunit_thread_create(thread_func, &args);
- CU_ASSERT(ret == args.numthrds);
-
- if (ret != args.numthrds)
- return;
-
- /* wait for thread(s) to start */
- odp_barrier_wait(&bar_entry);
-
- ret = odp_thrmask_worker(&mask);
- CU_ASSERT(ret == odp_thrmask_count(&mask));
- CU_ASSERT(ret == args.numthrds);
- CU_ASSERT(ret <= odp_thread_count_max());
-
- /* allow thread(s) to exit */
- odp_barrier_wait(&bar_exit);
-
- odp_cunit_thread_exit(&args);
-}
-
-void thread_test_odp_thrmask_control(void)
-{
- odp_thrmask_t mask;
- int ret;
-
- CU_ASSERT(odp_thread_type() == ODP_THREAD_CONTROL);
-
- /* should start out with 1 worker thread */
- ret = odp_thrmask_control(&mask);
- CU_ASSERT(ret == odp_thrmask_count(&mask));
- CU_ASSERT(ret == 1);
-}
-
-odp_testinfo_t thread_suite[] = {
- ODP_TEST_INFO(thread_test_odp_cpu_id),
- ODP_TEST_INFO(thread_test_odp_thread_id),
- ODP_TEST_INFO(thread_test_odp_thread_count),
- ODP_TEST_INFO(thread_test_odp_thrmask_to_from_str),
- ODP_TEST_INFO(thread_test_odp_thrmask_equal),
- ODP_TEST_INFO(thread_test_odp_thrmask_zero),
- ODP_TEST_INFO(thread_test_odp_thrmask_set),
- ODP_TEST_INFO(thread_test_odp_thrmask_clr),
- ODP_TEST_INFO(thread_test_odp_thrmask_isset),
- ODP_TEST_INFO(thread_test_odp_thrmask_count),
- ODP_TEST_INFO(thread_test_odp_thrmask_and),
- ODP_TEST_INFO(thread_test_odp_thrmask_or),
- ODP_TEST_INFO(thread_test_odp_thrmask_xor),
- ODP_TEST_INFO(thread_test_odp_thrmask_copy),
- ODP_TEST_INFO(thread_test_odp_thrmask_first),
- ODP_TEST_INFO(thread_test_odp_thrmask_last),
- ODP_TEST_INFO(thread_test_odp_thrmask_next),
- ODP_TEST_INFO(thread_test_odp_thrmask_worker),
- ODP_TEST_INFO(thread_test_odp_thrmask_control),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t thread_suites[] = {
- {"thread", NULL, NULL, thread_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int thread_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(thread_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/thread/thread.h b/test/common_plat/validation/api/thread/thread.h
deleted file mode 100644
index d511c9259..000000000
--- a/test/common_plat/validation/api/thread/thread.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_THREAD_H_
-#define _ODP_TEST_THREAD_H_
-
-#include <odp_api.h>
-#include <odp_cunit_common.h>
-
-/* test functions: */
-#ifndef TEST_THRMASK
-#define TEST_THRMASK
-#endif
-#include "mask_common.h"
-void thread_test_odp_cpu_id(void);
-void thread_test_odp_thread_id(void);
-void thread_test_odp_thread_count(void);
-void thread_test_odp_thrmask_control(void);
-void thread_test_odp_thrmask_worker(void);
-
-/* test arrays: */
-extern odp_testinfo_t thread_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t thread_suites[];
-
-/* main test program: */
-int thread_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/thread/thread_main.c b/test/common_plat/validation/api/thread/thread_main.c
deleted file mode 100644
index 53c756551..000000000
--- a/test/common_plat/validation/api/thread/thread_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "thread.h"
-
-int main(int argc, char *argv[])
-{
- return thread_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/time/Makefile.am b/test/common_plat/validation/api/time/Makefile.am
deleted file mode 100644
index bf2d0268c..000000000
--- a/test/common_plat/validation/api/time/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtesttime.la
-libtesttime_la_SOURCES = time.c
-
-test_PROGRAMS = time_main$(EXEEXT)
-dist_time_main_SOURCES = time_main.c
-time_main_LDADD = libtesttime.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = time.h
diff --git a/test/common_plat/validation/api/time/time.c b/test/common_plat/validation/api/time/time.c
deleted file mode 100644
index 530d5c07a..000000000
--- a/test/common_plat/validation/api/time/time.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include "odp_cunit_common.h"
-#include "time.h"
-
-#define BUSY_LOOP_CNT 30000000 /* used for t > min resolution */
-#define BUSY_LOOP_CNT_LONG 6000000000 /* used for t > 4 sec */
-#define MIN_TIME_RATE 32000
-#define MAX_TIME_RATE 15000000000
-#define DELAY_TOLERANCE 20000000 /* deviation for delay */
-#define WAIT_SECONDS 3
-
-static uint64_t local_res;
-static uint64_t global_res;
-
-typedef odp_time_t time_cb(void);
-typedef uint64_t time_res_cb(void);
-typedef odp_time_t time_from_ns_cb(uint64_t ns);
-
-void time_test_constants(void)
-{
- uint64_t ns;
-
- ns = ODP_TIME_SEC_IN_NS / 1000;
- CU_ASSERT(ns == ODP_TIME_MSEC_IN_NS);
- ns /= 1000;
- CU_ASSERT(ns == ODP_TIME_USEC_IN_NS);
-}
-
-static void time_test_res(time_res_cb time_res, uint64_t *res)
-{
- uint64_t rate;
-
- rate = time_res();
- CU_ASSERT(rate > MIN_TIME_RATE);
- CU_ASSERT(rate < MAX_TIME_RATE);
-
- *res = ODP_TIME_SEC_IN_NS / rate;
- if (ODP_TIME_SEC_IN_NS % rate)
- (*res)++;
-}
-
-void time_test_local_res(void)
-{
- time_test_res(odp_time_local_res, &local_res);
-}
-
-void time_test_global_res(void)
-{
- time_test_res(odp_time_global_res, &global_res);
-}
-
-/* check that related conversions come back to the same value */
-static void time_test_conversion(time_from_ns_cb time_from_ns, uint64_t res)
-{
- uint64_t ns1, ns2;
- odp_time_t time;
- uint64_t upper_limit, lower_limit;
-
- ns1 = 100;
- time = time_from_ns(ns1);
-
- ns2 = odp_time_to_ns(time);
-
- /* need to check within arithmetic tolerance that the same
- * value in ns is returned after conversions */
- upper_limit = ns1 + res;
- lower_limit = ns1 - res;
- CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
-
- ns1 = 60 * 11 * ODP_TIME_SEC_IN_NS;
- time = time_from_ns(ns1);
-
- ns2 = odp_time_to_ns(time);
-
- /* need to check within arithmetic tolerance that the same
- * value in ns is returned after conversions */
- upper_limit = ns1 + res;
- lower_limit = ns1 - res;
- CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
-
- /* test on 0 */
- ns1 = odp_time_to_ns(ODP_TIME_NULL);
- CU_ASSERT(ns1 == 0);
-}
-
-void time_test_local_conversion(void)
-{
- time_test_conversion(odp_time_local_from_ns, local_res);
-}
-
-void time_test_global_conversion(void)
-{
- time_test_conversion(odp_time_global_from_ns, global_res);
-}
-
-void time_test_monotony(void)
-{
- volatile uint64_t count = 0;
- odp_time_t l_t1, l_t2, l_t3;
- odp_time_t g_t1, g_t2, g_t3;
- uint64_t ns1, ns2, ns3;
-
- l_t1 = odp_time_local();
- g_t1 = odp_time_global();
-
- while (count < BUSY_LOOP_CNT) {
- count++;
- };
-
- l_t2 = odp_time_local();
- g_t2 = odp_time_global();
-
- while (count < BUSY_LOOP_CNT_LONG) {
- count++;
- };
-
- l_t3 = odp_time_local();
- g_t3 = odp_time_global();
-
- ns1 = odp_time_to_ns(l_t1);
- ns2 = odp_time_to_ns(l_t2);
- ns3 = odp_time_to_ns(l_t3);
-
- /* Local time assertions */
- CU_ASSERT(ns2 > ns1);
- CU_ASSERT(ns3 > ns2);
-
- ns1 = odp_time_to_ns(g_t1);
- ns2 = odp_time_to_ns(g_t2);
- ns3 = odp_time_to_ns(g_t3);
-
- /* Global time assertions */
- CU_ASSERT(ns2 > ns1);
- CU_ASSERT(ns3 > ns2);
-}
-
-static void time_test_cmp(time_cb time, time_from_ns_cb time_from_ns)
-{
- /* volatile to stop optimization of busy loop */
- volatile int count = 0;
- odp_time_t t1, t2, t3;
-
- t1 = time();
-
- while (count < BUSY_LOOP_CNT) {
- count++;
- };
-
- t2 = time();
-
- while (count < BUSY_LOOP_CNT * 2) {
- count++;
- };
-
- t3 = time();
-
- CU_ASSERT(odp_time_cmp(t2, t1) > 0);
- CU_ASSERT(odp_time_cmp(t3, t2) > 0);
- CU_ASSERT(odp_time_cmp(t3, t1) > 0);
- CU_ASSERT(odp_time_cmp(t1, t2) < 0);
- CU_ASSERT(odp_time_cmp(t2, t3) < 0);
- CU_ASSERT(odp_time_cmp(t1, t3) < 0);
- CU_ASSERT(odp_time_cmp(t1, t1) == 0);
- CU_ASSERT(odp_time_cmp(t2, t2) == 0);
- CU_ASSERT(odp_time_cmp(t3, t3) == 0);
-
- t2 = time_from_ns(60 * 10 * ODP_TIME_SEC_IN_NS);
- t1 = time_from_ns(3);
-
- CU_ASSERT(odp_time_cmp(t2, t1) > 0);
- CU_ASSERT(odp_time_cmp(t1, t2) < 0);
-
- t1 = time_from_ns(0);
- CU_ASSERT(odp_time_cmp(t1, ODP_TIME_NULL) == 0);
-}
-
-void time_test_local_cmp(void)
-{
- time_test_cmp(odp_time_local, odp_time_local_from_ns);
-}
-
-void time_test_global_cmp(void)
-{
- time_test_cmp(odp_time_global, odp_time_global_from_ns);
-}
-
-/* check that a time difference gives a reasonable result */
-static void time_test_diff(time_cb time,
- time_from_ns_cb time_from_ns,
- uint64_t res)
-{
- /* volatile to stop optimization of busy loop */
- volatile int count = 0;
- odp_time_t diff, t1, t2;
- uint64_t nsdiff, ns1, ns2, ns;
- uint64_t upper_limit, lower_limit;
-
- /* test timestamp diff */
- t1 = time();
-
- while (count < BUSY_LOOP_CNT) {
- count++;
- };
-
- t2 = time();
- CU_ASSERT(odp_time_cmp(t2, t1) > 0);
-
- diff = odp_time_diff(t2, t1);
- CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
-
- ns1 = odp_time_to_ns(t1);
- ns2 = odp_time_to_ns(t2);
- ns = ns2 - ns1;
- nsdiff = odp_time_to_ns(diff);
-
- upper_limit = ns + 2 * res;
- lower_limit = ns - 2 * res;
- CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
-
- /* test timestamp and interval diff */
- ns1 = 54;
- t1 = time_from_ns(ns1);
- ns = ns2 - ns1;
-
- diff = odp_time_diff(t2, t1);
- CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
- nsdiff = odp_time_to_ns(diff);
-
- upper_limit = ns + 2 * res;
- lower_limit = ns - 2 * res;
- CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
-
- /* test interval diff */
- ns2 = 60 * 10 * ODP_TIME_SEC_IN_NS;
- ns = ns2 - ns1;
-
- t2 = time_from_ns(ns2);
- diff = odp_time_diff(t2, t1);
- CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
- nsdiff = odp_time_to_ns(diff);
-
- upper_limit = ns + 2 * res;
- lower_limit = ns - 2 * res;
- CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
-
- /* same time has to diff to 0 */
- diff = odp_time_diff(t2, t2);
- CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) == 0);
-
- diff = odp_time_diff(t2, ODP_TIME_NULL);
- CU_ASSERT(odp_time_cmp(t2, diff) == 0);
-}
-
-void time_test_local_diff(void)
-{
- time_test_diff(odp_time_local, odp_time_local_from_ns, local_res);
-}
-
-void time_test_global_diff(void)
-{
- time_test_diff(odp_time_global, odp_time_global_from_ns, global_res);
-}
-
-/* check that a time sum gives a reasonable result */
-static void time_test_sum(time_cb time,
- time_from_ns_cb time_from_ns,
- uint64_t res)
-{
- odp_time_t sum, t1, t2;
- uint64_t nssum, ns1, ns2, ns;
- uint64_t upper_limit, lower_limit;
-
- /* sum timestamp and interval */
- t1 = time();
- ns2 = 103;
- t2 = time_from_ns(ns2);
- ns1 = odp_time_to_ns(t1);
- ns = ns1 + ns2;
-
- sum = odp_time_sum(t2, t1);
- CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
- nssum = odp_time_to_ns(sum);
-
- upper_limit = ns + 2 * res;
- lower_limit = ns - 2 * res;
- CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
-
- /* sum intervals */
- ns1 = 60 * 13 * ODP_TIME_SEC_IN_NS;
- t1 = time_from_ns(ns1);
- ns = ns1 + ns2;
-
- sum = odp_time_sum(t2, t1);
- CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
- nssum = odp_time_to_ns(sum);
-
- upper_limit = ns + 2 * res;
- lower_limit = ns - 2 * res;
- CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
-
- /* test on 0 */
- sum = odp_time_sum(t2, ODP_TIME_NULL);
- CU_ASSERT(odp_time_cmp(t2, sum) == 0);
-}
-
-void time_test_local_sum(void)
-{
- time_test_sum(odp_time_local, odp_time_local_from_ns, local_res);
-}
-
-void time_test_global_sum(void)
-{
- time_test_sum(odp_time_global, odp_time_global_from_ns, global_res);
-}
-
-static void time_test_wait_until(time_cb time, time_from_ns_cb time_from_ns)
-{
- int i;
- odp_time_t lower_limit, upper_limit;
- odp_time_t start_time, end_time, wait;
- odp_time_t second = time_from_ns(ODP_TIME_SEC_IN_NS);
-
- start_time = time();
- wait = start_time;
- for (i = 0; i < WAIT_SECONDS; i++) {
- wait = odp_time_sum(wait, second);
- odp_time_wait_until(wait);
- }
- end_time = time();
-
- wait = odp_time_diff(end_time, start_time);
- lower_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
- DELAY_TOLERANCE);
- upper_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
- DELAY_TOLERANCE);
-
- if (odp_time_cmp(wait, lower_limit) < 0) {
- fprintf(stderr, "Exceed lower limit: "
- "wait is %" PRIu64 ", lower_limit %" PRIu64 "\n",
- odp_time_to_ns(wait), odp_time_to_ns(lower_limit));
- CU_FAIL("Exceed lower limit\n");
- }
-
- if (odp_time_cmp(wait, upper_limit) > 0) {
- fprintf(stderr, "Exceed upper limit: "
- "wait is %" PRIu64 ", upper_limit %" PRIu64 "\n",
- odp_time_to_ns(wait), odp_time_to_ns(lower_limit));
- CU_FAIL("Exceed upper limit\n");
- }
-}
-
-void time_test_local_wait_until(void)
-{
- time_test_wait_until(odp_time_local, odp_time_local_from_ns);
-}
-
-void time_test_global_wait_until(void)
-{
- time_test_wait_until(odp_time_global, odp_time_global_from_ns);
-}
-
-void time_test_wait_ns(void)
-{
- int i;
- odp_time_t lower_limit, upper_limit;
- odp_time_t start_time, end_time, diff;
-
- start_time = odp_time_local();
- for (i = 0; i < WAIT_SECONDS; i++)
- odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
- end_time = odp_time_local();
-
- diff = odp_time_diff(end_time, start_time);
-
- lower_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
- DELAY_TOLERANCE);
- upper_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
- DELAY_TOLERANCE);
-
- if (odp_time_cmp(diff, lower_limit) < 0) {
- fprintf(stderr, "Exceed lower limit: "
- "diff is %" PRIu64 ", lower_limit %" PRIu64 "\n",
- odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
- CU_FAIL("Exceed lower limit\n");
- }
-
- if (odp_time_cmp(diff, upper_limit) > 0) {
- fprintf(stderr, "Exceed upper limit: "
- "diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
- odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
- CU_FAIL("Exceed upper limit\n");
- }
-}
-
-static void time_test_to_u64(time_cb time)
-{
- volatile int count = 0;
- uint64_t val1, val2;
- odp_time_t t1, t2;
-
- t1 = time();
-
- val1 = odp_time_to_u64(t1);
- CU_ASSERT(val1 > 0);
-
- while (count < BUSY_LOOP_CNT) {
- count++;
- };
-
- t2 = time();
- val2 = odp_time_to_u64(t2);
- CU_ASSERT(val2 > 0);
-
- CU_ASSERT(val2 > val1);
-
- val1 = odp_time_to_u64(ODP_TIME_NULL);
- CU_ASSERT(val1 == 0);
-}
-
-void time_test_local_to_u64(void)
-{
- time_test_to_u64(odp_time_local);
-}
-
-void time_test_global_to_u64(void)
-{
- time_test_to_u64(odp_time_global);
-}
-
-odp_testinfo_t time_suite_time[] = {
- ODP_TEST_INFO(time_test_constants),
- ODP_TEST_INFO(time_test_local_res),
- ODP_TEST_INFO(time_test_local_conversion),
- ODP_TEST_INFO(time_test_monotony),
- ODP_TEST_INFO(time_test_local_cmp),
- ODP_TEST_INFO(time_test_local_diff),
- ODP_TEST_INFO(time_test_local_sum),
- ODP_TEST_INFO(time_test_local_wait_until),
- ODP_TEST_INFO(time_test_wait_ns),
- ODP_TEST_INFO(time_test_local_to_u64),
- ODP_TEST_INFO(time_test_global_res),
- ODP_TEST_INFO(time_test_global_conversion),
- ODP_TEST_INFO(time_test_global_cmp),
- ODP_TEST_INFO(time_test_global_diff),
- ODP_TEST_INFO(time_test_global_sum),
- ODP_TEST_INFO(time_test_global_wait_until),
- ODP_TEST_INFO(time_test_global_to_u64),
- ODP_TEST_INFO_NULL
-};
-
-odp_suiteinfo_t time_suites[] = {
- {"Time", NULL, NULL, time_suite_time},
- ODP_SUITE_INFO_NULL
-};
-
-int time_main(int argc, char *argv[])
-{
- int ret;
-
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- ret = odp_cunit_register(time_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/time/time.h b/test/common_plat/validation/api/time/time.h
deleted file mode 100644
index e5132a494..000000000
--- a/test/common_plat/validation/api/time/time.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_TIME_H_
-#define _ODP_TEST_TIME_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void time_test_constants(void);
-void time_test_local_res(void);
-void time_test_global_res(void);
-void time_test_local_conversion(void);
-void time_test_global_conversion(void);
-void time_test_local_cmp(void);
-void time_test_global_cmp(void);
-void time_test_local_diff(void);
-void time_test_global_diff(void);
-void time_test_local_sum(void);
-void time_test_global_sum(void);
-void time_test_local_wait_until(void);
-void time_test_global_wait_until(void);
-void time_test_wait_ns(void);
-void time_test_local_to_u64(void);
-void time_test_global_to_u64(void);
-void time_test_monotony(void);
-
-/* test arrays: */
-extern odp_testinfo_t time_suite_time[];
-
-/* test registry: */
-extern odp_suiteinfo_t time_suites[];
-
-/* main test program: */
-int time_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/time/time_main.c b/test/common_plat/validation/api/time/time_main.c
deleted file mode 100644
index f86d638a5..000000000
--- a/test/common_plat/validation/api/time/time_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "time.h"
-
-int main(int argc, char *argv[])
-{
- return time_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/timer/Makefile.am b/test/common_plat/validation/api/timer/Makefile.am
deleted file mode 100644
index fe6872f41..000000000
--- a/test/common_plat/validation/api/timer/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtesttimer.la
-libtesttimer_la_SOURCES = timer.c
-
-test_PROGRAMS = timer_main$(EXEEXT)
-dist_timer_main_SOURCES = timer_main.c
-timer_main_LDADD = libtesttimer.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = timer.h
diff --git a/test/common_plat/validation/api/timer/timer.c b/test/common_plat/validation/api/timer/timer.c
deleted file mode 100644
index b7d84c649..000000000
--- a/test/common_plat/validation/api/timer/timer.c
+++ /dev/null
@@ -1,605 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- */
-
-/* For rand_r and nanosleep */
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <time.h>
-#include <odp.h>
-#include <odp/helper/odph_api.h>
-#include "odp_cunit_common.h"
-#include "test_debug.h"
-#include "timer.h"
-
-/** @private Timeout range in milliseconds (ms) */
-#define RANGE_MS 2000
-
-/** @private Number of timers per thread */
-#define NTIMERS 2000
-
-/** @private Barrier for thread synchronisation */
-static odp_barrier_t test_barrier;
-
-/** @private Timeout pool handle used by all threads */
-static odp_pool_t tbp;
-
-/** @private Timer pool handle used by all threads */
-static odp_timer_pool_t tp;
-
-/** @private Count of timeouts delivered too late */
-static odp_atomic_u32_t ndelivtoolate;
-
-/** @private Sum of all allocated timers from all threads. Thread-local
- * caches may make this number lower than the capacity of the pool */
-static odp_atomic_u32_t timers_allocated;
-
-/* @private Timer helper structure */
-struct test_timer {
- odp_timer_t tim; /* Timer handle */
- odp_event_t ev; /* Timeout event */
- odp_event_t ev2; /* Copy of event handle */
- uint64_t tick; /* Expiration tick or TICK_INVALID */
-};
-
-#define TICK_INVALID (~(uint64_t)0)
-
-void timer_test_timeout_pool_alloc(void)
-{
- odp_pool_t pool;
- const int num = 3;
- odp_timeout_t tmo[num];
- odp_event_t ev;
- int index;
- char wrong_type = 0;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_TIMEOUT;
- params.tmo.num = num;
-
- pool = odp_pool_create("timeout_pool_alloc", &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- odp_pool_print(pool);
-
- /* Try to allocate num items from the pool */
- for (index = 0; index < num; index++) {
- tmo[index] = odp_timeout_alloc(pool);
-
- if (tmo[index] == ODP_TIMEOUT_INVALID)
- break;
-
- ev = odp_timeout_to_event(tmo[index]);
- if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
- wrong_type = 1;
- }
-
- /* Check that the pool had at least num items */
- CU_ASSERT(index == num);
- /* index points out of buffer[] or it point to an invalid buffer */
- index--;
-
- /* Check that the pool had correct buffers */
- CU_ASSERT(wrong_type == 0);
-
- for (; index >= 0; index--)
- odp_timeout_free(tmo[index]);
-
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void timer_test_timeout_pool_free(void)
-{
- odp_pool_t pool;
- odp_timeout_t tmo;
- odp_pool_param_t params;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_TIMEOUT;
- params.tmo.num = 1;
-
- pool = odp_pool_create("timeout_pool_free", &params);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- odp_pool_print(pool);
-
- /* Allocate the only timeout from the pool */
- tmo = odp_timeout_alloc(pool);
- CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
-
- /* Pool should have only one timeout */
- CU_ASSERT_FATAL(odp_timeout_alloc(pool) == ODP_TIMEOUT_INVALID)
-
- odp_timeout_free(tmo);
-
- /* Check that the timeout was returned back to the pool */
- tmo = odp_timeout_alloc(pool);
- CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
-
- odp_timeout_free(tmo);
- CU_ASSERT(odp_pool_destroy(pool) == 0);
-}
-
-void timer_test_odp_timer_cancel(void)
-{
- odp_pool_t pool;
- odp_pool_param_t params;
- odp_timer_pool_param_t tparam;
- odp_timer_pool_t tp;
- odp_queue_t queue;
- odp_timer_t tim;
- odp_event_t ev;
- odp_timeout_t tmo;
- odp_timer_set_t rc;
- uint64_t tick;
-
- odp_pool_param_init(&params);
- params.type = ODP_POOL_TIMEOUT;
- params.tmo.num = 1;
-
- pool = odp_pool_create("tmo_pool_for_cancel", &params);
-
- if (pool == ODP_POOL_INVALID)
- CU_FAIL_FATAL("Timeout pool create failed");
-
- tparam.res_ns = 100 * ODP_TIME_MSEC_IN_NS;
- tparam.min_tmo = 1 * ODP_TIME_SEC_IN_NS;
- tparam.max_tmo = 10 * ODP_TIME_SEC_IN_NS;
- tparam.num_timers = 1;
- tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_CPU;
- tp = odp_timer_pool_create(NULL, &tparam);
- if (tp == ODP_TIMER_POOL_INVALID)
- CU_FAIL_FATAL("Timer pool create failed");
-
- /* Start all created timer pools */
- odp_timer_pool_start();
-
- queue = odp_queue_create("timer_queue", NULL);
- if (queue == ODP_QUEUE_INVALID)
- CU_FAIL_FATAL("Queue create failed");
-
- #define USER_PTR ((void *)0xdead)
- tim = odp_timer_alloc(tp, queue, USER_PTR);
- if (tim == ODP_TIMER_INVALID)
- CU_FAIL_FATAL("Failed to allocate timer");
- LOG_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));
-
- ev = odp_timeout_to_event(odp_timeout_alloc(pool));
- if (ev == ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Failed to allocate timeout");
-
- tick = odp_timer_ns_to_tick(tp, 2 * ODP_TIME_SEC_IN_NS);
-
- rc = odp_timer_set_rel(tim, tick, &ev);
- if (rc != ODP_TIMER_SUCCESS)
- CU_FAIL_FATAL("Failed to set timer (relative time)");
-
- ev = ODP_EVENT_INVALID;
- if (odp_timer_cancel(tim, &ev) != 0)
- CU_FAIL_FATAL("Failed to cancel timer (relative time)");
-
- if (ev == ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Cancel did not return event");
-
- tmo = odp_timeout_from_event(ev);
- if (tmo == ODP_TIMEOUT_INVALID)
- CU_FAIL_FATAL("Cancel did not return timeout");
- LOG_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));
-
- if (odp_timeout_timer(tmo) != tim)
- CU_FAIL("Cancel invalid tmo.timer");
-
- if (odp_timeout_user_ptr(tmo) != USER_PTR)
- CU_FAIL("Cancel invalid tmo.user_ptr");
-
- odp_timeout_free(tmo);
-
- ev = odp_timer_free(tim);
- if (ev != ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Free returned event");
-
- odp_timer_pool_destroy(tp);
-
- if (odp_queue_destroy(queue) != 0)
- CU_FAIL_FATAL("Failed to destroy queue");
-
- if (odp_pool_destroy(pool) != 0)
- CU_FAIL_FATAL("Failed to destroy pool");
-}
-
-/* @private Handle a received (timeout) event */
-static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
-{
- CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
- if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
- /* Not a timeout event */
- CU_FAIL("Unexpected event type received");
- return;
- }
- /* Read the metadata from the timeout */
- odp_timeout_t tmo = odp_timeout_from_event(ev);
- odp_timer_t tim = odp_timeout_timer(tmo);
- uint64_t tick = odp_timeout_tick(tmo);
- struct test_timer *ttp = odp_timeout_user_ptr(tmo);
-
- if (tim == ODP_TIMER_INVALID)
- CU_FAIL("odp_timeout_timer() invalid timer");
- if (!ttp)
- CU_FAIL("odp_timeout_user_ptr() null user ptr");
-
- if (ttp && ttp->ev2 != ev)
- CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
- if (ttp && ttp->tim != tim)
- CU_FAIL("odp_timeout_timer() wrong timer");
- if (stale) {
- if (odp_timeout_fresh(tmo))
- CU_FAIL("Wrong status (fresh) for stale timeout");
- /* Stale timeout => local timer must have invalid tick */
- if (ttp && ttp->tick != TICK_INVALID)
- CU_FAIL("Stale timeout for active timer");
- } else {
- if (!odp_timeout_fresh(tmo))
- CU_FAIL("Wrong status (stale) for fresh timeout");
- /* Fresh timeout => local timer must have matching tick */
- if (ttp && ttp->tick != tick) {
- LOG_DBG("Wrong tick: expected %" PRIu64
- " actual %" PRIu64 "\n",
- ttp->tick, tick);
- CU_FAIL("odp_timeout_tick() wrong tick");
- }
- /* Check that timeout was delivered 'timely' */
- if (tick > odp_timer_current_tick(tp))
- CU_FAIL("Timeout delivered early");
- if (tick < prev_tick) {
- LOG_DBG("Too late tick: %" PRIu64
- " prev_tick %" PRIu64"\n",
- tick, prev_tick);
- /* We don't report late timeouts using CU_FAIL */
- odp_atomic_inc_u32(&ndelivtoolate);
- }
- }
-
- if (ttp) {
- /* Internal error */
- CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
- ttp->ev = ev;
- }
-}
-
-/* @private Worker thread entrypoint which performs timer alloc/set/cancel/free
- * tests */
-static int worker_entrypoint(void *arg TEST_UNUSED)
-{
- int thr = odp_thread_id();
- uint32_t i, allocated;
- unsigned seed = thr;
- int rc;
- odp_queue_t queue;
- struct test_timer *tt;
- uint32_t nset;
- uint64_t tck;
- uint32_t nrcv;
- uint32_t nreset;
- uint32_t ncancel;
- uint32_t ntoolate;
- uint32_t ms;
- uint64_t prev_tick;
- odp_event_t ev;
- struct timespec ts;
- uint32_t nstale;
- odp_timer_set_t timer_rc;
-
- queue = odp_queue_create("timer_queue", NULL);
- if (queue == ODP_QUEUE_INVALID)
- CU_FAIL_FATAL("Queue create failed");
-
- tt = malloc(sizeof(struct test_timer) * NTIMERS);
- if (!tt)
- CU_FAIL_FATAL("malloc failed");
-
- /* Prepare all timers */
- for (i = 0; i < NTIMERS; i++) {
- tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
- if (tt[i].ev == ODP_EVENT_INVALID) {
- LOG_DBG("Failed to allocate timeout (%" PRIu32 "/%d)\n",
- i, NTIMERS);
- break;
- }
- tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
- if (tt[i].tim == ODP_TIMER_INVALID) {
- LOG_DBG("Failed to allocate timer (%" PRIu32 "/%d)\n",
- i, NTIMERS);
- odp_event_free(tt[i].ev);
- break;
- }
- tt[i].ev2 = tt[i].ev;
- tt[i].tick = TICK_INVALID;
- }
- allocated = i;
- if (allocated == 0)
- CU_FAIL_FATAL("unable to alloc a timer");
- odp_atomic_fetch_add_u32(&timers_allocated, allocated);
-
- odp_barrier_wait(&test_barrier);
-
- /* Initial set all timers with a random expiration time */
- nset = 0;
- for (i = 0; i < allocated; i++) {
- tck = odp_timer_current_tick(tp) + 1 +
- odp_timer_ns_to_tick(tp, (rand_r(&seed) % RANGE_MS)
- * 1000000ULL);
- timer_rc = odp_timer_set_abs(tt[i].tim, tck, &tt[i].ev);
- if (timer_rc != ODP_TIMER_SUCCESS) {
- CU_FAIL("Failed to set timer");
- } else {
- tt[i].tick = tck;
- nset++;
- }
- }
-
- /* Step through wall time, 1ms at a time and check for expired timers */
- nrcv = 0;
- nreset = 0;
- ncancel = 0;
- ntoolate = 0;
- prev_tick = odp_timer_current_tick(tp);
-
- for (ms = 0; ms < 7 * RANGE_MS / 10 && allocated > 0; ms++) {
- while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID) {
- /* Subtract one from prev_tick to allow for timeouts
- * to be delivered a tick late */
- handle_tmo(ev, false, prev_tick - 1);
- nrcv++;
- }
- prev_tick = odp_timer_current_tick(tp);
- i = rand_r(&seed) % allocated;
- if (tt[i].ev == ODP_EVENT_INVALID &&
- (rand_r(&seed) % 2 == 0)) {
- /* Timer active, cancel it */
- rc = odp_timer_cancel(tt[i].tim, &tt[i].ev);
- if (rc != 0)
- /* Cancel failed, timer already expired */
- ntoolate++;
- tt[i].tick = TICK_INVALID;
- ncancel++;
- } else {
- if (tt[i].ev != ODP_EVENT_INVALID)
- /* Timer inactive => set */
- nset++;
- else
- /* Timer active => reset */
- nreset++;
- uint64_t tck = 1 + odp_timer_ns_to_tick(tp,
- (rand_r(&seed) % RANGE_MS) * 1000000ULL);
- odp_timer_set_t rc;
- uint64_t cur_tick;
- /* Loop until we manage to read cur_tick and set a
- * relative timer in the same tick */
- do {
- cur_tick = odp_timer_current_tick(tp);
- rc = odp_timer_set_rel(tt[i].tim,
- tck, &tt[i].ev);
- } while (cur_tick != odp_timer_current_tick(tp));
- if (rc == ODP_TIMER_TOOEARLY ||
- rc == ODP_TIMER_TOOLATE) {
- CU_FAIL("Failed to set timer (tooearly/toolate)");
- } else if (rc != ODP_TIMER_SUCCESS) {
- /* Set/reset failed, timer already expired */
- ntoolate++;
- } else if (rc == ODP_TIMER_SUCCESS) {
- /* Save expected expiration tick on success */
- tt[i].tick = cur_tick + tck;
- }
- }
- ts.tv_sec = 0;
- ts.tv_nsec = 1000000; /* 1ms */
- if (nanosleep(&ts, NULL) < 0)
- CU_FAIL_FATAL("nanosleep failed");
- }
-
- /* Cancel and free all timers */
- nstale = 0;
- for (i = 0; i < allocated; i++) {
- (void)odp_timer_cancel(tt[i].tim, &tt[i].ev);
- tt[i].tick = TICK_INVALID;
- if (tt[i].ev == ODP_EVENT_INVALID)
- /* Cancel too late, timer already expired and
- * timeout enqueued */
- nstale++;
- }
-
- LOG_DBG("Thread %u: %" PRIu32 " timers set\n", thr, nset);
- LOG_DBG("Thread %u: %" PRIu32 " timers reset\n", thr, nreset);
- LOG_DBG("Thread %u: %" PRIu32 " timers cancelled\n", thr, ncancel);
- LOG_DBG("Thread %u: %" PRIu32 " timers reset/cancelled too late\n",
- thr, ntoolate);
- LOG_DBG("Thread %u: %" PRIu32 " timeouts received\n", thr, nrcv);
- LOG_DBG("Thread %u: %" PRIu32
- " stale timeout(s) after odp_timer_free()\n",
- thr, nstale);
-
- /* Delay some more to ensure timeouts for expired timers can be
- * received. Can not use busy loop here to make background timer
- * thread finish their work. */
- ts.tv_sec = 0;
- ts.tv_nsec = (3 * RANGE_MS / 10 + 50) * ODP_TIME_MSEC_IN_NS;
- if (nanosleep(&ts, NULL) < 0)
- CU_FAIL_FATAL("nanosleep failed");
-
- while (nstale != 0) {
- ev = odp_queue_deq(queue);
- if (ev != ODP_EVENT_INVALID) {
- handle_tmo(ev, true, 0/*Don't care for stale tmo's*/);
- nstale--;
- } else {
- CU_FAIL("Failed to receive stale timeout");
- break;
- }
- }
-
- for (i = 0; i < allocated; i++) {
- if (odp_timer_free(tt[i].tim) != ODP_EVENT_INVALID)
- CU_FAIL("odp_timer_free");
- }
-
- /* Check if there any more (unexpected) events */
- ev = odp_queue_deq(queue);
- if (ev != ODP_EVENT_INVALID)
- CU_FAIL("Unexpected event received");
-
- rc = odp_queue_destroy(queue);
- CU_ASSERT(rc == 0);
- for (i = 0; i < allocated; i++) {
- if (tt[i].ev != ODP_EVENT_INVALID)
- odp_event_free(tt[i].ev);
- }
-
- free(tt);
- LOG_DBG("Thread %u: exiting\n", thr);
- return CU_get_number_of_failures();
-}
-
-/* @private Timer test case entrypoint */
-void timer_test_odp_timer_all(void)
-{
- int rc;
- odp_pool_param_t params;
- odp_timer_pool_param_t tparam;
- odp_cpumask_t unused;
- odp_timer_pool_info_t tpinfo;
- uint64_t tick;
- uint64_t ns;
- uint64_t t2;
- pthrd_arg thrdarg;
-
- /* Reserve at least one core for running other processes so the timer
- * test hopefully can run undisturbed and thus get better timing
- * results. */
- int num_workers = odp_cpumask_default_worker(&unused, 0);
-
- /* force to max CPU count */
- if (num_workers > MAX_WORKERS)
- num_workers = MAX_WORKERS;
-
- /* On a single-CPU machine run at least one thread */
- if (num_workers < 1)
- num_workers = 1;
-
- /* Create timeout pools */
- odp_pool_param_init(&params);
- params.type = ODP_POOL_TIMEOUT;
- params.tmo.num = (NTIMERS + 1) * num_workers;
-
- tbp = odp_pool_create("tmo_pool", &params);
- if (tbp == ODP_POOL_INVALID)
- CU_FAIL_FATAL("Timeout pool create failed");
-
-#define NAME "timer_pool"
-#define RES (10 * ODP_TIME_MSEC_IN_NS / 3)
-#define MIN (10 * ODP_TIME_MSEC_IN_NS / 3)
-#define MAX (1000000 * ODP_TIME_MSEC_IN_NS)
- /* Create a timer pool */
- tparam.res_ns = RES;
- tparam.min_tmo = MIN;
- tparam.max_tmo = MAX;
- tparam.num_timers = num_workers * NTIMERS;
- tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_CPU;
- tp = odp_timer_pool_create(NAME, &tparam);
- if (tp == ODP_TIMER_POOL_INVALID)
- CU_FAIL_FATAL("Timer pool create failed");
-
- /* Start all created timer pools */
- odp_timer_pool_start();
-
- if (odp_timer_pool_info(tp, &tpinfo) != 0)
- CU_FAIL("odp_timer_pool_info");
- CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
- CU_ASSERT(tpinfo.param.res_ns == RES);
- CU_ASSERT(tpinfo.param.min_tmo == MIN);
- CU_ASSERT(tpinfo.param.max_tmo == MAX);
- CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
-
- LOG_DBG("Timer pool handle: %" PRIu64 "\n", odp_timer_pool_to_u64(tp));
- LOG_DBG("#timers..: %u\n", NTIMERS);
- LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS,
- odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
-
- for (tick = 0; tick < 1000000000000ULL; tick += 1000000ULL) {
- ns = odp_timer_tick_to_ns(tp, tick);
- t2 = odp_timer_ns_to_tick(tp, ns);
- if (tick != t2)
- CU_FAIL("Invalid conversion tick->ns->tick");
- }
-
- /* Initialize barrier used by worker threads for synchronization */
- odp_barrier_init(&test_barrier, num_workers);
-
- /* Initialize the shared timeout counter */
- odp_atomic_init_u32(&ndelivtoolate, 0);
-
- /* Initialize the number of finally allocated elements */
- odp_atomic_init_u32(&timers_allocated, 0);
-
- /* Create and start worker threads */
- thrdarg.testcase = 0;
- thrdarg.numthrds = num_workers;
- odp_cunit_thread_create(worker_entrypoint, &thrdarg);
-
- /* Wait for worker threads to exit */
- odp_cunit_thread_exit(&thrdarg);
- LOG_DBG("Number of timeouts delivered/received too late: %" PRIu32 "\n",
- odp_atomic_load_u32(&ndelivtoolate));
-
- /* Check some statistics after the test */
- if (odp_timer_pool_info(tp, &tpinfo) != 0)
- CU_FAIL("odp_timer_pool_info");
- CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
- CU_ASSERT(tpinfo.cur_timers == 0);
- CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
-
- /* Destroy timer pool, all timers must have been freed */
- odp_timer_pool_destroy(tp);
-
- /* Destroy timeout pool, all timeouts must have been freed */
- rc = odp_pool_destroy(tbp);
- CU_ASSERT(rc == 0);
-
- CU_PASS("ODP timer test");
-}
-
-odp_testinfo_t timer_suite[] = {
- ODP_TEST_INFO(timer_test_timeout_pool_alloc),
- ODP_TEST_INFO(timer_test_timeout_pool_free),
- ODP_TEST_INFO(timer_test_odp_timer_cancel),
- ODP_TEST_INFO(timer_test_odp_timer_all),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t timer_suites[] = {
- {"Timer", NULL, NULL, timer_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int timer_main(int argc, char *argv[])
-{
- /* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- int ret = odp_cunit_register(timer_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/common_plat/validation/api/timer/timer.h b/test/common_plat/validation/api/timer/timer.h
deleted file mode 100644
index bd304fffd..000000000
--- a/test/common_plat/validation/api/timer/timer.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_TIMER_H_
-#define _ODP_TEST_TIMER_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void timer_test_timeout_pool_alloc(void);
-void timer_test_timeout_pool_free(void);
-void timer_test_odp_timer_cancel(void);
-void timer_test_odp_timer_all(void);
-
-/* test arrays: */
-extern odp_testinfo_t timer_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t timer_suites[];
-
-/* main test program: */
-int timer_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/timer/timer_main.c b/test/common_plat/validation/api/timer/timer_main.c
deleted file mode 100644
index c318763fa..000000000
--- a/test/common_plat/validation/api/timer/timer_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "timer.h"
-
-int main(int argc, char *argv[])
-{
- return timer_main(argc, argv);
-}
diff --git a/test/common_plat/validation/api/traffic_mngr/Makefile.am b/test/common_plat/validation/api/traffic_mngr/Makefile.am
deleted file mode 100644
index 35e689a02..000000000
--- a/test/common_plat/validation/api/traffic_mngr/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtesttraffic_mngr.la
-libtesttraffic_mngr_la_SOURCES = traffic_mngr.c
-
-test_PROGRAMS = traffic_mngr_main$(EXEEXT)
-dist_traffic_mngr_main_SOURCES = traffic_mngr_main.c
-traffic_mngr_main_LDADD = libtesttraffic_mngr.la -lm $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = traffic_mngr.h
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h
deleted file mode 100644
index af115fef7..000000000
--- a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_TRAFFIC_MNGR_H_
-#define _ODP_TEST_TRAFFIC_MNGR_H_
-
-#include <odp_cunit_common.h>
-
-int traffic_mngr_check_shaper(void);
-int traffic_mngr_check_scheduler(void);
-
-/* test functions: */
-void traffic_mngr_test_capabilities(void);
-void traffic_mngr_test_tm_create(void);
-void traffic_mngr_test_shaper_profile(void);
-void traffic_mngr_test_sched_profile(void);
-void traffic_mngr_test_threshold_profile(void);
-void traffic_mngr_test_wred_profile(void);
-void traffic_mngr_test_shaper(void);
-void traffic_mngr_test_scheduler(void);
-void traffic_mngr_test_thresholds(void);
-void traffic_mngr_test_byte_wred(void);
-void traffic_mngr_test_pkt_wred(void);
-void traffic_mngr_test_query(void);
-void traffic_mngr_test_marking(void);
-void traffic_mngr_test_fanin_info(void);
-void traffic_mngr_test_destroy(void);
-
-/* test arrays: */
-extern odp_testinfo_t traffic_mngr_suite[];
-
-/* test suite init/term functions: */
-int traffic_mngr_suite_init(void);
-int traffic_mngr_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t traffic_mngr_suites[];
-
-/* main test program: */
-int traffic_mngr_main(int argc, char *argv[]);
-
-#endif
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c b/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c
deleted file mode 100644
index 1fc1f78d7..000000000
--- a/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "traffic_mngr.h"
-
-int main(int argc, char *argv[])
-{
- return traffic_mngr_main(argc, argv);
-}
diff --git a/test/linux-dpdk/Makefile.am b/test/linux-dpdk/Makefile.am
deleted file mode 100644
index fab35c626..000000000
--- a/test/linux-dpdk/Makefile.am
+++ /dev/null
@@ -1,56 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/common_plat/validation
-
-ALL_API_VALIDATION_DIR = ${top_builddir}/test/common_plat/validation/api
-
-SUBDIRS =
-
-if test_vald
-TESTS = validation/api/pktio/pktio_run.sh \
- $(ALL_API_VALIDATION_DIR)/atomic/atomic_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/barrier/barrier_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/buffer/buffer_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/classification/classification_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/cpumask/cpumask_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/crypto/crypto_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/errno/errno_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/hash/hash_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_ok$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_abort$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_log$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/lock/lock_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/packet/packet_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/pool/pool_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/queue/queue_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/random/random_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/scheduler/scheduler_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/std_clib/std_clib_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/thread/thread_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/time/time_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/timer/timer_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/traffic_mngr/traffic_mngr_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/shmem/shmem_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/system/system_main$(EXEEXT)
-
-SUBDIRS += validation/api/pktio
-else
-#performance tests refer to pktio_env
-if test_perf
-SUBDIRS += validation/api/pktio
-endif
-endif
-
-dist_check_SCRIPTS = run-test tests-validation.env $(LOG_COMPILER)
-
-test_SCRIPTS = $(dist_check_SCRIPTS)
-
-tests-validation.env:
- echo "TESTS=\"$(TESTS)\"" > $@
- echo "$(TESTS_ENVIRONMENT)" >> $@
- echo "$(LOG_COMPILER)" >> $@
-
-if test_installdir
-installcheck-local:
- $(DESTDIR)/$(testdir)/run-test
-endif
-
diff --git a/test/linux-dpdk/Makefile.inc b/test/linux-dpdk/Makefile.inc
deleted file mode 100644
index a24b93626..000000000
--- a/test/linux-dpdk/Makefile.inc
+++ /dev/null
@@ -1,19 +0,0 @@
-# The following definitions may be used by platform tests that wish to
-# build specific ODP applications, (i.e those whose do more than validation
-# test wrapping)
-
-AM_LDFLAGS += -static
-
-LIBCUNIT_COMMON = $(top_builddir)/test/common_plat/common/libcunit_common.la
-LIB = $(top_builddir)/lib
-LIBODP = $(LIB)/libodphelper.la $(LIB)/libodp-dpdk.la
-
-INCCUNIT_COMMON = -I$(top_srcdir)/test/common_plat/common
-INCODP = -I$(top_builddir)/platform/@with_platform@/include \
- -I$(top_builddir)/include \
- -I$(top_srcdir)/helper/include \
- -I$(top_srcdir)/include \
- -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@ \
- -I$(top_srcdir)/platform/@with_platform@/arch/$(ARCH_DIR) \
- -I$(top_srcdir)/platform/@with_platform@/include \
- -I$(top_srcdir)/test
diff --git a/test/linux-dpdk/m4/configure.m4 b/test/linux-dpdk/m4/configure.m4
deleted file mode 100644
index ff6caf97a..000000000
--- a/test/linux-dpdk/m4/configure.m4
+++ /dev/null
@@ -1,2 +0,0 @@
-AC_CONFIG_FILES([test/linux-dpdk/Makefile
- test/linux-dpdk/validation/api/pktio/Makefile])
diff --git a/test/linux-dpdk/run-test b/test/linux-dpdk/run-test
deleted file mode 120000
index 332cf3fee..000000000
--- a/test/linux-dpdk/run-test
+++ /dev/null
@@ -1 +0,0 @@
-../linux-generic/run-test \ No newline at end of file
diff --git a/test/linux-dpdk/validation/api/pktio/.gitignore b/test/linux-dpdk/validation/api/pktio/.gitignore
deleted file mode 120000
index 559053e1d..000000000
--- a/test/linux-dpdk/validation/api/pktio/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/validation/api/pktio/.gitignore \ No newline at end of file
diff --git a/test/linux-dpdk/validation/api/pktio/Makefile.am b/test/linux-dpdk/validation/api/pktio/Makefile.am
deleted file mode 100644
index e401a29af..000000000
--- a/test/linux-dpdk/validation/api/pktio/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-dist_check_SCRIPTS = pktio_env \
- pktio_run.sh
-
-test_SCRIPTS = $(dist_check_SCRIPTS)
diff --git a/test/linux-dpdk/validation/api/pktio/pktio_env b/test/linux-dpdk/validation/api/pktio/pktio_env
deleted file mode 120000
index 271cefee1..000000000
--- a/test/linux-dpdk/validation/api/pktio/pktio_env
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/validation/api/pktio/pktio_env \ No newline at end of file
diff --git a/test/linux-dpdk/validation/api/pktio/pktio_run.sh b/test/linux-dpdk/validation/api/pktio/pktio_run.sh
deleted file mode 100755
index 538c87d00..000000000
--- a/test/linux-dpdk/validation/api/pktio/pktio_run.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# Proceed the pktio tests. This script expects at least one argument:
-# setup) setup the pktio test environment
-# cleanup) cleanup the pktio test environment
-# run) run the pktio tests (setup, run, cleanup)
-# extra arguments are passed unchanged to the test itself (pktio_main)
-# Without arguments, "run" is assumed and no extra argument is passed to the
-# test (legacy mode).
-#
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone (./pktio_run) intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with pktio_main: $pktio_run_path"
-else
- echo "cannot find pktio_main: please set you PATH for it."
-fi
-
-# directory where platform test sources are, including scripts
-TEST_SRC_DIR=$(dirname $0)
-
-# exit codes expected by automake for skipped tests
-TEST_SKIPPED=77
-
-# Use installed pktio env or for make check take it from platform directory
-if [ -f "./pktio_env" ]; then
- . ./pktio_env
-elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
- . ${TEST_SRC_DIR}/pktio_env
-else
- echo "BUG: unable to find pktio_env!"
- echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
- echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
- exit 1
-fi
-
-run_test()
-{
- local ret=0
-
- pktio_main${EXEEXT} $*
- if [ $? -ne 0 ]; then
- ret=1
- fi
- if [ $ret -ne 0 ]; then
- echo "!!! FAILED !!!"
- fi
-
- return $ret
-}
-
-run()
-{
- echo "pktio: using 'loop' device"
- $ODP_GDB pktio_main${EXEEXT} $*
- loop_ret=$?
-
- # need to be root to run tests with real interfaces
- if [ "$(id -u)" != "0" ]; then
- exit $ret
- fi
-
- if [ "$ODP_PKTIO_IF0" = "" ]; then
- # no interfaces specified, use default veth interfaces
- # setup by the pktio_env script
- setup_pktio_env clean
- if [ $? != 0 ]; then
- echo "Failed to setup test environment, skipping test."
- exit $TEST_SKIPPED
- fi
- export ODP_PLATFORM_PARAMS="-n 4 --no-pci --vdev eth_pcap0,iface=$IF0 --vdev eth_pcap1,iface=$IF1"
- export ODP_PKTIO_IF0=0
- export ODP_PKTIO_IF1=1
- fi
-
- run_test
- ret=$?
-
- [ $ret = 0 ] && ret=$loop_ret
-
- exit $ret
-}
-
-if [ $# != 0 ]; then
- action=$1
- shift
-fi
-
-case "$action" in
- setup) setup_pktio_env ;;
- cleanup) cleanup_pktio_env ;;
- run) run ;;
- *) run ;;
-esac
diff --git a/test/linux-dpdk/wrapper-script.sh b/test/linux-dpdk/wrapper-script.sh
deleted file mode 100755
index 3bb52d9cf..000000000
--- a/test/linux-dpdk/wrapper-script.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-
-export ODP_PLATFORM_PARAMS=${ODP_PLATFORM_PARAMS:--n 4}
-# where to mount huge pages
-export HUGEPAGEDIR=${HUGEPAGEDIR:-/mnt/huge}
-
-# Make sure huge pages are released when a unit test crashes "make check"
-trap ctrl_c INT
-
-ctrl_c() {
- echo "** Trapped CTRL-C"
- if grep -qs "$HUGEPAGEDIR" /proc/mounts; then
- echo "** Umounting hugetlbfs"
- sleep 1 && sudo umount -a -t hugetlbfs
- fi
-}
-
-function mount_and_reserve() {
- export PATH_NR="/sys/devices/system/node/node0/hugepages/hugepages-${SIZE_KB}kB/nr_hugepages"
- export PATH_FREE="/sys/devices/system/node/node0/hugepages/hugepages-${SIZE_KB}kB/free_hugepages"
- if grep -qs "$HUGEPAGEDIR" /proc/mounts; then
- echo "Umounting hugetlbfs from previous use!"
- sudo umount -a -t hugetlbfs
- fi
- echo "Trying $SIZE pages"
- sudo mount -t hugetlbfs -o pagesize=$SIZE nodev $HUGEPAGEDIR 2>/dev/null
- res=$?
- if [ $res -ne 0 ]; then
- echo "ERROR: can't mount hugepages"
- return $res
- fi
- sudo sh -c "echo $RESERVE > $PATH_NR"
- if [ `cat $PATH_NR` -lt 1 ]; then
- echo "Failed to reserve at least 1 huge page!"
- return 1
- else
- echo "Total number: `cat $PATH_NR`"
- echo "Free pages: `cat $PATH_FREE`"
- fi
-}
-
-if [ ! -d $HUGEPAGEDIR ]; then
- sudo mkdir -p $HUGEPAGEDIR
-fi
-echo "Mounting hugetlbfs"
-export SIZE=1G
-export SIZE_KB=1048576
-export RESERVE=1
-mount_and_reserve
-res=$?
-if [ $res -ne 0 ]; then
- export SIZE=2MB
- export SIZE_KB=2048
- export RESERVE=1024
- mount_and_reserve
- res=$?
- if [ $res -ne 0 ]; then
- echo "ERROR: can't mount hugepages with any size"
- exit $res
- fi
-fi
-echo "running $1!"
-if [ ${1: -3} == ".sh" ]
-then
- sudo TEST_DIR=${TEST_DIR} \
- ODP_PLATFORM_PARAMS="$ODP_PLATFORM_PARAMS" \
- ODP_GDB=$ODP_GDB $1
-else
- sudo TEST_DIR=${TEST_DIR} ODP_PLATFORM_PARAMS="$ODP_PLATFORM_PARAMS" \
- $ODP_GDB $1
-fi
-res=$?
-echo "Unmounting hugetlbfs"
-sleep 0.3 && sudo umount -a -t hugetlbfs
-exit $res
-
diff --git a/test/linux-generic/.gitignore b/test/linux-generic/.gitignore
deleted file mode 100644
index 5dabf91c1..000000000
--- a/test/linux-generic/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.log
-*.trs
-tests-validation.env
diff --git a/test/linux-generic/Makefile.am b/test/linux-generic/Makefile.am
deleted file mode 100644
index 998ee5617..000000000
--- a/test/linux-generic/Makefile.am
+++ /dev/null
@@ -1,83 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/common_plat/validation
-
-ALL_API_VALIDATION_DIR = ${top_builddir}/test/common_plat/validation/api
-
-SUBDIRS = performance
-
-if test_vald
-TESTS = validation/api/pktio/pktio_run.sh \
- validation/api/pktio/pktio_run_tap.sh \
- validation/api/shmem/shmem_linux \
- $(ALL_API_VALIDATION_DIR)/atomic/atomic_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/barrier/barrier_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/buffer/buffer_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/classification/classification_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/cpumask/cpumask_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/crypto/crypto_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/errno/errno_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/hash/hash_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_ok$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_abort$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/init/init_main_log$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/lock/lock_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/packet/packet_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/pool/pool_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/queue/queue_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/random/random_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/scheduler/scheduler_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/std_clib/std_clib_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/thread/thread_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/time/time_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/timer/timer_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/traffic_mngr/traffic_mngr_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/shmem/shmem_main$(EXEEXT) \
- $(ALL_API_VALIDATION_DIR)/system/system_main$(EXEEXT) \
- ring/ring_main$(EXEEXT)
-
-SUBDIRS += validation/api/pktio\
- validation/api/shmem\
- mmap_vlan_ins\
- pktio_ipc\
- ring
-
-if HAVE_PCAP
-TESTS += validation/api/pktio/pktio_run_pcap.sh
-
-TESTS += mmap_vlan_ins/mmap_vlan_ins.sh
-SUBDIRS += mmap_vlan_ins
-endif
-if netmap_support
-TESTS += validation/api/pktio/pktio_run_netmap.sh
-endif
-if PKTIO_DPDK
-TESTS += validation/api/pktio/pktio_run_dpdk.sh
-endif
-
-if PKTIO_IPC
-TESTS += pktio_ipc/pktio_ipc_run.sh
-SUBDIRS += pktio_ipc
-endif
-else
-#performance tests refer to pktio_env
-if test_perf
-SUBDIRS += validation/api/pktio
-endif
-endif
-
-TEST_EXTENSIONS = .sh
-
-dist_check_SCRIPTS = run-test tests-validation.env $(LOG_COMPILER)
-
-test_SCRIPTS = $(dist_check_SCRIPTS)
-
-tests-validation.env:
- echo "TESTS=\"$(TESTS)\"" > $@
- echo "$(TESTS_ENVIRONMENT)" >> $@
- echo "$(LOG_COMPILER)" >> $@
-
-if test_installdir
-installcheck-local:
- $(DESTDIR)/$(testdir)/run-test
-endif
-
diff --git a/test/linux-generic/Makefile.inc b/test/linux-generic/Makefile.inc
deleted file mode 100644
index 198087f3c..000000000
--- a/test/linux-generic/Makefile.inc
+++ /dev/null
@@ -1,20 +0,0 @@
-# The following definitions may be used by platform tests that wish to
-# build specific ODP applications, (i.e those whose do more than validation
-# test wrapping)
-
-AM_LDFLAGS += -static
-
-LIBCUNIT_COMMON = $(top_builddir)/test/common_plat/common/libcunit_common.la
-LIB = $(top_builddir)/lib
-LIBODP = $(LIB)/libodphelper.la $(LIB)/libodp-linux.la
-
-INCCUNIT_COMMON = -I$(top_srcdir)/test/common_plat/common
-INCODP = \
- -I$(top_builddir)/include \
- -I$(top_builddir)/platform/@with_platform@/include \
- -I$(top_srcdir)/helper/include \
- -I$(top_srcdir)/include \
- -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@ \
- -I$(top_srcdir)/platform/@with_platform@/arch/$(ARCH_DIR) \
- -I$(top_srcdir)/platform/@with_platform@/include \
- -I$(top_srcdir)/test
diff --git a/test/linux-generic/m4/configure.m4 b/test/linux-generic/m4/configure.m4
deleted file mode 100644
index 8746dabc8..000000000
--- a/test/linux-generic/m4/configure.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-m4_include([test/linux-generic/m4/performance.m4])
-
-AC_CONFIG_FILES([test/linux-generic/Makefile
- test/linux-generic/validation/api/shmem/Makefile
- test/linux-generic/validation/api/pktio/Makefile
- test/linux-generic/mmap_vlan_ins/Makefile
- test/linux-generic/pktio_ipc/Makefile
- test/linux-generic/ring/Makefile
- test/linux-generic/performance/Makefile])
diff --git a/test/linux-generic/m4/performance.m4 b/test/linux-generic/m4/performance.m4
deleted file mode 100644
index 7f54b96d7..000000000
--- a/test/linux-generic/m4/performance.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-##########################################################################
-# Enable/disable test-perf-proc
-##########################################################################
-test_perf_proc=no
-AC_ARG_ENABLE([test-perf-proc],
- [ --enable-test-perf-proc run test in test/performance in process mode],
- [if test "x$enableval" = "xyes"; then
- test_perf_proc=yes
- fi])
diff --git a/test/linux-generic/mmap_vlan_ins/.gitignore b/test/linux-generic/mmap_vlan_ins/.gitignore
deleted file mode 100644
index 755fa2ed5..000000000
--- a/test/linux-generic/mmap_vlan_ins/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.pcap
-plat_mmap_vlan_ins
diff --git a/test/linux-generic/mmap_vlan_ins/Makefile.am b/test/linux-generic/mmap_vlan_ins/Makefile.am
deleted file mode 100644
index 5cac159c7..000000000
--- a/test/linux-generic/mmap_vlan_ins/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
-
-dist_check_SCRIPTS = vlan.pcap \
- mmap_vlan_ins.sh \
- pktio_env
-
-test_SCRIPTS = $(dist_check_SCRIPTS)
-
-test_PROGRAMS = plat_mmap_vlan_ins$(EXEEXT)
-plat_mmap_vlan_ins_LDFLAGS = $(AM_LDFLAGS) -static
-plat_mmap_vlan_ins_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/example
-
-# Clonned from example odp_l2fwd simple
-dist_plat_mmap_vlan_ins_SOURCES = mmap_vlan_ins.c
diff --git a/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.c b/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.c
deleted file mode 100644
index e91a9d0dd..000000000
--- a/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <getopt.h>
-#include <signal.h>
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-
-#define POOL_NUM_PKT 100
-#define POOL_SEG_LEN 1500
-#define MAX_PKT_BURST 32
-#define MAX_WORKERS 1
-
-static int g_ret;
-
-struct {
- odp_pktio_t if0, if1;
- odp_pktin_queue_t if0in, if1in;
- odp_pktout_queue_t if0out, if1out;
- odph_ethaddr_t src, dst;
-} global;
-
-static odp_pktio_t create_pktio(const char *name, odp_pool_t pool,
- odp_pktin_queue_t *pktin,
- odp_pktout_queue_t *pktout)
-{
- odp_pktio_param_t pktio_param;
- odp_pktin_queue_param_t in_queue_param;
- odp_pktout_queue_param_t out_queue_param;
- odp_pktio_t pktio;
-
- odp_pktio_param_init(&pktio_param);
-
- pktio = odp_pktio_open(name, pool, &pktio_param);
- if (pktio == ODP_PKTIO_INVALID) {
- printf("Failed to open %s\n", name);
- exit(1);
- }
-
- odp_pktin_queue_param_init(&in_queue_param);
- odp_pktout_queue_param_init(&out_queue_param);
-
- in_queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
-
- if (odp_pktin_queue_config(pktio, &in_queue_param)) {
- printf("Failed to config input queue for %s\n", name);
- exit(1);
- }
-
- out_queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
-
- if (odp_pktout_queue_config(pktio, &out_queue_param)) {
- printf("Failed to config output queue for %s\n", name);
- exit(1);
- }
-
- if (odp_pktin_queue(pktio, pktin, 1) != 1) {
- printf("pktin queue query failed for %s\n", name);
- exit(1);
- }
- if (odp_pktout_queue(pktio, pktout, 1) != 1) {
- printf("pktout queue query failed for %s\n", name);
- exit(1);
- }
- return pktio;
-}
-
-static int run_worker(void *arg ODP_UNUSED)
-{
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int pkts, sent, tx_drops, i;
- int total_pkts = 0;
- uint64_t wait_time = odp_pktin_wait_time(2 * ODP_TIME_SEC_IN_NS);
-
- if (odp_pktio_start(global.if0)) {
- printf("unable to start input interface\n");
- exit(1);
- }
- printf("started input interface\n");
- if (odp_pktio_start(global.if1)) {
- printf("unable to start output interface\n");
- exit(1);
- }
- printf("started output interface\n");
- printf("started all\n");
-
- while (1) {
- pkts = odp_pktin_recv_tmo(global.if0in, pkt_tbl, MAX_PKT_BURST,
- wait_time);
- if (odp_unlikely(pkts <= 0)) {
- printf("recv tmo!\n");
- break;
- }
-
- for (i = 0; i < pkts; i++) {
- odp_packet_t pkt = pkt_tbl[i];
- odph_ethhdr_t *eth;
-
- if (odp_unlikely(!odp_packet_has_eth(pkt))) {
- printf("warning: packet has no eth header\n");
- return 0;
- }
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- eth->src = global.src;
- eth->dst = global.dst;
- }
- sent = odp_pktout_send(global.if1out, pkt_tbl, pkts);
- if (sent < 0)
- sent = 0;
- total_pkts += sent;
- tx_drops = pkts - sent;
- if (odp_unlikely(tx_drops))
- odp_packet_free_multi(&pkt_tbl[sent], tx_drops);
- }
-
- printf("Total send packets: %d\n", total_pkts);
-
- if (total_pkts < 10)
- g_ret = -1;
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- odp_pool_t pool;
- odp_pool_param_t params;
- odp_cpumask_t cpumask;
- odph_odpthread_t thd[MAX_WORKERS];
- odp_instance_t instance;
- odph_odpthread_params_t thr_params;
- int opt;
- int long_index;
-
- static const struct option longopts[] = { {NULL, 0, NULL, 0} };
- static const char *shortopts = "";
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
-
- /*
- * parse own options: currentely none, but this will move optind
- * to the first non-option argument. (in case there where helprt args)
- */
- opterr = 0; /* do not issue errors on helper options */
- while (1) {
- opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
- if (-1 == opt)
- break; /* No more options */
- }
-
- if (argc != optind + 4 ||
- odph_eth_addr_parse(&global.dst, argv[optind + 2]) != 0 ||
- odph_eth_addr_parse(&global.src, argv[optind + 3]) != 0) {
- printf("Usage: odp_l2fwd_simple eth0 eth1 01:02:03:04:05:06"
- " 07:08:09:0a:0b:0c\n");
- printf("Where eth0 and eth1 are the used interfaces"
- " (must have 2 of them)\n");
- printf("And the hexadecimal numbers are destination MAC address"
- " and source MAC address\n");
- exit(1);
- }
-
- if (odp_init_global(&instance, NULL, NULL)) {
- printf("Error: ODP global init failed.\n");
- exit(1);
- }
-
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- printf("Error: ODP local init failed.\n");
- exit(1);
- }
-
- /* Create packet pool */
- odp_pool_param_init(&params);
- params.pkt.seg_len = POOL_SEG_LEN;
- params.pkt.len = POOL_SEG_LEN;
- params.pkt.num = POOL_NUM_PKT;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create("packet pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- printf("Error: packet pool create failed.\n");
- exit(1);
- }
-
- global.if0 = create_pktio(argv[optind], pool, &global.if0in,
- &global.if0out);
- global.if1 = create_pktio(argv[optind + 1], pool, &global.if1in,
- &global.if1out);
-
- odp_cpumask_default_worker(&cpumask, MAX_WORKERS);
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_worker;
- thr_params.arg = NULL;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
-
- odph_odpthreads_create(thd, &cpumask, &thr_params);
- odph_odpthreads_join(thd);
-
- if (odp_pool_destroy(pool)) {
- printf("Error: pool destroy\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_local()) {
- printf("Error: term local\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_global(instance)) {
- printf("Error: term global\n");
- exit(EXIT_FAILURE);
- }
-
- return g_ret;
-}
diff --git a/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.sh b/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.sh
deleted file mode 100755
index 3c6df8ecd..000000000
--- a/test/linux-generic/mmap_vlan_ins/mmap_vlan_ins.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-#
-# This test is intend to test pkt_mmap_vlan_insert() feature for
-# linux-generic packet mmap pktio.
-#
-#
-export ODP_PKTIO_DISABLE_SOCKET_MMSG=1
-
-# directory where platform test sources are, including scripts
-TEST_SRC_DIR=$(dirname $0)
-
-# exit codes expected by automake for skipped tests
-TEST_SKIPPED=77
-
-# directories where binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/../mmap_vlan_ins:$PATH
-PATH=`pwd`/mmap_vlan_ins:$PATH
-PATH=$(dirname $0):$PATH
-PATH=.:$PATH
-
-bin_path=$(which plat_mmap_vlan_ins${EXEEXT})
-if [ -x "$bin_path" ] ; then
- echo "running with plat_mmap_vlan_ins: $bin_path"
-else
- echo "cannot find plat_mmap_vlan_ins: please set you PATH for it."
- pwd
- exit 1
-fi
-
-
-# Use installed pktio env or for make check take it from platform directory
-if [ -f "./pktio_env" ]; then
- . ./pktio_env
-elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
- . ${TEST_SRC_DIR}/pktio_env
-else
- echo "BUG: unable to find pktio_env!"
- echo "pktio_env has to be in current directory or"
- echo " in platform/\$ODP_PLATFORM/test."
- echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
- exit 1
-fi
-
-setup_pktio_env
-if [ $? -ne 0 ]; then
- return 77 # Skip the test
-fi
-
-PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name vlan.pcap -print -quit`
-echo "using PCAP_IN = ${PCAP_IN}"
-PCAP_OUT=vlan_out.pcap
-
-# Listen on veth pipe and write to pcap Send pcap
-plat_mmap_vlan_ins${EXEEXT} pktiop0p1 pcap:out=${PCAP_OUT} \
- 00:02:03:04:05:06 00:08:09:0a:0b:0c &
-# Send pcap file to veth interface
-plat_mmap_vlan_ins${EXEEXT} pcap:in=${PCAP_IN} pktiop1p0 \
- 01:02:03:04:05:06 01:08:09:0a:0b:0c
-
-rm -f ${PCAP_OUT}
-cleanup_pktio_env
-
-exit 0
diff --git a/test/linux-generic/mmap_vlan_ins/pktio_env b/test/linux-generic/mmap_vlan_ins/pktio_env
deleted file mode 100644
index 345b5bd56..000000000
--- a/test/linux-generic/mmap_vlan_ins/pktio_env
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Test script wrapper for running ODP pktio apps on linux-generic.
-#
-# For linux-generic the default behavior is to create two pairs of
-# virtual Ethernet interfaces and provide the names of these via
-# environment variables to pktio apps, the interfaces will be removed
-# before the script exits.
-#
-# Note that the creation of virtual Ethernet devices depends on having
-# CONFIG_VETH enabled in the kernel, if not enabled the env setup will be skipped.
-#
-# Network set up
-# IF0 <---> IF1
-# IF2 <---> IF3
-IF0=pktiop0p1
-IF1=pktiop1p0
-IF2=pktiop2p3
-IF3=pktiop3p2
-
-if [ "$0" = "$BASH_SOURCE" ]; then
- echo "Error: Platform specific env file has to be sourced."
-fi
-
-check_for_root()
-{
- if [ "$(id -u)" != "0" ]; then
- echo "check_for_root(): need to be root to setup VETH"
- return 1
- fi
- return 0
-}
-
-# wait for a network interface's operational state to be "up"
-wait_for_iface_up()
-{
- iface=$1
- cnt=0
-
- while [ $cnt -lt 50 ]; do
- read operstate < /sys/class/net/$iface/operstate
-
- if [ $? -ne 0 ]; then
- break
- elif [ "$operstate" = "up" ]; then
- return 0
- fi
-
- sleep 0.1
- cnt=`expr $cnt + 1`
- done
-
- return 1
-}
-
-setup_pktio_env()
-{
- echo "pktio: setting up test interfaces $IF0, $IF1, $IF2, $IF3."
-
- check_for_root
- if [ $? -ne 0 ]; then
- return 1
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link show $iface 2> /dev/null
- if [ $? -eq 0 ]; then
- echo "pktio: interface $iface already exist $?"
- return 2
- fi
- done
-
- if [ "$1" = "clean" ]; then
- trap cleanup_pktio_env EXIT
- fi
-
- ip link add $IF0 type veth peer name $IF1
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create veth pair"
- return 3
- fi
- ip link add $IF2 type veth peer name $IF3
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create veth pair"
- return 4
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link set $iface mtu 9216 up
- ifconfig $iface -arp
- done
-
- # check that the interface has come up before starting the test
- for iface in $IF0 $IF1 $IF2 $IF3; do
- wait_for_iface_up $iface
- if [ $? -ne 0 ]; then
- echo "pktio: interface $iface failed to come up"
- return 5
- fi
- done
-}
-
-cleanup_pktio_env()
-{
- echo "pktio: removing test interfaces $IF0, $IF1, $IF2, $IF3"
- check_for_root
- if [ $? -ne 0 ]; then
- return 1
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link del $iface 2> /dev/null
- done
- return 0
-}
diff --git a/test/linux-generic/mmap_vlan_ins/vlan.pcap b/test/linux-generic/mmap_vlan_ins/vlan.pcap
deleted file mode 100644
index 106ccb682..000000000
--- a/test/linux-generic/mmap_vlan_ins/vlan.pcap
+++ /dev/null
Binary files differ
diff --git a/test/linux-generic/performance/.gitignore b/test/linux-generic/performance/.gitignore
deleted file mode 100644
index 7e563b8b3..000000000
--- a/test/linux-generic/performance/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.log
-*.trs
diff --git a/test/linux-generic/performance/Makefile.am b/test/linux-generic/performance/Makefile.am
deleted file mode 100644
index cb72fce96..000000000
--- a/test/linux-generic/performance/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-
-TESTS_ENVIRONMENT += TEST_DIR=${builddir}
-
-TESTSCRIPTS = odp_scheduling_run_proc.sh
-
-TEST_EXTENSIONS = .sh
-
-if test_perf_proc
-TESTS = $(TESTSCRIPTS)
-endif
-
-EXTRA_DIST = $(TESTSCRIPTS)
diff --git a/test/linux-generic/performance/odp_scheduling_run_proc.sh b/test/linux-generic/performance/odp_scheduling_run_proc.sh
deleted file mode 100755
index 384017aff..000000000
--- a/test/linux-generic/performance/odp_scheduling_run_proc.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Script that passes command line arguments to odp_scheduling test when
-# launched by 'make check'
-
-TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-PERFORMANCE="$TEST_DIR/../../common_plat/performance"
-ret=0
-ALL=0
-
-run()
-{
- echo odp_scheduling_run starts requesting $1 worker threads
- echo =====================================================
-
- $PERFORMANCE/odp_scheduling${EXEEXT} --odph_proc -c $1 || ret=1
-}
-
-run 1
-run 5
-run 8
-run 11
-run $ALL
-
-exit $ret
diff --git a/test/linux-generic/pktio_ipc/.gitignore b/test/linux-generic/pktio_ipc/.gitignore
deleted file mode 100644
index 49ee4fd29..000000000
--- a/test/linux-generic/pktio_ipc/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-pktio_ipc1
-pktio_ipc2
diff --git a/test/linux-generic/pktio_ipc/Makefile.am b/test/linux-generic/pktio_ipc/Makefile.am
deleted file mode 100644
index 8858bd2f5..000000000
--- a/test/linux-generic/pktio_ipc/Makefile.am
+++ /dev/null
@@ -1,20 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
-
-test_PROGRAMS = pktio_ipc1\
- pktio_ipc2
-
-pktio_ipc1_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/example
-pktio_ipc1_LDFLAGS = $(AM_LDFLAGS) -static
-pktio_ipc2_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/example
-pktio_ipc2_LDFLAGS = $(AM_LDFLAGS) -static
-
-noinst_HEADERS = $(top_srcdir)/test/test_debug.h
-
-dist_pktio_ipc1_SOURCES = pktio_ipc1.c ipc_common.c
-dist_pktio_ipc2_SOURCES = pktio_ipc2.c ipc_common.c
-
-EXTRA_DIST = ipc_common.h
-
-dist_check_SCRIPTS = pktio_ipc_run.sh
-test_SCRIPTS = $(dist_check_SCRIPTS)
diff --git a/test/linux-generic/pktio_ipc/ipc_common.c b/test/linux-generic/pktio_ipc/ipc_common.c
deleted file mode 100644
index 85cbc8b41..000000000
--- a/test/linux-generic/pktio_ipc/ipc_common.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "ipc_common.h"
-
-/** Run time in seconds */
-int run_time_sec;
-/** Pid of the master process */
-int master_pid;
-
-int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
- odp_packet_t pkt_tbl[], int num)
-{
- int ret;
- int sent = 0;
- odp_time_t start_time;
- odp_time_t end_time;
- odp_time_t wait;
- odp_pktout_queue_t pktout;
- int i;
-
- start_time = odp_time_local();
- wait = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
- end_time = odp_time_sum(start_time, wait);
-
- if (odp_pktout_queue(pktio, &pktout, 1) != 1) {
- EXAMPLE_ERR("no output queue\n");
- return -1;
- }
-
- while (sent != num) {
- ret = odp_pktout_send(pktout, &pkt_tbl[sent], num - sent);
- if (ret < 0) {
- EXAMPLE_ERR("odp_pktout_send return %d\n", ret);
- for (i = sent; i < num; i++)
- odp_packet_free(pkt_tbl[i]);
- return -1;
- }
-
- sent += ret;
-
- if (odp_time_cmp(end_time, odp_time_local()) < 0) {
- for (i = sent; i < num; i++)
- odp_packet_free(pkt_tbl[i]);
- EXAMPLE_ERR("Send Timeout!\n");
- return -1;
- }
- }
-
- return 0;
-}
-
-odp_pktio_t create_pktio(odp_pool_t pool, int master_pid)
-{
- odp_pktio_param_t pktio_param;
- odp_pktio_t ipc_pktio;
- char name[30];
-
- odp_pktio_param_init(&pktio_param);
-
- if (master_pid)
- sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
- else
- sprintf(name, TEST_IPC_PKTIO_NAME);
-
- printf("pid: %d, create IPC pktio %s\n", getpid(), name);
- ipc_pktio = odp_pktio_open(name, pool, &pktio_param);
- if (ipc_pktio == ODP_PKTIO_INVALID) {
- EXAMPLE_ERR("Error: ipc pktio %s create failed.\n", name);
- return ODP_PKTIO_INVALID;
- }
-
- if (odp_pktin_queue_config(ipc_pktio, NULL)) {
- EXAMPLE_ERR("Input queue config failed\n");
- return ODP_PKTIO_INVALID;
- }
-
- if (odp_pktout_queue_config(ipc_pktio, NULL)) {
- EXAMPLE_ERR("Output queue config failed\n");
- return ODP_PKTIO_INVALID;
- }
-
- return ipc_pktio;
-}
-
-/**
- * Parse and store the command line arguments
- *
- * @param argc argument count
- * @param argv[] argument vector
- * @param appl_args Store application arguments here
- */
-void parse_args(int argc, char *argv[])
-{
- int opt;
- int long_index;
- static struct option longopts[] = {
- {"time", required_argument, NULL, 't'},
- {"pid", required_argument, NULL, 'p'}, /* master process pid */
- {"help", no_argument, NULL, 'h'}, /* return 'h' */
- {NULL, 0, NULL, 0}
- };
-
- run_time_sec = 0; /* loop forever if time to run is 0 */
- master_pid = 0;
-
- while (1) {
- opt = getopt_long(argc, argv, "+t:p:h",
- longopts, &long_index);
-
- if (opt == -1)
- break; /* No more options */
-
- switch (opt) {
- case 't':
- run_time_sec = atoi(optarg);
- break;
- case 'p':
- master_pid = atoi(optarg);
- break;
- case 'h':
- default:
- usage(argv[0]);
- exit(EXIT_SUCCESS);
- break;
- }
- }
-
- optind = 1; /* reset 'extern optind' from the getopt lib */
-}
-
-/**
- * Print system and application info
- */
-void print_info(char *progname)
-{
- printf("\n"
- "ODP system info\n"
- "---------------\n"
- "ODP API version: %s\n"
- "CPU model: %s\n"
- "\n",
- odp_version_api_str(), odp_cpu_model_str());
-
- printf("Running ODP appl: \"%s\"\n"
- "-----------------\n"
- "Using IF: %s\n",
- progname, pktio_name);
- printf("\n\n");
- fflush(NULL);
-}
-
-/**
- * Prinf usage information
- */
-void usage(char *progname)
-{
- printf("\n"
- "Usage: %s OPTIONS\n"
- " E.g. -n ipc_name_space %s -t seconds\n"
- "\n"
- "OpenDataPlane odp-linux ipc test application.\n"
- "\n"
- "Mandatory OPTIONS:\n"
- " -n, --ns IPC name space ID /dev/shm/odp-<ns>-objname.\n"
- "Optional OPTIONS\n"
- " -h, --help Display help and exit.\n"
- " -t, --time Time to run in seconds.\n"
- "\n", NO_PATH(progname), NO_PATH(progname)
- );
-}
diff --git a/test/linux-generic/pktio_ipc/ipc_common.h b/test/linux-generic/pktio_ipc/ipc_common.h
deleted file mode 100644
index 624557761..000000000
--- a/test/linux-generic/pktio_ipc/ipc_common.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#define _POSIX_C_SOURCE 200809L
-#include <stdlib.h>
-#include <inttypes.h>
-#include <string.h>
-#include <getopt.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <signal.h>
-#include <sys/wait.h>
-
-#include <example_debug.h>
-
-#include <odp.h>
-#include <odp/helper/odph_api.h>
-
-/** @def SHM_PKT_POOL_SIZE
- * @brief Size of the shared memory block
- */
-#define SHM_PKT_POOL_SIZE 8192
-
-/** @def SHM_PKT_POOL_BUF_SIZE
- * @brief Buffer size of the packet pool buffer
- */
-#define SHM_PKT_POOL_BUF_SIZE 100
-
-/** @def MAX_PKT_BURST
- * @brief Maximum number of packet bursts
- */
-#define MAX_PKT_BURST 16
-
-/** Get rid of path in filename - only for unix-type paths using '/' */
-#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
- strrchr((file_name), '/') + 1 : (file_name))
-
-#define TEST_SEQ_MAGIC 0x92749451
-#define TEST_SEQ_MAGIC_2 0x81638340
-
-#define TEST_ALLOC_MAGIC 0x1234adcd
-
-#define TEST_IPC_PKTIO_NAME "ipc:ipktio"
-#define TEST_IPC_PKTIO_PID_NAME "ipc:%d:ipktio"
-
-/** Can be any name, same or not the same. */
-#define TEST_IPC_POOL_NAME "ipc_packet_pool"
-
-/** magic number and sequence at start of packet payload */
-typedef struct ODP_PACKED {
- odp_u32be_t magic;
- odp_u32be_t seq;
-} pkt_head_t;
-
-/** magic number at end of packet payload */
-typedef struct ODP_PACKED {
- odp_u32be_t magic;
-} pkt_tail_t;
-
-/** Application argument */
-char *pktio_name;
-
-/** Run time in seconds */
-int run_time_sec;
-
-/** PID of the master process */
-int master_pid;
-
-/* helper funcs */
-void parse_args(int argc, char *argv[]);
-void print_info(char *progname);
-void usage(char *progname);
-
-/**
- * Create a ipc pktio handle.
- *
- * @param pool Pool to associate with device for packet RX/TX
- * @param master_pid Pid of master process
- *
- * @return The handle of the created pktio object.
- * @retval ODP_PKTIO_INVALID if the create fails.
- */
-odp_pktio_t create_pktio(odp_pool_t pool, int master_pid);
-
-/** Spin and send all packet from table
- *
- * @param pktio pktio device
- * @param pkt_tbl packets table
- * @param num number of packets
- */
-int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
- odp_packet_t pkt_tbl[],
- int num);
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc1.c b/test/linux-generic/pktio_ipc/pktio_ipc1.c
deleted file mode 100644
index 705c205db..000000000
--- a/test/linux-generic/pktio_ipc/pktio_ipc1.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "ipc_common.h"
-
-/**
- * @file
- * @example pktio_ipc1.c ODP IPC example application.
- * This application works in pair with pktio_ipc2 application.
- * It opens ipc pktio, allocates packets, sets magic number and
- * sends packets to ipc pktio. Then app reads packets and checks
- * that magic number was properly updated and there is no packet
- * loss (i.e. sequesce counter continiusly incrementing.)
- */
-
-/**
- * Packet IO loopback worker thread using bursts from/to IO resources
- *
- * @param arg thread arguments of type 'thread_args_t *'
- */
-static int pktio_run_loop(odp_pool_t pool)
-{
- int pkts;
- odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- uint64_t cnt = 0; /* increasing counter on each send packet */
- uint64_t cnt_recv = 0; /* increasing counter to validate
- cnt on receive */
- uint64_t stat_pkts = 0;
- uint64_t stat_pkts_alloc = 0;
- uint64_t stat_pkts_prev = 0;
- uint64_t stat_errors = 0;
- uint64_t stat_free = 0;
- odp_time_t start_cycle;
- odp_time_t current_cycle;
- odp_time_t cycle;
- odp_time_t diff;
- odp_time_t wait;
- int ret;
- odp_pktin_queue_t pktin;
- char name[30];
-
- if (master_pid)
- sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
- else
- sprintf(name, TEST_IPC_PKTIO_NAME);
-
- wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
- start_cycle = odp_time_local();
- current_cycle = start_cycle;
-
- for (;;) {
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- printf("timeout exit, run_time_sec %d\n",
- run_time_sec);
- return -1;
- }
- }
-
- ipc_pktio = create_pktio(pool, master_pid);
- if (ipc_pktio != ODP_PKTIO_INVALID)
- break;
- if (!master_pid)
- break;
- }
-
- if (ipc_pktio == ODP_PKTIO_INVALID)
- return -1;
-
- if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
- EXAMPLE_ERR("no input queue\n");
- return -1;
- }
-
- /* start ipc pktio, i.e. wait until other process connects */
- for (;;) {
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- printf("timeout exit, run_time_sec %d\n",
- run_time_sec);
- goto exit;
- }
- }
-
- ret = odp_pktio_start(ipc_pktio);
- if (!ret)
- break;
- }
-
- /* packets loop */
- for (;;) {
- int i;
-
- /* 1. exit loop if time specified */
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- EXAMPLE_DBG("exit after %d seconds\n",
- run_time_sec);
- break;
- }
- }
-
- /* 2. Receive packets back from ipc_pktio, validate magic
- * number sequence counter and free that packet
- */
- while (1) {
- pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
- if (pkts <= 0)
- break;
-
- for (i = 0; i < pkts; i++) {
- odp_packet_t pkt = pkt_tbl[i];
- pkt_head_t head;
- pkt_tail_t tail;
- size_t off;
-
- off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID) {
- stat_errors++;
- stat_free++;
- odp_packet_free(pkt);
- EXAMPLE_ERR("invalid l4 offset\n");
- }
-
- off += ODPH_UDPHDR_LEN;
- ret = odp_packet_copy_to_mem(pkt, off,
- sizeof(head),
- &head);
- if (ret) {
- stat_errors++;
- stat_free++;
- odp_packet_free(pkt);
- EXAMPLE_DBG("error\n");
- continue;
- }
-
- if (head.magic == TEST_ALLOC_MAGIC) {
- stat_free++;
- odp_packet_free(pkt);
- continue;
- }
-
- if (head.magic != TEST_SEQ_MAGIC_2) {
- stat_errors++;
- stat_free++;
- odp_packet_free(pkt);
- EXAMPLE_DBG("error\n");
- continue;
- }
-
- off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
- ret = odp_packet_copy_to_mem(pkt, off,
- sizeof(tail),
- &tail);
- if (ret) {
- stat_errors++;
- stat_free++;
- odp_packet_free(pkt);
- continue;
- }
-
- if (tail.magic != TEST_SEQ_MAGIC) {
- stat_errors++;
- stat_free++;
- odp_packet_free(pkt);
- continue;
- }
-
- cnt_recv++;
-
- if (head.seq != cnt_recv) {
- stat_errors++;
- odp_packet_free(pkt);
- EXAMPLE_DBG("head.seq %d - "
- "cnt_recv %" PRIu64 ""
- " = %" PRIu64 "\n",
- head.seq, cnt_recv,
- head.seq - cnt_recv);
- cnt_recv = head.seq;
- stat_errors++;
- stat_free++;
- continue;
- }
-
- stat_pkts++;
- odp_packet_free(pkt);
- }
- }
-
- /* 3. emulate that pkts packets were received */
- odp_random_data((uint8_t *)&pkts, sizeof(pkts), 0);
- pkts = ((pkts & 0xffff) % MAX_PKT_BURST) + 1;
-
- for (i = 0; i < pkts; i++) {
- odp_packet_t pkt;
-
- pkt = odp_packet_alloc(pool, SHM_PKT_POOL_BUF_SIZE);
- if (pkt == ODP_PACKET_INVALID)
- break;
-
- stat_pkts_alloc++;
- odp_packet_l4_offset_set(pkt, 30);
- pkt_tbl[i] = pkt;
- }
-
- /* exit if no packets allocated */
- if (i == 0) {
- EXAMPLE_DBG("unable to alloc packet pkts %d/%d\n",
- i, pkts);
- break;
- }
-
- pkts = i;
-
- /* 4. Copy counter and magic numbers to that packets */
- for (i = 0; i < pkts; i++) {
- pkt_head_t head;
- pkt_tail_t tail;
- size_t off;
- odp_packet_t pkt = pkt_tbl[i];
-
- off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID)
- EXAMPLE_ABORT("packet L4 offset not set");
-
- head.magic = TEST_SEQ_MAGIC;
- head.seq = cnt++;
-
- off += ODPH_UDPHDR_LEN;
- ret = odp_packet_copy_from_mem(pkt, off, sizeof(head),
- &head);
- if (ret)
- EXAMPLE_ABORT("unable to copy in head data");
-
- tail.magic = TEST_SEQ_MAGIC;
- off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
- ret = odp_packet_copy_from_mem(pkt, off, sizeof(tail),
- &tail);
- if (ret)
- EXAMPLE_ABORT("unable to copy in tail data");
- }
-
- /* 5. Send packets to ipc_pktio */
- ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, pkts);
- if (ret < 0) {
- EXAMPLE_DBG("unable to sending to ipc pktio\n");
- break;
- }
-
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, current_cycle);
- if (odp_time_cmp(odp_time_local_from_ns(ODP_TIME_SEC_IN_NS),
- diff) < 0) {
- current_cycle = cycle;
- printf("\rpkts: %" PRIu64 ", alloc %" PRIu64 ","
- " errors %" PRIu64 ", pps %" PRIu64 ","
- " free %" PRIu64 ".",
- stat_pkts, stat_pkts_alloc, stat_errors,
- (stat_pkts + stat_pkts_alloc - stat_pkts_prev),
- stat_free);
- fflush(stdout);
- stat_pkts_prev = stat_pkts + stat_pkts_alloc;
- }
- }
-
- /* cleanup and exit */
- ret = odp_pktio_stop(ipc_pktio);
- if (ret) {
- EXAMPLE_DBG("odp_pktio_stop error %d\n", ret);
- return -1;
- }
-
-exit:
- ret = odp_pktio_close(ipc_pktio);
- if (ret) {
- EXAMPLE_DBG("odp_pktio_close error %d\n", ret);
- return -1;
- }
-
- return (stat_errors > 10 || stat_pkts < 1000) ? -1 : 0;
-}
-
-/**
- * ODP packet example main function
- */
-int main(int argc, char *argv[])
-{
- odp_pool_t pool;
- odp_pool_param_t params;
- odp_instance_t instance;
- int ret;
-
- /* Parse and store the application arguments */
- parse_args(argc, argv);
-
- /* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
- EXAMPLE_ERR("Error: ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- EXAMPLE_ERR("Error: ODP local init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Print both system and application information */
- print_info(NO_PATH(argv[0]));
-
- /* Create packet pool */
- memset(&params, 0, sizeof(params));
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
- if (pool == ODP_POOL_INVALID) {
- EXAMPLE_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
-
- odp_pool_print(pool);
-
- ret = pktio_run_loop(pool);
-
- if (odp_pool_destroy(pool)) {
- EXAMPLE_ERR("Error: odp_pool_destroy() failed.\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_local()) {
- EXAMPLE_ERR("Error: odp_term_local() failed.\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_global(instance)) {
- EXAMPLE_ERR("Error: odp_term_global() failed.\n");
- exit(EXIT_FAILURE);
- }
-
- EXAMPLE_DBG("return %d\n", ret);
- return ret;
-}
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc2.c b/test/linux-generic/pktio_ipc/pktio_ipc2.c
deleted file mode 100644
index daf384137..000000000
--- a/test/linux-generic/pktio_ipc/pktio_ipc2.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * @example pktio_ipc2.c ODP IPC example application.
- * This application works in pair with pktio_ipc1 application.
- * It opens ipc pktio, reads packets and updates magic number.
- * Also it allocates some packets from internal pool and sends
- * to ipc pktio.
- */
-
-#include "ipc_common.h"
-
-static int ipc_second_process(int master_pid)
-{
- odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
- odp_pool_param_t params;
- odp_pool_t pool;
- odp_packet_t pkt_tbl[MAX_PKT_BURST];
- odp_packet_t alloc_pkt;
- int pkts;
- int ret;
- int i;
- odp_time_t start_cycle;
- odp_time_t cycle;
- odp_time_t diff;
- odp_time_t wait;
- uint64_t stat_pkts = 0;
- odp_pktin_queue_t pktin;
-
- /* Create packet pool */
- memset(&params, 0, sizeof(params));
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
- if (pool == ODP_POOL_INVALID) {
- EXAMPLE_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
-
- wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
- start_cycle = odp_time_local();
-
- for (;;) {
- /* exit loop if time specified */
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- printf("timeout exit, run_time_sec %d\n",
- run_time_sec);
- goto not_started;
- }
- }
-
- ipc_pktio = create_pktio(pool, master_pid);
- if (ipc_pktio != ODP_PKTIO_INVALID)
- break;
- if (!master_pid)
- break;
- }
-
- if (ipc_pktio == ODP_PKTIO_INVALID) {
- odp_pool_destroy(pool);
- return -1;
- }
-
- if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
- odp_pool_destroy(pool);
- EXAMPLE_ERR("no input queue\n");
- return -1;
- }
-
- /* start ipc pktio, i.e. wait until other process connects */
- for (;;) {
- /* 1. exit loop if time specified */
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- printf("timeout exit, run_time_sec %d\n",
- run_time_sec);
- goto not_started;
- }
- }
-
- ret = odp_pktio_start(ipc_pktio);
- if (!ret)
- break;
- }
-
- for (;;) {
- /* exit loop if time specified */
- if (run_time_sec) {
- cycle = odp_time_local();
- diff = odp_time_diff(cycle, start_cycle);
- if (odp_time_cmp(wait, diff) < 0) {
- EXAMPLE_DBG("exit after %d seconds\n",
- run_time_sec);
- break;
- }
- }
-
- /* recv some packets and change MAGIC to MAGIC_2 */
- pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
- if (pkts <= 0)
- continue;
-
- for (i = 0; i < pkts; i++) {
- odp_packet_t pkt = pkt_tbl[i];
- pkt_head_t head;
- size_t off;
-
- off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID) {
- EXAMPLE_ERR("invalid l4 offset\n");
- for (int j = i; j < pkts; j++)
- odp_packet_free(pkt_tbl[j]);
- break;
- }
-
- off += ODPH_UDPHDR_LEN;
- ret = odp_packet_copy_to_mem(pkt, off, sizeof(head),
- &head);
- if (ret)
- EXAMPLE_ABORT("unable copy out head data");
-
- if (head.magic != TEST_SEQ_MAGIC) {
- EXAMPLE_ERR("Wrong head magic! %x", head.magic);
- for (int j = i; j < pkts; j++)
- odp_packet_free(pkt_tbl[j]);
- break;
- }
-
- /* Modify magic number in packet */
- head.magic = TEST_SEQ_MAGIC_2;
- ret = odp_packet_copy_from_mem(pkt, off, sizeof(head),
- &head);
- if (ret)
- EXAMPLE_ABORT("unable to copy in head data");
- }
-
- /* send all packets back */
- ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, i);
- if (ret < 0)
- EXAMPLE_ABORT("can not send packets\n");
-
- stat_pkts += ret;
-
- /* alloc packet from local pool, set magic to ALLOC_MAGIC,
- * and send it.*/
- alloc_pkt = odp_packet_alloc(pool, SHM_PKT_POOL_BUF_SIZE);
- if (alloc_pkt != ODP_PACKET_INVALID) {
- pkt_head_t head;
- size_t off;
-
- odp_packet_l4_offset_set(alloc_pkt, 30);
-
- head.magic = TEST_ALLOC_MAGIC;
-
- off = odp_packet_l4_offset(alloc_pkt);
- off += ODPH_UDPHDR_LEN;
- ret = odp_packet_copy_from_mem(alloc_pkt, off,
- sizeof(head),
- &head);
- if (ret)
- EXAMPLE_ABORT("unable to copy in head data");
-
- pkt_tbl[0] = alloc_pkt;
- ret = ipc_odp_packet_send_or_free(ipc_pktio,
- pkt_tbl, 1);
- if (ret < 0)
- EXAMPLE_ABORT("can not send packets\n");
- stat_pkts += 1;
- }
- }
-
- /* cleanup and exit */
- ret = odp_pktio_stop(ipc_pktio);
- if (ret) {
- EXAMPLE_DBG("ipc2: odp_pktio_stop error %d\n", ret);
- return -1;
- }
-
-not_started:
- ret = odp_pktio_close(ipc_pktio);
- if (ret) {
- EXAMPLE_DBG("ipc2: odp_pktio_close error %d\n", ret);
- return -1;
- }
-
- ret = odp_pool_destroy(pool);
- if (ret)
- EXAMPLE_DBG("ipc2: pool_destroy error %d\n", ret);
-
- return stat_pkts > 1000 ? 0 : -1;
-}
-
-int main(int argc, char *argv[])
-{
- odp_instance_t instance;
- int ret;
-
- /* Parse and store the application arguments */
- parse_args(argc, argv);
-
- if (odp_init_global(&instance, NULL, NULL)) {
- EXAMPLE_ERR("Error: ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- EXAMPLE_ERR("Error: ODP local init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- ret = ipc_second_process(master_pid);
-
- if (odp_term_local()) {
- EXAMPLE_ERR("Error: odp_term_local() failed.\n");
- exit(EXIT_FAILURE);
- }
-
- if (odp_term_global(instance)) {
- EXAMPLE_ERR("Error: odp_term_global() failed.\n");
- exit(EXIT_FAILURE);
- }
-
- return ret;
-}
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc_run.sh b/test/linux-generic/pktio_ipc/pktio_ipc_run.sh
deleted file mode 100755
index 52e8d42a0..000000000
--- a/test/linux-generic/pktio_ipc/pktio_ipc_run.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# directories where test binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone (./pktio_ipc_run) intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=./pktio_ipc:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../platform/linux-generic/test/pktio_ipc:$PATH
-PATH=.:$PATH
-
-run()
-{
- local ret=0
- #if test was interrupted with CTRL+c than files
- #might remain in shm. Needed cleanely delete them.
- rm -rf /tmp/odp-* 2>&1 > /dev/null
-
- echo "==== run pktio_ipc1 then pktio_ipc2 ===="
- pktio_ipc1${EXEEXT} -t 10 &
- IPC_PID=$!
-
- pktio_ipc2${EXEEXT} -p ${IPC_PID} -t 5
- ret=$?
- # pktio_ipc1 should do clean up and exit just
- # after pktio_ipc2 exited. If it does not happen
- # kill him in test.
- sleep 13
- (kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
- if [ $? -eq 0 ]; then
- echo "pktio_ipc1${EXEEXT} was killed"
- ls -l /tmp/odp* 2> /dev/null
- rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
- else
- echo "normal exit of 2 application"
- ls -l /tmp/odp* 2> /dev/null
- fi
-
- if [ $ret -ne 0 ]; then
- echo "!!!First stage FAILED $ret!!!"
- exit $ret
- else
- echo "First stage PASSED"
- fi
-
- echo "==== run pktio_ipc2 then pktio_ipc1 ===="
- pktio_ipc2${EXEEXT} -t 10 &
- IPC_PID=$!
-
- pktio_ipc1${EXEEXT} -p ${IPC_PID} -t 5
- ret=$?
- # pktio_ipc2 do not exit on pktio_ipc1 disconnect
- # wait until it exits cleanly
- sleep 13
- (kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
- if [ $? -eq 0 ]; then
- echo "pktio_ipc2${EXEEXT} was killed"
- ls -l /tmp/odp* 2> /dev/null
- rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
- else
- echo "normal exit of 2 application"
- ls -l /tmp/odp* 2> /dev/null
- fi
-
- if [ $ret -ne 0 ]; then
- echo "!!! FAILED !!!"
- ls -l /tmp/odp* 2> /dev/null
- rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
- exit $ret
- else
- ls -l /tmp/odp* 2> /dev/null
- echo "Second stage PASSED"
- fi
-
- echo "!!!PASSED!!!"
- exit 0
-}
-
-case "$1" in
- *) run ;;
-esac
diff --git a/test/linux-generic/ring/.gitignore b/test/linux-generic/ring/.gitignore
deleted file mode 100644
index 7341a340c..000000000
--- a/test/linux-generic/ring/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ring_main
diff --git a/test/linux-generic/ring/Makefile.am b/test/linux-generic/ring/Makefile.am
deleted file mode 100644
index c08658482..000000000
--- a/test/linux-generic/ring/Makefile.am
+++ /dev/null
@@ -1,14 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestring.la
-libtestring_la_SOURCES = ring_suites.c ring_basic.c ring_stress.c
-libtestring_la_CFLAGS = $(AM_CFLAGS) $(INCCUNIT_COMMON) $(INCODP)
-
-test_PROGRAMS = ring_main$(EXEEXT)
-dist_ring_main_SOURCES = ring_main.c
-
-ring_main_LDFLAGS = $(AM_LDFLAGS)
-ring_main_LDADD = libtestring.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-noinst_HEADERS = ring_suites.h
-
diff --git a/test/linux-generic/ring/ring_basic.c b/test/linux-generic/ring/ring_basic.c
deleted file mode 100644
index 926dc465d..000000000
--- a/test/linux-generic/ring/ring_basic.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP ring basic test
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <test_debug.h>
-#include <odp_cunit_common.h>
-#include <odp_packet_io_ring_internal.h>
-
-#include "ring_suites.h"
-
-/* labor functions declaration */
-static void __do_basic_burst(_ring_t *r);
-static void __do_basic_bulk(_ring_t *r);
-static void __do_basic_watermark(_ring_t *r);
-
-/* dummy object pointers for enqueue and dequeue testing */
-static void **test_enq_data;
-static void **test_deq_data;
-
-/* create two rings: one for single thread usage scenario
- * and another for multiple thread usage scenario.
- * st - single thread usage scenario
- * mt - multiple thread usage scenario
- */
-static const char *st_ring_name = "ST basic ring";
-static const char *mt_ring_name = "MT basic ring";
-static _ring_t *st_ring, *mt_ring;
-
-int ring_test_basic_start(void)
-{
- int i = 0;
-
- /* alloc dummy object pointers for enqueue testing */
- test_enq_data = malloc(RING_SIZE * 2 * sizeof(void *));
- if (NULL == test_enq_data) {
- LOG_ERR("failed to allocate basic test enqeue data\n");
- return -1;
- }
-
- for (i = 0; i < RING_SIZE * 2; i++)
- test_enq_data[i] = (void *)(unsigned long)i;
-
- /* alloc dummy object pointers for dequeue testing */
- test_deq_data = malloc(RING_SIZE * 2 * sizeof(void *));
- if (NULL == test_deq_data) {
- LOG_ERR("failed to allocate basic test dequeue data\n");
- free(test_enq_data); test_enq_data = NULL;
- return -1;
- }
-
- memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
- return 0;
-}
-
-int ring_test_basic_end(void)
-{
- _ring_destroy(st_ring_name);
- _ring_destroy(mt_ring_name);
-
- free(test_enq_data);
- free(test_deq_data);
- return 0;
-}
-
-/* basic test cases */
-void ring_test_basic_create(void)
-{
- /* prove illegal size shall fail */
- st_ring = _ring_create(st_ring_name, ILLEGAL_SIZE, 0);
- CU_ASSERT(NULL == st_ring);
- CU_ASSERT(EINVAL == __odp_errno);
-
- /* create ring for single thread usage scenario */
- st_ring = _ring_create(st_ring_name, RING_SIZE,
- _RING_F_SP_ENQ | _RING_F_SC_DEQ);
-
- CU_ASSERT(NULL != st_ring);
- CU_ASSERT(_ring_lookup(st_ring_name) == st_ring);
-
- /* create ring for multiple thread usage scenario */
- mt_ring = _ring_create(mt_ring_name, RING_SIZE,
- _RING_SHM_PROC);
-
- CU_ASSERT(NULL != mt_ring);
- CU_ASSERT(_ring_lookup(mt_ring_name) == mt_ring);
-}
-
-void ring_test_basic_burst(void)
-{
- /* two rounds to cover both single
- * thread and multiple thread APIs
- */
- __do_basic_burst(st_ring);
- __do_basic_burst(mt_ring);
-}
-
-void ring_test_basic_bulk(void)
-{
- __do_basic_bulk(st_ring);
- __do_basic_bulk(mt_ring);
-}
-
-void ring_test_basic_watermark(void)
-{
- __do_basic_watermark(st_ring);
- __do_basic_watermark(mt_ring);
-}
-
-/* labor functions definition */
-static void __do_basic_burst(_ring_t *r)
-{
- int result = 0;
- unsigned int count = 0;
- void * const *source = test_enq_data;
- void * const *dest = test_deq_data;
- void **enq = NULL, **deq = NULL;
-
- enq = test_enq_data; deq = test_deq_data;
-
- /* ring is empty */
- CU_ASSERT(1 == _ring_empty(r));
-
- /* enqueue 1 object */
- result = _ring_enqueue_burst(r, enq, 1);
- enq += 1;
- CU_ASSERT(1 == (result & _RING_SZ_MASK));
-
- /* enqueue 2 objects */
- result = _ring_enqueue_burst(r, enq, 2);
- enq += 2;
- CU_ASSERT(2 == (result & _RING_SZ_MASK));
-
- /* enqueue HALF_BULK objects */
- result = _ring_enqueue_burst(r, enq, HALF_BULK);
- enq += HALF_BULK;
- CU_ASSERT(HALF_BULK == (result & _RING_SZ_MASK));
-
- /* ring is neither empty nor full */
- CU_ASSERT(0 == _ring_full(r));
- CU_ASSERT(0 == _ring_empty(r));
-
- /* _ring_count() equals enqueued */
- count = (1 + 2 + HALF_BULK);
- CU_ASSERT(count == _ring_count(r));
- /* _ring_free_count() equals rooms left */
- count = (RING_SIZE - 1) - count;
- CU_ASSERT(count == _ring_free_count(r));
-
- /* exceed the size, enquene as many as possible */
- result = _ring_enqueue_burst(r, enq, HALF_BULK);
- enq += count;
- CU_ASSERT(count == (result & _RING_SZ_MASK));
- CU_ASSERT(1 == _ring_full(r));
-
- /* dequeue 1 object */
- result = _ring_dequeue_burst(r, deq, 1);
- deq += 1;
- CU_ASSERT(1 == (result & _RING_SZ_MASK));
-
- /* dequeue 2 objects */
- result = _ring_dequeue_burst(r, deq, 2);
- deq += 2;
- CU_ASSERT(2 == (result & _RING_SZ_MASK));
-
- /* dequeue HALF_BULK objects */
- result = _ring_dequeue_burst(r, deq, HALF_BULK);
- deq += HALF_BULK;
- CU_ASSERT(HALF_BULK == (result & _RING_SZ_MASK));
-
- /* _ring_free_count() equals dequeued */
- count = (1 + 2 + HALF_BULK);
- CU_ASSERT(count == _ring_free_count(r));
- /* _ring_count() equals remained left */
- count = (RING_SIZE - 1) - count;
- CU_ASSERT(count == _ring_count(r));
-
- /* underrun the size, dequeue as many as possible */
- result = _ring_dequeue_burst(r, deq, HALF_BULK);
- deq += count;
- CU_ASSERT(count == (result & _RING_SZ_MASK));
- CU_ASSERT(1 == _ring_empty(r));
-
- /* check data */
- CU_ASSERT(0 == memcmp(source, dest, deq - dest));
-
- /* reset dequeue data */
- memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
-}
-
-/* incomplete ring API set: strange!
- * complement _ring_enqueue/dequeue_bulk to improve coverage
- */
-static inline int __ring_enqueue_bulk(
- _ring_t *r, void * const *objects, unsigned bulk)
-{
- if (r->prod.sp_enqueue)
- return _ring_sp_enqueue_bulk(r, objects, bulk);
- else
- return _ring_mp_enqueue_bulk(r, objects, bulk);
-}
-
-static inline int __ring_dequeue_bulk(
- _ring_t *r, void **objects, unsigned bulk)
-{
- if (r->cons.sc_dequeue)
- return _ring_sc_dequeue_bulk(r, objects, bulk);
- else
- return _ring_mc_dequeue_bulk(r, objects, bulk);
-}
-
-static void __do_basic_bulk(_ring_t *r)
-{
- int result = 0;
- unsigned int count = 0;
- void * const *source = test_enq_data;
- void * const *dest = test_deq_data;
- void **enq = NULL, **deq = NULL;
-
- enq = test_enq_data; deq = test_deq_data;
-
- /* ring is empty */
- CU_ASSERT(1 == _ring_empty(r));
-
- /* enqueue 1 object */
- result = __ring_enqueue_bulk(r, enq, 1);
- enq += 1;
- CU_ASSERT(0 == result);
-
- /* enqueue 2 objects */
- result = __ring_enqueue_bulk(r, enq, 2);
- enq += 2;
- CU_ASSERT(0 == result);
-
- /* enqueue HALF_BULK objects */
- result = __ring_enqueue_bulk(r, enq, HALF_BULK);
- enq += HALF_BULK;
- CU_ASSERT(0 == result);
-
- /* ring is neither empty nor full */
- CU_ASSERT(0 == _ring_full(r));
- CU_ASSERT(0 == _ring_empty(r));
-
- /* _ring_count() equals enqueued */
- count = (1 + 2 + HALF_BULK);
- CU_ASSERT(count == _ring_count(r));
- /* _ring_free_count() equals rooms left */
- count = (RING_SIZE - 1) - count;
- CU_ASSERT(count == _ring_free_count(r));
-
- /* exceed the size, enquene shall fail with -ENOBUFS */
- result = __ring_enqueue_bulk(r, enq, HALF_BULK);
- CU_ASSERT(-ENOBUFS == result);
-
- /* fullful the ring */
- result = __ring_enqueue_bulk(r, enq, count);
- enq += count;
- CU_ASSERT(0 == result);
- CU_ASSERT(1 == _ring_full(r));
-
- /* dequeue 1 object */
- result = __ring_dequeue_bulk(r, deq, 1);
- deq += 1;
- CU_ASSERT(0 == result);
-
- /* dequeue 2 objects */
- result = __ring_dequeue_bulk(r, deq, 2);
- deq += 2;
- CU_ASSERT(0 == result);
-
- /* dequeue HALF_BULK objects */
- result = __ring_dequeue_bulk(r, deq, HALF_BULK);
- deq += HALF_BULK;
- CU_ASSERT(0 == result);
-
- /* _ring_free_count() equals dequeued */
- count = (1 + 2 + HALF_BULK);
- CU_ASSERT(count == _ring_free_count(r));
- /* _ring_count() equals remained left */
- count = (RING_SIZE - 1) - count;
- CU_ASSERT(count == _ring_count(r));
-
- /* underrun the size, dequeue shall fail with -ENOENT */
- result = __ring_dequeue_bulk(r, deq, HALF_BULK);
- CU_ASSERT(-ENOENT == result);
-
- /* empty the queue */
- result = __ring_dequeue_bulk(r, deq, count);
- deq += count;
- CU_ASSERT(0 == result);
- CU_ASSERT(1 == _ring_empty(r));
-
- /* check data */
- CU_ASSERT(0 == memcmp(source, dest, deq - dest));
-
- /* reset dequeue data */
- memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
-}
-
-void __do_basic_watermark(_ring_t *r)
-{
- int result = 0;
- void * const *source = test_enq_data;
- void * const *dest = test_deq_data;
- void **enq = NULL, **deq = NULL;
-
- enq = test_enq_data; deq = test_deq_data;
-
- /* bulk = 3/4 watermark to trigger alarm on 2nd enqueue */
- const unsigned watermark = PIECE_BULK;
- const unsigned bulk = (watermark / 4) * 3;
-
- /* watermark cannot exceed ring size */
- result = _ring_set_water_mark(r, ILLEGAL_SIZE);
- CU_ASSERT(-EINVAL == result);
-
- /* set watermark */
- result = _ring_set_water_mark(r, watermark);
- CU_ASSERT(0 == result);
-
- /* 1st enqueue shall succeed */
- result = __ring_enqueue_bulk(r, enq, bulk);
- enq += bulk;
- CU_ASSERT(0 == result);
-
- /* 2nd enqueue shall succeed but return -EDQUOT */
- result = __ring_enqueue_bulk(r, enq, bulk);
- enq += bulk;
- CU_ASSERT(-EDQUOT == result);
-
- /* dequeue 1st bulk */
- result = __ring_dequeue_bulk(r, deq, bulk);
- deq += bulk;
- CU_ASSERT(0 == result);
-
- /* dequeue 2nd bulk */
- result = __ring_dequeue_bulk(r, deq, bulk);
- deq += bulk;
- CU_ASSERT(0 == result);
-
- /* check data */
- CU_ASSERT(0 == memcmp(source, dest, deq - dest));
-
- /* reset watermark */
- result = _ring_set_water_mark(r, 0);
- CU_ASSERT(0 == result);
-
- /* reset dequeue data */
- memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
-}
diff --git a/test/linux-generic/ring/ring_main.c b/test/linux-generic/ring/ring_main.c
deleted file mode 100644
index 715268843..000000000
--- a/test/linux-generic/ring/ring_main.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "ring_suites.h"
-
-int main(int argc, char *argv[])
-{
- return ring_suites_main(argc, argv);
-}
diff --git a/test/linux-generic/ring/ring_stress.c b/test/linux-generic/ring/ring_stress.c
deleted file mode 100644
index b6ddb34e3..000000000
--- a/test/linux-generic/ring/ring_stress.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP ring stress test
- */
-
-#define _GNU_SOURCE
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <odp_api.h>
-#include <odp/helper/odph_api.h>
-#include <odp_packet_io_ring_internal.h>
-#include <test_debug.h>
-#include <odp_cunit_common.h>
-
-#include "ring_suites.h"
-
-/* There's even number of producer and consumer threads and each thread does
- * this many successful enq or deq operations */
-#define NUM_BULK_OP ((RING_SIZE / PIECE_BULK) * 100)
-
-/*
- * Since cunit framework cannot work with multi-threading, ask workers
- * to save their results for delayed assertion after thread collection.
- */
-static int worker_results[MAX_WORKERS];
-
-/*
- * Note : make sure that both enqueue and dequeue
- * operation starts at same time so to avoid data corruption
- * Its because atomic lock will protect only indexes, but if order of
- * read or write operation incorrect then data mismatch will happen
- * So its resposibility of application develop to take care of order of
- * data read or write.
- */
-typedef enum {
- STRESS_1_1_PRODUCER_CONSUMER,
- STRESS_1_N_PRODUCER_CONSUMER,
- STRESS_N_1_PRODUCER_CONSUMER,
- STRESS_N_M_PRODUCER_CONSUMER
-} stress_case_t;
-
-/* worker function declarations */
-static int stress_worker(void *_data);
-
-/* global name for later look up in workers' context */
-static const char *ring_name = "stress_ring";
-
-/* barrier to run threads at the same time */
-static odp_barrier_t barrier;
-
-int ring_test_stress_start(void)
-{
- _ring_t *r_stress = NULL;
-
- /* multiple thread usage scenario, thread or process sharable */
- r_stress = _ring_create(ring_name, RING_SIZE, _RING_SHM_PROC);
- if (r_stress == NULL) {
- LOG_ERR("create ring failed for stress.\n");
- return -1;
- }
-
- return 0;
-}
-
-int ring_test_stress_end(void)
-{
- _ring_destroy(ring_name);
- return 0;
-}
-
-void ring_test_stress_1_1_producer_consumer(void)
-{
- int i = 0;
- odp_cpumask_t cpus;
- pthrd_arg worker_param;
-
- /* reset results for delayed assertion */
- memset(worker_results, 0, sizeof(worker_results));
-
- /* request 2 threads to run 1:1 stress */
- worker_param.numthrds = odp_cpumask_default_worker(&cpus, 2);
- worker_param.testcase = STRESS_1_1_PRODUCER_CONSUMER;
-
- /* not failure, insufficient resource */
- if (worker_param.numthrds < 2) {
- LOG_ERR("insufficient cpu for 1:1 "
- "producer/consumer stress.\n");
- return;
- }
-
- odp_barrier_init(&barrier, 2);
-
- /* kick the workers */
- odp_cunit_thread_create(stress_worker, &worker_param);
-
- /* collect the results */
- odp_cunit_thread_exit(&worker_param);
-
- /* delayed assertion due to cunit limitation */
- for (i = 0; i < worker_param.numthrds; i++)
- CU_ASSERT(0 == worker_results[i]);
-}
-
-void ring_test_stress_N_M_producer_consumer(void)
-{
- int i = 0;
- odp_cpumask_t cpus;
- pthrd_arg worker_param;
-
- /* reset results for delayed assertion */
- memset(worker_results, 0, sizeof(worker_results));
-
- /* request MAX_WORKERS threads to run N:M stress */
- worker_param.numthrds =
- odp_cpumask_default_worker(&cpus, MAX_WORKERS);
- worker_param.testcase = STRESS_N_M_PRODUCER_CONSUMER;
-
- /* not failure, insufficient resource */
- if (worker_param.numthrds < 3) {
- LOG_ERR("insufficient cpu for N:M "
- "producer/consumer stress.\n");
- return;
- }
-
- /* force even number of threads */
- if (worker_param.numthrds & 0x1)
- worker_param.numthrds -= 1;
-
- odp_barrier_init(&barrier, worker_param.numthrds);
-
- /* kick the workers */
- odp_cunit_thread_create(stress_worker, &worker_param);
-
- /* collect the results */
- odp_cunit_thread_exit(&worker_param);
-
- /* delayed assertion due to cunit limitation */
- for (i = 0; i < worker_param.numthrds; i++)
- CU_ASSERT(0 == worker_results[i]);
-}
-
-void ring_test_stress_1_N_producer_consumer(void)
-{
-}
-
-void ring_test_stress_N_1_producer_consumer(void)
-{
-}
-
-void ring_test_stress_ring_list_dump(void)
-{
- /* improve code coverage */
- _ring_list_dump();
-}
-
-/* worker function for multiple producer instances */
-static int do_producer(_ring_t *r)
-{
- void *enq[PIECE_BULK];
- int i;
- int num = NUM_BULK_OP;
-
- /* data pattern to be evaluated later in consumer */
- for (i = 0; i < PIECE_BULK; i++)
- enq[i] = (void *)(uintptr_t)i;
-
- while (num)
- if (_ring_mp_enqueue_bulk(r, enq, PIECE_BULK) == 0)
- num--;
-
- return 0;
-}
-
-/* worker function for multiple consumer instances */
-static int do_consumer(_ring_t *r)
-{
- void *deq[PIECE_BULK];
- int i;
- int num = NUM_BULK_OP;
-
- while (num) {
- if (_ring_mc_dequeue_bulk(r, deq, PIECE_BULK) == 0) {
- num--;
-
- /* evaluate the data pattern */
- for (i = 0; i < PIECE_BULK; i++)
- CU_ASSERT(deq[i] == (void *)(uintptr_t)i);
- }
- }
-
- return 0;
-}
-
-static int stress_worker(void *_data)
-{
- pthrd_arg *worker_param = (pthrd_arg *)_data;
- _ring_t *r_stress = NULL;
- int *result = NULL;
- int worker_id = odp_thread_id();
-
- /* save the worker result for delayed assertion */
- result = &worker_results[(worker_id % worker_param->numthrds)];
-
- /* verify ring lookup in worker context */
- r_stress = _ring_lookup(ring_name);
- if (NULL == r_stress) {
- LOG_ERR("ring lookup %s not found\n", ring_name);
- return (*result = -1);
- }
-
- odp_barrier_wait(&barrier);
-
- switch (worker_param->testcase) {
- case STRESS_1_1_PRODUCER_CONSUMER:
- case STRESS_N_M_PRODUCER_CONSUMER:
- /* interleaved producer/consumer */
- if (0 == (worker_id % 2))
- *result = do_producer(r_stress);
- else if (1 == (worker_id % 2))
- *result = do_consumer(r_stress);
- break;
- case STRESS_1_N_PRODUCER_CONSUMER:
- case STRESS_N_1_PRODUCER_CONSUMER:
- default:
- LOG_ERR("invalid or not-implemented stress type (%d)\n",
- worker_param->testcase);
- break;
- }
-
- odp_barrier_wait(&barrier);
-
- return 0;
-}
diff --git a/test/linux-generic/ring/ring_suites.c b/test/linux-generic/ring/ring_suites.c
deleted file mode 100644
index f321a762a..000000000
--- a/test/linux-generic/ring/ring_suites.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <odp_api.h>
-#include <test_debug.h>
-#include <odp_cunit_common.h>
-#include <odp_packet_io_ring_internal.h>
-
-#include "ring_suites.h"
-
-static int ring_suites_init(odp_instance_t *inst)
-{
- if (0 != odp_init_global(inst, NULL, NULL)) {
- LOG_ERR("error: odp_init_global() failed.\n");
- return -1;
- }
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- LOG_ERR("error: odp_init_local() failed.\n");
- return -1;
- }
-
- _ring_tailq_init();
- return 0;
-}
-
-static odp_testinfo_t ring_suite_basic[] = {
- ODP_TEST_INFO(ring_test_basic_create),
- ODP_TEST_INFO(ring_test_basic_burst),
- ODP_TEST_INFO(ring_test_basic_bulk),
- ODP_TEST_INFO(ring_test_basic_watermark),
- ODP_TEST_INFO_NULL,
-};
-
-static odp_testinfo_t ring_suite_stress[] = {
- ODP_TEST_INFO(ring_test_stress_1_1_producer_consumer),
- ODP_TEST_INFO(ring_test_stress_1_N_producer_consumer),
- ODP_TEST_INFO(ring_test_stress_N_1_producer_consumer),
- ODP_TEST_INFO(ring_test_stress_N_M_producer_consumer),
- ODP_TEST_INFO(ring_test_stress_ring_list_dump),
- ODP_TEST_INFO_NULL,
-};
-
-static odp_suiteinfo_t ring_suites[] = {
- {"ring basic", ring_test_basic_start,
- ring_test_basic_end, ring_suite_basic},
- {"ring stress", ring_test_stress_start,
- ring_test_stress_end, ring_suite_stress},
- ODP_SUITE_INFO_NULL
-};
-
-int ring_suites_main(int argc, char *argv[])
-{
- int ret;
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- if (odp_cunit_parse_options(argc, argv))
- return -1;
-
- odp_cunit_register_global_init(ring_suites_init);
-
- ret = odp_cunit_register(ring_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/linux-generic/ring/ring_suites.h b/test/linux-generic/ring/ring_suites.h
deleted file mode 100644
index 5fa5b9c52..000000000
--- a/test/linux-generic/ring/ring_suites.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#define RING_SIZE 4096
-#define PIECE_BULK 32
-
-#define HALF_BULK (RING_SIZE >> 1)
-#define ILLEGAL_SIZE (RING_SIZE | 0x3)
-
-/* test suite start and stop */
-int ring_test_basic_start(void);
-int ring_test_basic_end(void);
-
-/* basic test cases */
-void ring_test_basic_create(void);
-void ring_test_basic_burst(void);
-void ring_test_basic_bulk(void);
-void ring_test_basic_watermark(void);
-
-/* test suite start and stop */
-int ring_test_stress_start(void);
-int ring_test_stress_end(void);
-
-/* stress test cases */
-void ring_test_stress_1_1_producer_consumer(void);
-void ring_test_stress_1_N_producer_consumer(void);
-void ring_test_stress_N_1_producer_consumer(void);
-void ring_test_stress_N_M_producer_consumer(void);
-void ring_test_stress_ring_list_dump(void);
-
-int ring_suites_main(int argc, char *argv[]);
diff --git a/test/linux-generic/run-test b/test/linux-generic/run-test
deleted file mode 100755
index 2bff651cc..000000000
--- a/test/linux-generic/run-test
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-#
-# Run the ODP test applications and report status in a format that
-# matches the automake "make check" output.
-#
-# The list of tests to be run is obtained by sourcing a file that
-# contains an environment variable in the form;
-#
-# TEST="test_app1 test_app2"
-#
-# The default behaviour is to run all the tests defined in files
-# named tests-*.env in the same directory as this script, but a single
-# test definition file can be specified using the TEST_DEF environment
-# variable.
-#
-# Test definition files may optionally also specify a LOG_COMPILER
-# which will be invoked as a wrapper to each of the test application
-# (as per automake).
-#
-TDIR=$(dirname $(readlink -f $0))
-PASS=0
-FAIL=0
-SKIP=0
-res=0
-
-if [ "$V" != "0" ]; then
- verbose=1
-else
- verbose=0
- mkdir -p logs
-fi
-
-do_run_tests() {
- source $1
-
- for tc in $TESTS; do
- tc=$(basename $tc)
- if [ "$verbose" = "0" ]; then
- logfile=logs/${tc}.log
- touch $logfile || logfile=/dev/null
- $LOG_COMPILER $TDIR/$tc > $logfile 2>&1
- else
- $LOG_COMPILER $TDIR/$tc
- fi
-
- tres=$?
- case $tres in
- 0) echo "PASS: $tc"; let PASS=$PASS+1 ;;
- 77) echo "SKIP: $tc"; let SKIP=$SKIP+1 ;;
- *) echo "FAIL: $tc"; let FAIL=$FAIL+1; res=1 ;;
- esac
- done
-}
-
-if [ "$TEST_DEFS" != "" -a -f "$TEST_DEFS" ]; then
- do_run_tests $TEST_DEFS
-elif [ "$1" != "" ]; then
- do_run_tests $TDIR/tests-${1}.env
-else
- for tenv in $TDIR/tests-*.env; do
- do_run_tests $tenv
- done
-fi
-
-echo "TEST RESULT: $PASS tests passed, $SKIP skipped, $FAIL failed"
-
-exit $res
diff --git a/test/linux-generic/validation/Makefile.inc b/test/linux-generic/validation/Makefile.inc
deleted file mode 100644
index cf1dedb9f..000000000
--- a/test/linux-generic/validation/Makefile.inc
+++ /dev/null
@@ -1 +0,0 @@
-include $(top_srcdir)/test/linux-generic/Makefile.inc
diff --git a/test/linux-generic/validation/api/Makefile.inc b/test/linux-generic/validation/api/Makefile.inc
deleted file mode 100644
index 19c9448c0..000000000
--- a/test/linux-generic/validation/api/Makefile.inc
+++ /dev/null
@@ -1 +0,0 @@
-include $(top_srcdir)/test/linux-generic/validation/Makefile.inc
diff --git a/test/linux-generic/validation/api/pktio/.gitignore b/test/linux-generic/validation/api/pktio/.gitignore
deleted file mode 100644
index 7e563b8b3..000000000
--- a/test/linux-generic/validation/api/pktio/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.log
-*.trs
diff --git a/test/linux-generic/validation/api/pktio/Makefile.am b/test/linux-generic/validation/api/pktio/Makefile.am
deleted file mode 100644
index 4a1434397..000000000
--- a/test/linux-generic/validation/api/pktio/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-dist_check_SCRIPTS = pktio_env \
- pktio_run.sh \
- pktio_run_tap.sh
-
-if HAVE_PCAP
-dist_check_SCRIPTS += pktio_run_pcap.sh
-endif
-if netmap_support
-dist_check_SCRIPTS += pktio_run_netmap.sh
-endif
-if PKTIO_DPDK
-dist_check_SCRIPTS += pktio_run_dpdk.sh
-endif
-
-test_SCRIPTS = $(dist_check_SCRIPTS)
diff --git a/test/linux-generic/validation/api/pktio/pktio_env b/test/linux-generic/validation/api/pktio/pktio_env
deleted file mode 100644
index 345b5bd56..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_env
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Test script wrapper for running ODP pktio apps on linux-generic.
-#
-# For linux-generic the default behavior is to create two pairs of
-# virtual Ethernet interfaces and provide the names of these via
-# environment variables to pktio apps, the interfaces will be removed
-# before the script exits.
-#
-# Note that the creation of virtual Ethernet devices depends on having
-# CONFIG_VETH enabled in the kernel, if not enabled the env setup will be skipped.
-#
-# Network set up
-# IF0 <---> IF1
-# IF2 <---> IF3
-IF0=pktiop0p1
-IF1=pktiop1p0
-IF2=pktiop2p3
-IF3=pktiop3p2
-
-if [ "$0" = "$BASH_SOURCE" ]; then
- echo "Error: Platform specific env file has to be sourced."
-fi
-
-check_for_root()
-{
- if [ "$(id -u)" != "0" ]; then
- echo "check_for_root(): need to be root to setup VETH"
- return 1
- fi
- return 0
-}
-
-# wait for a network interface's operational state to be "up"
-wait_for_iface_up()
-{
- iface=$1
- cnt=0
-
- while [ $cnt -lt 50 ]; do
- read operstate < /sys/class/net/$iface/operstate
-
- if [ $? -ne 0 ]; then
- break
- elif [ "$operstate" = "up" ]; then
- return 0
- fi
-
- sleep 0.1
- cnt=`expr $cnt + 1`
- done
-
- return 1
-}
-
-setup_pktio_env()
-{
- echo "pktio: setting up test interfaces $IF0, $IF1, $IF2, $IF3."
-
- check_for_root
- if [ $? -ne 0 ]; then
- return 1
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link show $iface 2> /dev/null
- if [ $? -eq 0 ]; then
- echo "pktio: interface $iface already exist $?"
- return 2
- fi
- done
-
- if [ "$1" = "clean" ]; then
- trap cleanup_pktio_env EXIT
- fi
-
- ip link add $IF0 type veth peer name $IF1
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create veth pair"
- return 3
- fi
- ip link add $IF2 type veth peer name $IF3
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create veth pair"
- return 4
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link set $iface mtu 9216 up
- ifconfig $iface -arp
- done
-
- # check that the interface has come up before starting the test
- for iface in $IF0 $IF1 $IF2 $IF3; do
- wait_for_iface_up $iface
- if [ $? -ne 0 ]; then
- echo "pktio: interface $iface failed to come up"
- return 5
- fi
- done
-}
-
-cleanup_pktio_env()
-{
- echo "pktio: removing test interfaces $IF0, $IF1, $IF2, $IF3"
- check_for_root
- if [ $? -ne 0 ]; then
- return 1
- fi
-
- for iface in $IF0 $IF1 $IF2 $IF3; do
- ip link del $iface 2> /dev/null
- done
- return 0
-}
diff --git a/test/linux-generic/validation/api/pktio/pktio_run.sh b/test/linux-generic/validation/api/pktio/pktio_run.sh
deleted file mode 100755
index e8b0f936f..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_run.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# Proceed the pktio tests. This script expects at least one argument:
-# setup) setup the pktio test environment
-# cleanup) cleanup the pktio test environment
-# run) run the pktio tests (setup, run, cleanup)
-# extra arguments are passed unchanged to the test itself (pktio_main)
-# Without arguments, "run" is assumed and no extra argument is passed to the
-# test (legacy mode).
-#
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone (./pktio_run) intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with pktio_main: $pktio_run_path"
-else
- echo "cannot find pktio_main: please set you PATH for it."
-fi
-
-# directory where platform test sources are, including scripts
-TEST_SRC_DIR=$(dirname $0)
-
-# exit codes expected by automake for skipped tests
-TEST_SKIPPED=77
-
-# Use installed pktio env or for make check take it from platform directory
-if [ -f "./pktio_env" ]; then
- . ./pktio_env
-elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
- . ${TEST_SRC_DIR}/pktio_env
-else
- echo "BUG: unable to find pktio_env!"
- echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
- echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
- exit 1
-fi
-
-run_test()
-{
- local ret=0
-
- # environment variables are used to control which socket method is
- # used, so try each combination to ensure decent coverage.
- for distype in MMAP MMSG; do
- unset ODP_PKTIO_DISABLE_SOCKET_${distype}
- done
-
- # this script doesn't support testing with netmap
- export ODP_PKTIO_DISABLE_NETMAP=y
-
- for distype in SKIP MMAP; do
- if [ "$disabletype" != "SKIP" ]; then
- export ODP_PKTIO_DISABLE_SOCKET_${distype}=y
- fi
- pktio_main${EXEEXT} $*
- if [ $? -ne 0 ]; then
- ret=1
- fi
- done
-
- if [ $ret -ne 0 ]; then
- echo "!!! FAILED !!!"
- fi
-
- return $ret
-}
-
-run()
-{
- echo "pktio: using 'loop' device"
- pktio_main${EXEEXT} $*
- loop_ret=$?
-
- # need to be root to run tests with real interfaces
- if [ "$(id -u)" != "0" ]; then
- exit $ret
- fi
-
- if [ "$ODP_PKTIO_IF0" = "" ]; then
- # no interfaces specified, use default veth interfaces
- # setup by the pktio_env script
- setup_pktio_env clean
- if [ $? != 0 ]; then
- echo "Failed to setup test environment, skipping test."
- exit $TEST_SKIPPED
- fi
- export ODP_PKTIO_IF0=$IF0
- export ODP_PKTIO_IF1=$IF1
- fi
-
- run_test
- ret=$?
-
- [ $ret = 0 ] && ret=$loop_ret
-
- exit $ret
-}
-
-if [ $# != 0 ]; then
- action=$1
- shift
-fi
-
-case "$action" in
- setup) setup_pktio_env ;;
- cleanup) cleanup_pktio_env ;;
- run) run ;;
- *) run ;;
-esac
diff --git a/test/linux-generic/validation/api/pktio/pktio_run_dpdk.sh b/test/linux-generic/validation/api/pktio/pktio_run_dpdk.sh
deleted file mode 100755
index fa46fa430..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_run_dpdk.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# Proceed the pktio tests. This script expects at least one argument:
-# setup) setup the pktio test environment
-# cleanup) cleanup the pktio test environment
-# run) run the pktio tests (setup, run, cleanup)
-# extra arguments are passed unchanged to the test itself (pktio_main)
-# Without arguments, "run" is assumed and no extra argument is passed to the
-# test (legacy mode).
-#
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone (./pktio_run) intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with pktio_main: $pktio_run_path"
-else
- echo "cannot find pktio_main: please set you PATH for it."
-fi
-
-# directory where platform test sources are, including scripts
-TEST_SRC_DIR=$(dirname $0)
-
-# exit codes expected by automake for skipped tests
-TEST_SKIPPED=77
-
-# Use installed pktio env or for make check take it from platform directory
-if [ -f "./pktio_env" ]; then
- . ./pktio_env
-elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
- . ${TEST_SRC_DIR}/pktio_env
-else
- echo "BUG: unable to find pktio_env!"
- echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
- echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
- exit 1
-fi
-
-run_test()
-{
- local ret=0
-
- pktio_main${EXEEXT} $*
- ret=$?
- if [ $ret -ne 0 ]; then
- echo "!!! FAILED !!!"
- fi
-
- exit $ret
-}
-
-run()
-{
- # need to be root to set the interface.
- if [ "$(id -u)" != "0" ]; then
- echo "pktio: need to be root to setup DPDK interfaces."
- return $TEST_SKIPPED
- fi
-
- if [ "$ODP_PKTIO_IF0" = "" ]; then
- setup_pktio_env clean
- export ODP_PKTIO_DPDK_PARAMS="--vdev eth_pcap0,iface=$IF0 --vdev eth_pcap1,iface=$IF1"
- export ODP_PKTIO_IF0=0
- export ODP_PKTIO_IF1=1
- fi
-
- run_test
-}
-
-if [ $# != 0 ]; then
- action=$1
- shift
-fi
-
-case "$1" in
- setup) setup_pktio_env ;;
- cleanup) cleanup_pktio_env ;;
- run) run ;;
- *) run ;;
-esac
diff --git a/test/linux-generic/validation/api/pktio/pktio_run_netmap.sh b/test/linux-generic/validation/api/pktio/pktio_run_netmap.sh
deleted file mode 100755
index 7dde7ae1c..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_run_netmap.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# any parameter passed as arguments to this script is passed unchanged to
-# the test itself (pktio_main)
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone (./pktio_run) intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with pktio_main: $pktio_main_path"
-else
- echo "cannot find pktio_main: please set you PATH for it."
-fi
-
-# directory where platform test sources are, including scripts
-TEST_SRC_DIR=$(dirname $0)
-
-# exit codes expected by automake for skipped tests
-TEST_SKIPPED=77
-
-# Use installed pktio env or for make check take it from the test directory
-if [ -f "./pktio_env" ]; then
- . ./pktio_env
-elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
- . ${TEST_SRC_DIR}/pktio_env
-else
- echo "ERROR: unable to find pktio_env!"
- echo "pktio_env has to be in current directory or in ${TEST_SRC_DIR}"
- exit 1
-fi
-
-run_test()
-{
- local ret=0
-
- pktio_main${EXEEXT} $*
- ret=$?
-
- if [ $ret -ne 0 ]; then
- echo "!!! FAILED !!!"
- fi
-
- return $ret
-}
-
-run_test_vale()
-{
- # use two vale ports on the same switch
- export ODP_PKTIO_IF0=valetest:0
- export ODP_PKTIO_IF1=valetest:1
- run_test
- return $?
-}
-
-run_test_pipe()
-{
- # use a netmap pipe
- export ODP_PKTIO_IF0=valetest:0{0
- export ODP_PKTIO_IF1=valetest:0}0
- run_test
- return $?
-}
-
-run_test_veth()
-{
- if [ "$(lsmod | grep veth)" = "" ]; then
- echo "netmap enabled veth module not loaded, skipping test."
- return 0
- fi
-
- setup_pktio_env clean
- export ODP_PKTIO_IF0=$IF0
- export ODP_PKTIO_IF1=$IF1
- run_test
- return $?
-}
-
-run()
-{
- local ret=0
-
- # need to be root to run these tests
- if [ "$(id -u)" != "0" ]; then
- echo "netmap tests must be run as root, skipping test."
- exit $TEST_SKIPPED
- fi
-
- if [ "$(lsmod | grep netmap)" = "" ]; then
- echo "netmap kernel module not loaded, skipping test."
- exit $TEST_SKIPPED
- fi
-
- if [ "$ODP_PKTIO_IF0" != "" ]; then
- run_test
- ret=$?
- else
- run_test_vale
- r=$?; [ $ret = 0 ] && ret=$r
- run_test_pipe
- r=$?; [ $ret = 0 ] && ret=$r
- run_test_veth
- r=$?; [ $ret = 0 ] && ret=$r
- fi
-
- exit $ret
-}
-
-run
diff --git a/test/linux-generic/validation/api/pktio/pktio_run_pcap.sh b/test/linux-generic/validation/api/pktio/pktio_run_pcap.sh
deleted file mode 100755
index b5b773548..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_run_pcap.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# any parameter passed as arguments to this script is passed unchanged to
-# the test itself (pktio_main)
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with $pktio_main_path"
-else
- echo "cannot find pktio_main${EXEEXT}: please set you PATH for it."
-fi
-
-PCAP_FNAME=vald.pcap
-export ODP_PKTIO_IF0="pcap:out=${PCAP_FNAME}"
-export ODP_PKTIO_IF1="pcap:in=${PCAP_FNAME}"
-pktio_main${EXEEXT} $*
-ret=$?
-rm -f ${PCAP_FNAME}
-exit $ret
diff --git a/test/linux-generic/validation/api/pktio/pktio_run_tap.sh b/test/linux-generic/validation/api/pktio/pktio_run_tap.sh
deleted file mode 100755
index 89579ca68..000000000
--- a/test/linux-generic/validation/api/pktio/pktio_run_tap.sh
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015, Ilya Maximets <i.maximets@samsung.com>
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-
-# any parameter passed as arguments to this script is passed unchanged to
-# the test itself (pktio_main)
-
-# directories where pktio_main binary can be found:
-# -in the validation dir when running make check (intree or out of tree)
-# -in the script directory, when running after 'make install', or
-# -in the validation when running standalone intree.
-# -in the current directory.
-# running stand alone out of tree requires setting PATH
-PATH=${TEST_DIR}/api/pktio:$PATH
-PATH=$(dirname $0):$PATH
-PATH=$(dirname $0)/../../../../common_plat/validation/api/pktio:$PATH
-PATH=.:$PATH
-
-pktio_main_path=$(which pktio_main${EXEEXT})
-if [ -x "$pktio_main_path" ] ; then
- echo "running with $pktio_main_path"
-else
- echo "cannot find pktio_main${EXEEXT}: please set you PATH for it."
-fi
-
-# exit code expected by automake for skipped tests
-TEST_SKIPPED=77
-
-TAP_BASE_NAME=iotap_vald
-IF0=${TAP_BASE_NAME}0
-IF1=${TAP_BASE_NAME}1
-BR=${TAP_BASE_NAME}_br
-
-export ODP_PKTIO_IF0="tap:$IF0"
-export ODP_PKTIO_IF1="tap:$IF1"
-
-tap_cleanup()
-{
- ret=$?
-
- for iface in $IF0 $IF1; do
- ip link set dev $iface nomaster
- done
-
- ip link delete $BR type bridge
-
- for iface in $IF0 $IF1; do
- ip tuntap del mode tap $iface
- done
-
- trap - EXIT
- exit $ret
-}
-
-tap_setup()
-{
- if [ "$(id -u)" != "0" ]; then
- echo "pktio: need to be root to setup TAP interfaces."
- return $TEST_SKIPPED
- fi
-
- for iface in $IF0 $IF1 $BR; do
- ip link show $iface 2> /dev/null
- if [ $? -eq 0 ]; then
- echo "pktio: interface $iface already exist $?"
- return 2
- fi
- done
-
- trap tap_cleanup EXIT
-
- for iface in $IF0 $IF1; do
- ip tuntap add mode tap $iface
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create TAP device $iface"
- return 3
- fi
- done
-
- ip link add name $BR type bridge
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to create bridge $BR"
- return 3
- fi
-
- for iface in $IF0 $IF1; do
- ip link set dev $iface master $BR
- if [ $? -ne 0 ]; then
- echo "pktio: error: unable to add $iface to bridge $BR"
- return 4
- fi
- done
-
- for iface in $IF0 $IF1 $BR; do
- ifconfig $iface -arp
- sysctl -w net.ipv6.conf.${iface}.disable_ipv6=1
- ip link set dev $iface mtu 9216 up
- done
-
- return 0
-}
-
-tap_setup
-ret=$?
-if [ $ret -ne 0 ]; then
- echo "pktio: tap_setup() FAILED!"
- exit $TEST_SKIPPED
-fi
-
-# Using ODP_WAIT_FOR_NETWORK to prevent fail if tap still not enabled in bridge
-ODP_WAIT_FOR_NETWORK=yes pktio_main${EXEEXT} $*
-ret=$?
-
-exit $ret
diff --git a/test/linux-generic/validation/api/shmem/.gitignore b/test/linux-generic/validation/api/shmem/.gitignore
deleted file mode 100644
index 74195f576..000000000
--- a/test/linux-generic/validation/api/shmem/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-shmem_linux
-shmem_odp1
-shmem_odp2
diff --git a/test/linux-generic/validation/api/shmem/Makefile.am b/test/linux-generic/validation/api/shmem/Makefile.am
deleted file mode 100644
index b0ae62738..000000000
--- a/test/linux-generic/validation/api/shmem/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-include ../Makefile.inc
-
-#the main test program is shmem_linux, which, in turn, starts a shmem_odp:
-test_PROGRAMS = shmem_linux$(EXEEXT)
-test_extra_PROGRAMS = shmem_odp1$(EXEEXT) shmem_odp2$(EXEEXT)
-test_extradir = $(testdir)
-
-#shmem_linux is stand alone, pure linux (no ODP):
-dist_shmem_linux_SOURCES = shmem_linux.c
-shmem_linux_LDFLAGS = $(AM_LDFLAGS) -lrt
-
-#shmem_odp1 and shmem_odp2 are the 2 ODP processes:
-dist_shmem_odp1_SOURCES = shmem_odp1.c
-shmem_odp1_CFLAGS = $(AM_CFLAGS) \
- $(INCCUNIT_COMMON) \
- $(INCODP)
-shmem_odp1_LDFLAGS = $(AM_LDFLAGS)
-shmem_odp1_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
-
-dist_shmem_odp2_SOURCES = shmem_odp2.c
-shmem_odp2_CFLAGS = $(AM_CFLAGS) \
- $(INCCUNIT_COMMON) \
- $(INCODP)
-shmem_odp2_LDFLAGS = $(AM_LDFLAGS)
-shmem_odp2_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
-
-
-noinst_HEADERS = shmem_common.h shmem_linux.h shmem_odp1.h shmem_odp2.h
diff --git a/test/linux-generic/validation/api/shmem/shmem.h b/test/linux-generic/validation/api/shmem/shmem.h
deleted file mode 100644
index 2368a2e1c..000000000
--- a/test/linux-generic/validation/api/shmem/shmem.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_LINUX_TEST_SHMEM_H_
-#define _ODP_LINUX_TEST_SHMEM_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void shmem_test_odp_shm_proc(void);
-
-/* test arrays: */
-extern odp_testinfo_t shmem_linux_suite[];
-
-/* test registry: */
-extern odp_suiteinfo_t shmem_linux_suites[];
-
-#endif
diff --git a/test/linux-generic/validation/api/shmem/shmem_common.h b/test/linux-generic/validation/api/shmem/shmem_common.h
deleted file mode 100644
index 16227ecd5..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _COMMON_TEST_SHMEM_H_
-#define _COMMON_TEST_SHMEM_H_
-
-#define ODP_SHM_NAME "odp_linux_shared_mem"
-#define FIFO_NAME_FMT "/tmp/shmem_test_fifo-%d"
-#define ALIGN_SIZE (128)
-#define TEST_SHARE_FOO (0xf0f0f0f0)
-#define TEST_SHARE_BAR (0xf0f0f0f)
-#define TEST_FAILURE 'F'
-#define TEST_SUCCESS 'S'
-
-typedef struct {
- uint32_t foo;
- uint32_t bar;
-} test_shared_linux_data_t;
-
-#endif
diff --git a/test/linux-generic/validation/api/shmem/shmem_linux.c b/test/linux-generic/validation/api/shmem/shmem_linux.c
deleted file mode 100644
index 2f4c7628d..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_linux.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/* this test makes sure that odp shared memory created with the ODP_SHM_PROC
- * flag is visible under linux, and checks that memory created with the
- * ODP_SHM_EXPORT flag is visible by other ODP instances.
- * It therefore checks both that the link
- * name under /tmp is correct, and also checks that the memory contents
- * is indeed shared.
- * we want:
- * -the odp test to run using C UNIT
- * -the main process to return the correct return code.
- * (for the autotools test harness)
- *
- * To achieve this, the flow of operations is as follows:
- *
- * linux process (main, non odp) |
- * (shmem_linux.c) |
- * |
- * |
- * |
- * main() |
- * forks odp_app1 process |
- * wait for named pipe creation |
- * |
- * | ODP_APP1 process
- * | (shmem_odp1.c)
- * |
- * | allocate shmem
- * | populate shmem
- * | create named pipe
- * | wait for test report in fifo...
- * read shared memory |
- * check if memory contents is OK |
- * If not OK, write "F" in fifo and |
- * exit with failure code. | -------------------
- * |
- * forks odp app2 process | ODP APP2 process
- * wait for child terminaison & status| (shmem_odp2.c)
- * | lookup ODP_APP1 shared memory,
- * | check if memory contents is OK
- * | Exit(0) on success, exit(1) on fail
- * If child failed, write "F" in fifo |
- * exit with failure code. | -------------------
- * |
- * OK, write "S" in fifo, |
- * wait for child terminaison & status|
- * terminate with same status as child|
- * | ODP APP1 process
- * | (shmem_odp1.c)
- * |
- * | ...(continued)
- * | read S(success) or F(fail) from fifo
- * | report success or failure to C-Unit
- * | Exit(0) on success, exit(1) on fail
- * wait for child terminaison & status |
- * terminate with same status as child |
- * |
- * \|/
- * time
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/wait.h>
-#include <linux/limits.h>
-#include <stdio.h>
-#include <errno.h>
-#include <sys/mman.h>
-#include <libgen.h>
-#include <linux/limits.h>
-#include <inttypes.h>
-#include "shmem_linux.h"
-#include "shmem_common.h"
-
-#define ODP_APP1_NAME "shmem_odp1" /* name of the odp1 program, in this dir */
-#define ODP_APP2_NAME "shmem_odp2" /* name of the odp2 program, in this dir */
-#define DEVNAME_FMT "/tmp/odp-%" PRIu64 "-shm-%s" /* odp-<pid>-shm-<name> */
-#define MAX_FIFO_WAIT 30 /* Max time waiting for the fifo (sec) */
-
-/*
- * read the attributes of a externaly shared mem object:
- * input: ext_odp_pid, blockname: the remote ODP instance and the exported
- * block name to be searched.
- * Output: filename: the memory block underlaying file to be opened
- * (the given buffer should be big enough i.e. at
- * least ISHM_FILENAME_MAXLEN bytes)
- * The 3 following parameters are really here for debug
- * as they are really meaningles in a non-odp process:
- * len: the block real length (bytes, multiple of page sz)
- * flags: the _ishm flags setting the block was created with
- * align: the alignement setting the block was created with
- *
- * return 0 on success, non zero on error
- */
-static int read_shmem_attribues(uint64_t ext_odp_pid, const char *blockname,
- char *filename, uint64_t *len,
- uint32_t *flags, uint64_t *user_len,
- uint32_t *user_flags, uint32_t *align)
-{
- char shm_attr_filename[PATH_MAX];
- FILE *export_file;
-
- sprintf(shm_attr_filename, DEVNAME_FMT, ext_odp_pid, blockname);
-
- /* O_CREAT flag not given => failure if shm_attr_filename does not
- * already exist */
- export_file = fopen(shm_attr_filename, "r");
- if (export_file == NULL)
- return -1;
-
- if (fscanf(export_file, "ODP exported shm block info: ") != 0)
- goto export_file_read_err;
-
- if (fscanf(export_file, "ishm_blockname: %*s ") != 0)
- goto export_file_read_err;
-
- if (fscanf(export_file, "file: %s ", filename) != 1)
- goto export_file_read_err;
-
- if (fscanf(export_file, "length: %" PRIu64 " ", len) != 1)
- goto export_file_read_err;
-
- if (fscanf(export_file, "flags: %" PRIu32 " ", flags) != 1)
- goto export_file_read_err;
-
- if (fscanf(export_file, "user_length: %" PRIu64 " ", user_len) != 1)
- goto export_file_read_err;
-
- if (fscanf(export_file, "user_flags: %" PRIu32 " ", user_flags) != 1)
- goto export_file_read_err;
-
- if (fscanf(export_file, "align: %" PRIu32 " ", align) != 1)
- goto export_file_read_err;
-
- fclose(export_file);
- return 0;
-
-export_file_read_err:
- fclose(export_file);
- return -1;
-}
-
-void test_success(char *fifo_name, int fd, pid_t odp_app)
-{
- int status;
- int nb_char;
- char result = TEST_SUCCESS;
- /* write "Success" to the FIFO */
- nb_char = write(fd, &result, sizeof(char));
- close(fd);
- /* wait for the odp app1 to terminate */
- waitpid(odp_app, &status, 0);
- /* if the write failed, report an error anyway */
- if (nb_char != 1)
- status = 1;
- unlink(fifo_name);
- exit(status); /* the status reported by the odp side is returned */
-}
-
-void test_failure(char *fifo_name, int fd, pid_t odp_app)
-{
- int status;
- char result;
-
- int nb_char __attribute__((unused)); /*ignored: we fail anyway */
-
- result = TEST_FAILURE;
- /* write "Failure" to the FIFO */
- nb_char = write(fd, &result, sizeof(char));
- close(fd);
- /* wait for the odp app1 to terminate */
- waitpid(odp_app, &status, 0);
- unlink(fifo_name);
- exit(1); /* error */
-}
-
-int main(int argc __attribute__((unused)), char *argv[])
-{
- char prg_name[PATH_MAX];
- char odp_name1[PATH_MAX];
- char odp_name2[PATH_MAX];
- int nb_sec;
- int size;
- pid_t odp_app1;
- pid_t odp_app2;
- char *odp_params1 = NULL;
- char *odp_params2[3];
- char pid1[10];
- char fifo_name[PATH_MAX]; /* fifo for linux->odp feedback */
- int fifo_fd = -1;
- char shm_filename[PATH_MAX];/* shared mem device name, under /dev/shm */
- uint64_t len;
- uint32_t flags;
- uint64_t user_len;
- uint32_t user_flags;
- uint32_t align;
- int shm_fd;
- test_shared_linux_data_t *addr;
- int app2_status;
-
- /* odp_app1 is in the same directory as this file: */
- strncpy(prg_name, argv[0], PATH_MAX - 1);
- sprintf(odp_name1, "%s/%s", dirname(prg_name), ODP_APP1_NAME);
-
- /* start the ODP application: */
- odp_app1 = fork();
- if (odp_app1 < 0) /* error */
- exit(1);
-
- if (odp_app1 == 0) { /* child */
- execv(odp_name1, &odp_params1); /* no return unless error */
- fprintf(stderr, "execv failed: %s\n", strerror(errno));
- }
-
- /* wait max 30 sec for the fifo to be created by the ODP side.
- * Just die if time expire as there is no fifo to communicate
- * through... */
- sprintf(fifo_name, FIFO_NAME_FMT, odp_app1);
- for (nb_sec = 0; nb_sec < MAX_FIFO_WAIT; nb_sec++) {
- fifo_fd = open(fifo_name, O_WRONLY);
- if (fifo_fd >= 0)
- break;
- sleep(1);
- }
- if (fifo_fd < 0)
- exit(1);
- printf("pipe found\n");
-
- /* the linux named pipe has now been found, meaning that the
- * ODP application is up and running, and has allocated shmem.
- * check to see if linux can see the created shared memory: */
-
- /* read the shared memory attributes (includes the shm filename): */
- if (read_shmem_attribues(odp_app1, ODP_SHM_NAME,
- shm_filename, &len, &flags,
- &user_len, &user_flags, &align) != 0)
- test_failure(fifo_name, fifo_fd, odp_app1);
-
- /* open the shm filename (which is either on /tmp or on hugetlbfs)
- * O_CREAT flag not given => failure if shm_devname does not already
- * exist */
- shm_fd = open(shm_filename, O_RDONLY,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
- if (shm_fd == -1)
- test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
-
- /* linux ODP guarantees page size alignement. Larger alignment may
- * fail as 2 different processes will have fully unrelated
- * virtual spaces.
- */
- size = sizeof(test_shared_linux_data_t);
-
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, 0);
- if (addr == MAP_FAILED) {
- printf("shmem_linux: map failed!\n");
- test_failure(fifo_name, fifo_fd, odp_app1);
- }
-
- /* check that we see what the ODP application wrote in the memory */
- if ((addr->foo != TEST_SHARE_FOO) || (addr->bar != TEST_SHARE_BAR))
- test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
-
- /* odp_app2 is in the same directory as this file: */
- strncpy(prg_name, argv[0], PATH_MAX - 1);
- sprintf(odp_name2, "%s/%s", dirname(prg_name), ODP_APP2_NAME);
-
- /* start the second ODP application with pid of ODP_APP1 as parameter:*/
- sprintf(pid1, "%d", odp_app1);
- odp_params2[0] = odp_name2;
- odp_params2[1] = pid1;
- odp_params2[2] = NULL;
- odp_app2 = fork();
- if (odp_app2 < 0) /* error */
- exit(1);
-
- if (odp_app2 == 0) { /* child */
- execv(odp_name2, odp_params2); /* no return unless error */
- fprintf(stderr, "execv failed: %s\n", strerror(errno));
- }
-
- /* wait for the second ODP application to terminate:
- * status is OK if that second ODP application could see the
- * memory shared by the first one. */
- waitpid(odp_app2, &app2_status, 0);
-
- if (app2_status)
- test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
-
- /* everything looked good: */
- test_success(fifo_name, fifo_fd, odp_app1);
-}
diff --git a/test/linux-generic/validation/api/shmem/shmem_linux.h b/test/linux-generic/validation/api/shmem/shmem_linux.h
deleted file mode 100644
index a07a7758f..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_linux.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-void test_success(char *fifo_name, int fd, pid_t odp_app);
-void test_failure(char *fifo_name, int fd, pid_t odp_app);
-int main(int argc, char *argv[]);
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp1.c b/test/linux-generic/validation/api/shmem/shmem_odp1.c
deleted file mode 100644
index 3869c2e1c..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_odp1.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-
-#include <odp_cunit_common.h>
-#include "shmem_odp1.h"
-#include "shmem_common.h"
-
-#define TEST_SHARE_FOO (0xf0f0f0f0)
-#define TEST_SHARE_BAR (0xf0f0f0f)
-
-void shmem_test_odp_shm_proc(void)
-{
- char fifo_name[PATH_MAX];
- int fd;
- odp_shm_t shm;
- test_shared_data_t *test_shared_data;
- char test_result;
-
- /* reminder: ODP_SHM_PROC => export to linux, ODP_SHM_EXPORT=>to odp */
- shm = odp_shm_reserve(ODP_SHM_NAME,
- sizeof(test_shared_data_t),
- ALIGN_SIZE, ODP_SHM_PROC | ODP_SHM_EXPORT);
- CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
- test_shared_data = odp_shm_addr(shm);
- CU_ASSERT_FATAL(NULL != test_shared_data);
- test_shared_data->foo = TEST_SHARE_FOO;
- test_shared_data->bar = TEST_SHARE_BAR;
-
- odp_mb_full();
-
- /* open the fifo: this will indicate to linux process that it can
- * start the shmem lookups and check if it sees the data */
- sprintf(fifo_name, FIFO_NAME_FMT, getpid());
- CU_ASSERT_FATAL(mkfifo(fifo_name, 0666) == 0);
-
- /* read from the fifo: the linux process result: */
- printf("shmem_odp1: opening fifo: %s\n", fifo_name);
- fd = open(fifo_name, O_RDONLY);
- CU_ASSERT_FATAL(fd >= 0);
-
- printf("shmem_odp1: reading fifo: %s\n", fifo_name);
- CU_ASSERT(read(fd, &test_result, sizeof(char)) == 1);
- printf("shmem_odp1: closing fifo: %s\n", fifo_name);
- close(fd);
- CU_ASSERT_FATAL(test_result == TEST_SUCCESS);
-
- CU_ASSERT(odp_shm_free(shm) == 0);
-}
-
-odp_testinfo_t shmem_suite[] = {
- ODP_TEST_INFO(shmem_test_odp_shm_proc),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t shmem_suites[] = {
- {"Shared Memory", NULL, NULL, shmem_suite},
- ODP_SUITE_INFO_NULL,
-};
-
-int main(void)
-{
- int ret = odp_cunit_register(shmem_suites);
-
- if (ret == 0)
- ret = odp_cunit_run();
-
- return ret;
-}
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp1.h b/test/linux-generic/validation/api/shmem/shmem_odp1.h
deleted file mode 100644
index 614bbf805..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_odp1.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-void shmem_test_odp_shm_proc(void);
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp2.c b/test/linux-generic/validation/api/shmem/shmem_odp2.c
deleted file mode 100644
index 7d8c682b1..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_odp2.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdlib.h>
-
-#include <odp_cunit_common.h>
-#include "shmem_odp2.h"
-#include "shmem_common.h"
-
-#define TEST_SHARE_FOO (0xf0f0f0f0)
-#define TEST_SHARE_BAR (0xf0f0f0f)
-
-/* The C unit test harness is run by ODP1 app which will be told the return
- * staus of this process. See top of shmem_linux.c for chart flow of events
- */
-int main(int argc, char *argv[])
-{
- odp_instance_t odp1;
- odp_instance_t odp2;
- odp_shm_t shm;
- odp_shm_info_t info;
- test_shared_data_t *test_shared_data;
-
- /* odp init: */
- if (0 != odp_init_global(&odp2, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
- return 1;
- }
- if (0 != odp_init_local(odp2, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
- return 1;
- }
-
- /* test: map ODP1 memory and check its contents:
- * The pid of the ODP instantiation process sharing its memory
- * is given as first arg. In linux-generic ODP, this pid is actually
- * the ODP instance */
- if (argc != 2) {
- fprintf(stderr, "One single parameter expected, %d found.\n",
- argc);
- return 1;
- }
- odp1 = (odp_instance_t)atoi(argv[1]);
-
- printf("shmem_odp2: trying to grab %s from pid %d\n",
- ODP_SHM_NAME, (int)odp1);
- shm = odp_shm_import(ODP_SHM_NAME, odp1, ODP_SHM_NAME);
- if (shm == ODP_SHM_INVALID) {
- fprintf(stderr, "error: odp_shm_lookup_external failed.\n");
- return 1;
- }
-
- /* check that the read size matches the allocated size (in other ODP):*/
- if ((odp_shm_info(shm, &info)) ||
- (info.size != sizeof(*test_shared_data))) {
- fprintf(stderr, "error: odp_shm_info failed.\n");
- return 1;
- }
-
- test_shared_data = odp_shm_addr(shm);
- if (test_shared_data == NULL) {
- fprintf(stderr, "error: odp_shm_addr failed.\n");
- return 1;
- }
-
- if (test_shared_data->foo != TEST_SHARE_FOO) {
- fprintf(stderr, "error: Invalid data TEST_SHARE_FOO.\n");
- return 1;
- }
-
- if (test_shared_data->bar != TEST_SHARE_BAR) {
- fprintf(stderr, "error: Invalid data TEST_SHARE_BAR.\n");
- return 1;
- }
-
- if (odp_shm_free(shm) != 0) {
- fprintf(stderr, "error: odp_shm_free() failed.\n");
- return 1;
- }
-
- /* odp term: */
- if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
- return 1;
- }
-
- if (0 != odp_term_global(odp2)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
- return 1;
- }
-
- return 0;
-}
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp2.h b/test/linux-generic/validation/api/shmem/shmem_odp2.h
deleted file mode 100644
index a8db909a8..000000000
--- a/test/linux-generic/validation/api/shmem/shmem_odp2.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-int main(int argc, char *argv[]);
diff --git a/test/m4/configure.m4 b/test/m4/configure.m4
index 460e8449f..ea05e954f 100644
--- a/test/m4/configure.m4
+++ b/test/m4/configure.m4
@@ -1,3 +1,53 @@
-m4_include([test/common_plat/m4/configure.m4])
+##########################################################################
+# Build and install test applications
+##########################################################################
+AC_ARG_WITH([tests],
+ [AS_HELP_STRING([--without-tests],
+ [don't build and install test applications]
+ [[default=with]])],
+ [],
+ [with_tests=yes])
+AM_CONDITIONAL([WITH_TESTS], [test x$with_tests != xno])
-AC_CONFIG_FILES([test/Makefile])
+m4_include([test/m4/miscellaneous.m4])
+m4_include([test/m4/performance.m4])
+m4_include([test/m4/validation.m4])
+
+AC_CONFIG_FILES([test/common/Makefile
+ test/miscellaneous/Makefile
+ test/performance/Makefile
+ test/validation/Makefile
+ test/validation/api/align/Makefile
+ test/validation/api/atomic/Makefile
+ test/validation/api/barrier/Makefile
+ test/validation/api/buffer/Makefile
+ test/validation/api/byteorder/Makefile
+ test/validation/api/chksum/Makefile
+ test/validation/api/classification/Makefile
+ test/validation/api/comp/Makefile
+ test/validation/api/cpumask/Makefile
+ test/validation/api/crypto/Makefile
+ test/validation/api/dma/Makefile
+ test/validation/api/errno/Makefile
+ test/validation/api/event/Makefile
+ test/validation/api/hash/Makefile
+ test/validation/api/hints/Makefile
+ test/validation/api/init/Makefile
+ test/validation/api/ipsec/Makefile
+ test/validation/api/lock/Makefile
+ test/validation/api/Makefile
+ test/validation/api/ml/Makefile
+ test/validation/api/packet/Makefile
+ test/validation/api/pktio/Makefile
+ test/validation/api/pool/Makefile
+ test/validation/api/queue/Makefile
+ test/validation/api/random/Makefile
+ test/validation/api/scheduler/Makefile
+ test/validation/api/shmem/Makefile
+ test/validation/api/stash/Makefile
+ test/validation/api/std/Makefile
+ test/validation/api/system/Makefile
+ test/validation/api/thread/Makefile
+ test/validation/api/time/Makefile
+ test/validation/api/timer/Makefile
+ test/validation/api/traffic_mngr/Makefile])
diff --git a/test/m4/miscellaneous.m4 b/test/m4/miscellaneous.m4
new file mode 100644
index 000000000..62178c9f2
--- /dev/null
+++ b/test/m4/miscellaneous.m4
@@ -0,0 +1,23 @@
+##########################################################################
+# Enable/disable test-cpp
+##########################################################################
+AC_ARG_ENABLE([test-cpp],
+ [AS_HELP_STRING([--disable-test-cpp], [run basic test against cpp]
+ [[default=enable-if-cpp-works]])],
+ [test_cpp=$enableval],
+ [test_cpp=check])
+
+if test "x$test_cpp" != "xno" ; then
+ AC_CACHE_CHECK([if C++ compiler works], [odp_cv_cxx_works],
+ [AC_LANG_PUSH([C++])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])], [odp_cv_cxx_works=yes],
+ [odp_cv_cxx_works=no])
+ AC_LANG_POP([C++])])
+ AS_IF([test "x$test_cpp$odp_cv_cxx_works" = "xyesno"],
+ [AC_MSG_FAILURE([C++ compiler test failed])],
+ [test "x$test_cpp$odp_cv_cxx_works" = "xcheckno"],
+ [AC_MSG_NOTICE([disabling C++ test]) ; test_cpp=no],
+ [test_cpp=yes])
+fi
+
+AM_CONDITIONAL([test_cpp], [test x$test_cpp = xyes ])
diff --git a/test/m4/performance.m4 b/test/m4/performance.m4
new file mode 100644
index 000000000..fce0ffb4f
--- /dev/null
+++ b/test/m4/performance.m4
@@ -0,0 +1,9 @@
+##########################################################################
+# Enable/disable test-perf
+##########################################################################
+AC_ARG_ENABLE([test-perf],
+ [AS_HELP_STRING([--enable-test-perf],
+ [run test in test/performance [default=enabled]])],
+ [test_perf=$enableval],
+ [test_perf=yes])
+AM_CONDITIONAL([test_perf], [test x$test_perf = xyes ])
diff --git a/test/m4/validation.m4 b/test/m4/validation.m4
new file mode 100644
index 000000000..a90910008
--- /dev/null
+++ b/test/m4/validation.m4
@@ -0,0 +1,34 @@
+##########################################################################
+# Enable/disable Unit tests
+##########################################################################
+AC_ARG_ENABLE([test_vald],
+ [AS_HELP_STRING([--enable-test-vald],
+ [run test in test/validation [default=enabled]])],
+ [test_vald=$enableval],
+ [test_vald=check])
+
+##########################################################################
+# Check for CUnit availability
+##########################################################################
+cunit_support=$test_vald
+AS_IF([test "x$cunit_support" != "xno"],
+ [PKG_CHECK_MODULES([CUNIT], [cunit], [cunit_support=yes],
+ [AC_MSG_WARN([pkg-config could not find CUnit, guessing])
+ cunit_support=yes
+ AC_CHECK_HEADERS([CUnit/Basic.h], [], [cunit_support=no])
+ AC_CHECK_LIB([cunit], [CU_get_error], [CUNIT_LIBS="-lcunit"],
+ [cunit_support=no])
+])])
+
+AS_IF([test "x$test_vald" = "xyes" -a "x$cunit_support" = "xno"],
+ [AC_MSG_ERROR([Validation testsuite requested, but CUnit was not found])],
+ [test "x$test_vald" = "xcheck" -a "x$cunit_support" = "xno"],
+ [AC_MSG_WARN([CUnit was not found, disabling validation testsuite])
+ test_vald=no],
+ [test "x$test_vald" != "xno"], [test_vald=yes])
+
+AM_CONDITIONAL([cunit_support], [test "x$cunit_support" = "xyes"])
+AM_CONDITIONAL([test_vald], [test "x$test_vald" = "xyes"])
+
+AC_SUBST([CUNIT_CFLAGS])
+AC_SUBST([CUNIT_LIBS])
diff --git a/test/common_plat/miscellaneous/.gitignore b/test/miscellaneous/.gitignore
index 6e555c58e..6069e336d 100644
--- a/test/common_plat/miscellaneous/.gitignore
+++ b/test/miscellaneous/.gitignore
@@ -1,3 +1,4 @@
odp_api_from_cpp
+odp_api_headers
*.trs
*.log
diff --git a/test/miscellaneous/Makefile.am b/test/miscellaneous/Makefile.am
new file mode 100644
index 000000000..95514edcb
--- /dev/null
+++ b/test/miscellaneous/Makefile.am
@@ -0,0 +1,45 @@
+include $(top_srcdir)/test/Makefile.inc
+
+if test_cpp
+bin_PROGRAMS = odp_api_from_cpp
+TESTS = odp_api_from_cpp
+endif
+
+odp_api_from_cpp_SOURCES = odp_api_from_cpp.cpp
+
+noinst_PROGRAMS = odp_api_headers
+odp_api_headers_CFLAGS = $(AM_CFLAGS) -Wconversion
+odp_api_headers_SOURCES = odp_api_headers.c
+
+PKGCONFIG = PKG_CONFIG_PATH=$(libdir)/pkgconfig:$$PKG_CONFIG_PATH pkg-config --cflags --libs
+
+if enable_shared
+
+PROGRAM_shared = odp_api_headers_shared
+
+installcheck-local: $(PROGRAM_shared)
+
+$(PROGRAM_shared): $(srcdir)/$(odp_api_headers_SOURCES)
+ $(CC) $(AM_CFLAGS) $(CFLAGS) $^ -o $@ \
+ `$(PKGCONFIG) libodphelper` `$(PKGCONFIG) lib$(ODP_LIB_NAME)`
+if ! cross_compile
+ LD_LIBRARY_PATH=$(libdir) ./$@
+endif
+endif
+
+if enable_static
+
+PROGRAM_static = odp_api_headers_static
+
+installcheck-local: $(PROGRAM_static)
+
+$(PROGRAM_static): $(srcdir)/$(odp_api_headers_SOURCES)
+ $(CC) $(AM_CFLAGS) $(CFLAGS) $^ -o $@ \
+ `$(PKGCONFIG) --static libodphelper | sed "s/-lodphelper/-l:libodphelper.a/"` \
+ `$(PKGCONFIG) --static lib$(ODP_LIB_NAME) | sed "s/-l$(ODP_LIB_NAME)/-l:lib$(ODP_LIB_NAME).a/"`
+if ! cross_compile
+ ./$@
+endif
+endif
+
+DISTCLEANFILES = $(PROGRAM_shared) $(PROGRAM_static)
diff --git a/test/miscellaneous/odp_api_from_cpp.cpp b/test/miscellaneous/odp_api_from_cpp.cpp
new file mode 100644
index 000000000..c5aae0c3f
--- /dev/null
+++ b/test/miscellaneous/odp_api_from_cpp.cpp
@@ -0,0 +1,26 @@
+#include <stdlib.h>
+#include <iostream>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+int main(int argc ODP_UNUSED, const char *argv[] ODP_UNUSED)
+{
+ odp_instance_t inst;
+
+ if (odp_init_global(&inst, NULL, NULL))
+ exit(EXIT_FAILURE);
+
+ if (odp_init_local(inst, ODP_THREAD_WORKER))
+ exit(EXIT_FAILURE);
+
+ std::cout << "\tODP API version: " << odp_version_api_str() << std::endl;
+ std::cout << "\tODP implementation version: " << odp_version_impl_str() << std::endl;
+
+ if (odp_term_local())
+ exit(EXIT_FAILURE);
+
+ if (odp_term_global(inst))
+ exit(EXIT_FAILURE);
+
+ return 0;
+}
diff --git a/test/miscellaneous/odp_api_headers.c b/test/miscellaneous/odp_api_headers.c
new file mode 100644
index 000000000..0dd6b0a2e
--- /dev/null
+++ b/test/miscellaneous/odp_api_headers.c
@@ -0,0 +1,39 @@
+/* Copyright (c) 2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
+{
+ odp_instance_t inst;
+
+ if (odp_init_global(&inst, NULL, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ return -1;
+ }
+
+ if (odp_init_local(inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ return -1;
+ }
+
+ odp_sys_info_print();
+ printf("Helper library version: %s\n", odph_version_str());
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local term failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(inst)) {
+ ODPH_ERR("Global term failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
new file mode 100644
index 000000000..46d9e9c2c
--- /dev/null
+++ b/test/performance/.gitignore
@@ -0,0 +1,33 @@
+*.log
+*.trs
+odp_atomic
+odp_atomic_perf
+odp_bench_buffer
+odp_bench_misc
+odp_bench_packet
+odp_bench_pktio_sp
+odp_bench_timer
+odp_cpu_bench
+odp_crc
+odp_crypto
+odp_dmafwd
+odp_dma_perf
+odp_ipsec
+odp_ipsecfwd
+odp_l2fwd
+odp_lock_perf
+odp_mem_perf
+odp_packet_gen
+odp_pktio_ordered
+odp_pktio_perf
+odp_pool_latency
+odp_pool_perf
+odp_queue_perf
+odp_random
+odp_sched_latency
+odp_sched_perf
+odp_sched_pktio
+odp_scheduling
+odp_stash_perf
+odp_stress
+odp_timer_perf
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
new file mode 100644
index 000000000..356e98a2d
--- /dev/null
+++ b/test/performance/Makefile.am
@@ -0,0 +1,125 @@
+include $(top_srcdir)/test/Makefile.inc
+
+TESTS_ENVIRONMENT += TEST_DIR=${builddir}
+
+EXECUTABLES = odp_atomic_perf \
+ odp_bench_buffer \
+ odp_bench_misc \
+ odp_bench_packet \
+ odp_bench_pktio_sp \
+ odp_bench_timer \
+ odp_crc \
+ odp_lock_perf \
+ odp_mem_perf \
+ odp_pktio_perf \
+ odp_pool_latency \
+ odp_pool_perf \
+ odp_queue_perf \
+ odp_stash_perf \
+ odp_random \
+ odp_stress
+
+COMPILE_ONLY = odp_cpu_bench \
+ odp_crypto \
+ odp_dmafwd \
+ odp_dma_perf \
+ odp_ipsec \
+ odp_l2fwd \
+ odp_packet_gen \
+ odp_pktio_ordered \
+ odp_sched_latency \
+ odp_sched_perf \
+ odp_sched_pktio \
+ odp_scheduling \
+ odp_timer_perf
+
+if LIBCONFIG
+COMPILE_ONLY += odp_ipsecfwd
+endif
+
+TESTSCRIPTS = odp_cpu_bench_run.sh \
+ odp_crypto_run.sh \
+ odp_dma_perf_run.sh \
+ odp_ipsec_run.sh \
+ odp_l2fwd_run.sh \
+ odp_packet_gen_run.sh \
+ odp_sched_latency_run.sh \
+ odp_sched_perf_run.sh \
+ odp_sched_pktio_run.sh \
+ odp_scheduling_run.sh \
+ odp_timer_perf_run.sh
+
+if ODP_PKTIO_PCAP
+TESTSCRIPTS += odp_dmafwd_run.sh \
+ odp_pktio_ordered_run.sh
+endif
+
+TEST_EXTENSIONS = .sh
+
+if test_perf
+TESTS = $(EXECUTABLES) $(TESTSCRIPTS)
+endif
+
+bin_PROGRAMS = $(EXECUTABLES) $(COMPILE_ONLY)
+
+odp_atomic_perf_SOURCES = odp_atomic_perf.c
+odp_bench_buffer_SOURCES = odp_bench_buffer.c bench_common.c bench_common.h
+odp_bench_misc_SOURCES = odp_bench_misc.c bench_common.c bench_common.h
+odp_bench_packet_SOURCES = odp_bench_packet.c bench_common.c bench_common.h
+odp_bench_pktio_sp_SOURCES = odp_bench_pktio_sp.c bench_common.c bench_common.h
+odp_bench_timer_SOURCES = odp_bench_timer.c bench_common.c bench_common.h
+odp_cpu_bench_SOURCES = odp_cpu_bench.c
+odp_crc_SOURCES = odp_crc.c
+odp_crypto_SOURCES = odp_crypto.c
+odp_dmafwd_SOURCES = odp_dmafwd.c
+odp_dma_perf_SOURCES = odp_dma_perf.c
+odp_ipsec_SOURCES = odp_ipsec.c
+odp_lock_perf_SOURCES = odp_lock_perf.c
+odp_mem_perf_SOURCES = odp_mem_perf.c
+odp_packet_gen_SOURCES = odp_packet_gen.c
+odp_pktio_ordered_SOURCES = odp_pktio_ordered.c dummy_crc.h
+odp_sched_latency_SOURCES = odp_sched_latency.c
+odp_sched_pktio_SOURCES = odp_sched_pktio.c
+odp_scheduling_SOURCES = odp_scheduling.c
+odp_pktio_perf_SOURCES = odp_pktio_perf.c
+odp_pool_latency_SOURCES = odp_pool_latency.c
+odp_pool_perf_SOURCES = odp_pool_perf.c
+odp_queue_perf_SOURCES = odp_queue_perf.c
+odp_random_SOURCES = odp_random.c
+odp_sched_perf_SOURCES = odp_sched_perf.c
+odp_stress_SOURCES = odp_stress.c
+odp_timer_perf_SOURCES = odp_timer_perf.c
+
+if LIBCONFIG
+odp_ipsecfwd_SOURCES = odp_ipsecfwd.c
+AM_CFLAGS += $(LIBCONFIG_CFLAGS)
+endif
+
+# l2fwd test depends on generator example
+EXTRA_odp_l2fwd_DEPENDENCIES = $(top_builddir)/example/generator/odp_generator$(EXEEXT)
+$(top_builddir)/example/generator/odp_generator$(EXEEXT):
+ $(MAKE) -C $(top_builddir)/example/generator odp_generator$(EXEEXT)
+
+dist_check_SCRIPTS = $(TESTSCRIPTS)
+
+dist_check_DATA = udp64.pcap
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS) $(dist_check_DATA); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS) $(dist_check_DATA); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/test/performance/bench_common.c b/test/performance/bench_common.c
new file mode 100644
index 000000000..640889503
--- /dev/null
+++ b/test/performance/bench_common.c
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "bench_common.h"
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+void bench_suite_init(bench_suite_t *suite)
+{
+ memset(suite, 0, sizeof(bench_suite_t));
+
+ suite->measure_time = true;
+
+ odp_atomic_init_u32(&suite->exit_worker, 0);
+}
+
+void bench_run_indef(bench_info_t *info, odp_atomic_u32_t *exit_thread)
+{
+ const char *desc;
+
+ desc = info->desc != NULL ? info->desc : info->name;
+
+ printf("Running odp_%s test indefinitely\n", desc);
+
+ while (!odp_atomic_load_u32(exit_thread)) {
+ int ret;
+
+ if (info->init != NULL)
+ info->init();
+
+ ret = info->run();
+
+ if (info->term != NULL)
+ info->term();
+
+ if (!ret)
+ ODPH_ABORT("Benchmark %s failed\n", desc);
+ }
+}
+
+int bench_run(void *arg)
+{
+ uint64_t c1, c2;
+ odp_time_t t1, t2;
+ bench_suite_t *suite = arg;
+ const uint64_t repeat_count = suite->repeat_count;
+ const odp_bool_t meas_time = suite->measure_time;
+ double result;
+
+ printf("\nAverage %s per function call\n", meas_time ? "time (nsec)" : "CPU cycles");
+ printf("-------------------------------------------------\n");
+
+ for (int j = 0; j < suite->num_bench; j++) {
+ int ret;
+ const char *desc;
+ const bench_info_t *bench = &suite->bench[j];
+ uint64_t max_rounds = suite->rounds;
+ uint64_t total = 0;
+
+ if (bench->max_rounds && bench->max_rounds < max_rounds)
+ max_rounds = bench->max_rounds;
+
+ /* Run selected test indefinitely */
+ if (suite->indef_idx) {
+ if ((j + 1) != suite->indef_idx) {
+ j++;
+ continue;
+ }
+ bench_run_indef(&suite->bench[j], &suite->exit_worker);
+ return 0;
+ }
+
+ desc = bench->desc != NULL ? bench->desc : bench->name;
+
+ /* The zeroeth round is a warmup round that will be ignored */
+ for (uint64_t round = 0; round <= max_rounds; round++) {
+ if (bench->init != NULL)
+ bench->init();
+
+ if (meas_time)
+ t1 = odp_time_local_strict();
+ else
+ c1 = odp_cpu_cycles();
+
+ ret = bench->run();
+
+ if (meas_time)
+ t2 = odp_time_local_strict();
+ else
+ c2 = odp_cpu_cycles();
+
+ if (bench->term != NULL)
+ bench->term();
+
+ if (!ret) {
+ ODPH_ERR("Benchmark odp_%s failed\n", desc);
+ suite->retval = -1;
+ return -1;
+ }
+
+ if (odp_unlikely(round == 0))
+ continue;
+ if (meas_time)
+ total += odp_time_diff_ns(t2, t1);
+ else
+ total += odp_cpu_cycles_diff(c2, c1);
+ }
+
+ /* Each benchmark runs internally 'repeat_count' times. */
+ result = ((double)total) / (max_rounds * repeat_count);
+
+ printf("[%02d] odp_%-26s: %12.2f\n", j + 1, desc, result);
+ if (suite->result)
+ suite->result[j] = result;
+ }
+ printf("\n");
+ /* Print dummy result to prevent compiler to optimize it away*/
+ if (suite->dummy)
+ printf("(dummy result: 0x%" PRIx64 ")\n\n", suite->dummy);
+
+ return 0;
+}
+
+void bench_tm_suite_init(bench_tm_suite_t *suite)
+{
+ memset(suite, 0, sizeof(bench_tm_suite_t));
+
+ odp_atomic_init_u32(&suite->exit_worker, 0);
+}
+
+uint8_t bench_tm_func_register(bench_tm_result_t *res, const char *func_name)
+{
+ uint8_t num_func = res->num;
+
+ if (num_func >= BENCH_TM_MAX_FUNC)
+ ODPH_ABORT("Too many test functions (max %d)\n", BENCH_TM_MAX_FUNC);
+
+ res->func[num_func].name = func_name;
+ res->num++;
+
+ return num_func;
+}
+
+void bench_tm_func_record(odp_time_t t2, odp_time_t t1, bench_tm_result_t *res, uint8_t id)
+{
+ odp_time_t diff = odp_time_diff(t2, t1);
+
+ ODPH_ASSERT(id < BENCH_TM_MAX_FUNC);
+
+ res->func[id].tot = odp_time_sum(res->func[id].tot, diff);
+
+ if (odp_time_cmp(diff, res->func[id].min) < 0)
+ res->func[id].min = diff;
+
+ if (odp_time_cmp(diff, res->func[id].max) > 0)
+ res->func[id].max = diff;
+
+ res->func[id].num++;
+}
+
+static void init_result(bench_tm_result_t *res)
+{
+ memset(res, 0, sizeof(bench_tm_result_t));
+
+ for (int i = 0; i < BENCH_TM_MAX_FUNC; i++) {
+ res->func[i].tot = ODP_TIME_NULL;
+ res->func[i].min = odp_time_local_from_ns(ODP_TIME_HOUR_IN_NS);
+ res->func[i].max = ODP_TIME_NULL;
+ }
+}
+
+static void print_results(bench_tm_result_t *res)
+{
+ for (uint8_t i = 0; i < res->num; i++) {
+ uint64_t num = res->func[i].num ? res->func[i].num : 1;
+
+ printf(" %-38s %-12" PRIu64 " %-12" PRIu64 " %-12" PRIu64 "\n",
+ res->func[i].name,
+ odp_time_to_ns(res->func[i].min),
+ odp_time_to_ns(res->func[i].tot) / num,
+ odp_time_to_ns(res->func[i].max));
+ }
+}
+
+int bench_tm_run(void *arg)
+{
+ bench_tm_suite_t *suite = arg;
+
+ printf("\nLatency (nsec) per function call min avg max\n");
+ printf("------------------------------------------------------------------------------\n");
+
+ for (uint32_t j = 0; j < suite->num_bench; j++) {
+ const bench_tm_info_t *bench = &suite->bench[j];
+ uint64_t rounds = suite->rounds;
+ bench_tm_result_t res;
+
+ /* Run only selected test case */
+ if (suite->bench_idx && (j + 1) != suite->bench_idx)
+ continue;
+
+ if (bench->cond != NULL && !bench->cond()) {
+ printf("[%02d] %-41s n/a n/a n/a\n",
+ j + 1, bench->name);
+ continue;
+ }
+
+ if (bench->max_rounds && bench->max_rounds < rounds)
+ rounds = bench->max_rounds;
+
+ /*
+ * Run each test twice.
+ * Results from the first warm-up round are ignored.
+ */
+ for (uint32_t i = 0; i < 2; i++) {
+ if (odp_atomic_load_u32(&suite->exit_worker))
+ return 0;
+
+ init_result(&res);
+
+ if (bench->init != NULL)
+ bench->init();
+
+ if (bench->run(&res, rounds)) {
+ ODPH_ERR("Benchmark %s failed\n", bench->name);
+ suite->retval = -1;
+ return -1;
+ }
+
+ if (bench->term != NULL)
+ bench->term();
+
+ }
+ printf("[%02d] %-26s\n", j + 1, bench->name);
+ print_results(&res);
+ }
+ printf("\n");
+
+ return 0;
+}
diff --git a/test/performance/bench_common.h b/test/performance/bench_common.h
new file mode 100644
index 000000000..4b59c941f
--- /dev/null
+++ b/test/performance/bench_common.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef BENCH_COMMON_H
+#define BENCH_COMMON_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include <stdint.h>
+
+/**
+ * Check benchmark preconditions
+ *
+ * Returns !0 if benchmark precondition is met.
+ */
+typedef int (*bench_cond_fn_t)(void);
+
+/**
+ * Initialize benchmark resources
+ */
+typedef void (*bench_init_fn_t)(void);
+
+/**
+ * Run benchmark
+ *
+ * Returns >0 on success.
+ */
+typedef int (*bench_run_fn_t)(void);
+
+/**
+ * Release benchmark resources
+ */
+typedef void (*bench_term_fn_t)(void);
+
+/* Benchmark test data */
+typedef struct {
+ /* Default test name */
+ const char *name;
+
+ /* Optional alternate test description */
+ const char *desc;
+
+ /* Optional precondition to run test */
+ bench_cond_fn_t cond;
+
+ /* Optional test initializer function */
+ bench_init_fn_t init;
+
+ /* Test function to run */
+ bench_run_fn_t run;
+
+ /* Optional test terminate function */
+ bench_term_fn_t term;
+
+ /* Optional test specific limit for rounds (tuning for slow implementations) */
+ uint32_t max_rounds;
+
+} bench_info_t;
+
+/* Benchmark suite data */
+typedef struct {
+ /* Array of benchmark functions */
+ bench_info_t *bench;
+
+ /* Number of benchmark functions */
+ int num_bench;
+
+ /* Optional benchmark index to run indefinitely (1...num_bench) */
+ int indef_idx;
+
+ /* Suite exit value output */
+ int retval;
+
+ /* Measure time vs. CPU cycles */
+ odp_bool_t measure_time;
+
+ /* Break worker loop if set to 1 */
+ odp_atomic_u32_t exit_worker;
+
+ /* Number of API function calls per test case */
+ uint64_t repeat_count;
+
+ /* Number of rounds per test case */
+ uint64_t rounds;
+
+ /* Dummy test result output */
+ uint64_t dummy;
+
+ /* Optional test result output array */
+ double *result;
+
+} bench_suite_t;
+
+/**
+ * Initialize benchmark suite parameters
+ */
+void bench_suite_init(bench_suite_t *suite);
+
+/**
+ * Run selected test indefinitely
+ */
+void bench_run_indef(bench_info_t *info, odp_atomic_u32_t *exit_thread);
+
+/**
+ * Run test suite and print results
+ *
+ * The argument is of type 'bench_suite_t *'. Returns 0 on success and <0 on failure.
+ */
+int bench_run(void *arg);
+
+/*
+ * Timed benchmark framework
+ *
+ * The main difference compared to the standard benchmark suite is that all
+ * latency measurements are performed inside the test cases.
+ */
+
+/* Maximum number of benchmarked functions per test case */
+#define BENCH_TM_MAX_FUNC 8
+
+/* Timed benchmark results */
+typedef struct bench_tm_results_s {
+ /* Results per function */
+ struct {
+ /* Name of function */
+ const char *name;
+
+ /* Total duration of all function calls */
+ odp_time_t tot;
+
+ /* Minimum duration */
+ odp_time_t min;
+
+ /* Maximum duration */
+ odp_time_t max;
+
+ /* Number of measurements */
+ uint64_t num;
+
+ } func[BENCH_TM_MAX_FUNC];
+
+ /* Number of registered test functions */
+ uint8_t num;
+
+} bench_tm_result_t;
+
+/**
+ * Timed benchmark test case
+ *
+ * Returns 0 on success and <0 on failure.
+ */
+typedef int (*bench_tm_run_fn_t)(bench_tm_result_t *res, int repeat_count);
+
+/* Timed benchmark test case */
+typedef struct {
+ /* Test case name */
+ const char *name;
+
+ /* Optional precondition to run test */
+ bench_cond_fn_t cond;
+
+ /* Optional test initializer function */
+ bench_init_fn_t init;
+
+ /* Test function to run */
+ bench_tm_run_fn_t run;
+
+ /* Optional test termination function */
+ bench_term_fn_t term;
+
+ /* Optional test specific limit for rounds (tuning for slow implementations) */
+ uint32_t max_rounds;
+
+} bench_tm_info_t;
+
+/* Timed benchmark suite data */
+typedef struct {
+ /* Array of benchmark test cases */
+ bench_tm_info_t *bench;
+
+ /* Number of benchmark test cases */
+ uint32_t num_bench;
+
+ /* Optional benchmark index to run (1...num_bench) */
+ uint32_t bench_idx;
+
+ /* Suite exit value output */
+ int retval;
+
+ /* Number of rounds per test case */
+ uint64_t rounds;
+
+ /* Break worker loop if set to 1 */
+ odp_atomic_u32_t exit_worker;
+
+} bench_tm_suite_t;
+
+/**
+ * Initialize benchmark suite data
+ */
+void bench_tm_suite_init(bench_tm_suite_t *suite);
+
+/**
+ * Register function for benchmarking
+ *
+ * Called by each test case to register benchmarked functions. Returns function
+ * ID for recording benchmark results. At most BENCH_TM_MAX_FUNC functions can
+ * be registered per test case.
+ */
+uint8_t bench_tm_func_register(bench_tm_result_t *res, const char *func_name);
+
+/**
+ * Record results for previously registered function
+ *
+ * Test case must call this function every test round for each registered
+ * function.
+ */
+void bench_tm_func_record(odp_time_t t2, odp_time_t t1, bench_tm_result_t *res, uint8_t id);
+
+/**
+ * Run timed test suite and print results
+ *
+ * The argument is of type 'bench_tm_suite_t *'. Returns 0 on success and <0 on failure.
+ */
+int bench_tm_run(void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/common_plat/performance/dummy_crc.h b/test/performance/dummy_crc.h
index 38da44455..01e6c2433 100644
--- a/test/common_plat/performance/dummy_crc.h
+++ b/test/performance/dummy_crc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -37,6 +37,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
/**
* @file
*
diff --git a/test/performance/odp_atomic_perf.c b/test/performance/odp_atomic_perf.c
new file mode 100644
index 000000000..e665081a2
--- /dev/null
+++ b/test/performance/odp_atomic_perf.c
@@ -0,0 +1,1406 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_atomic_perf.c
+ *
+ * Performance test application for atomic operation APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Default number of test rounds */
+#define NUM_ROUNDS 100000u
+
+/* Initial value for atomic variables. Supports up to 2 billion
+ * rounds of 32-bit min and max tests. */
+#define INIT_VAL 0x80000000
+
+/* Max number of workers if num_cpu=0 */
+#define DEFAULT_MAX_WORKERS 10
+
+#define TEST_INFO(name, test, validate, op_type) \
+ {name, test, validate, op_type}
+
+/* Test function template */
+typedef void (*test_fn_t)(void *val, void *out, uint32_t num_round);
+/* Test result validation function template */
+typedef int (*validate_fn_t)(void *val, void *out, uint32_t num_round,
+ uint32_t num_worker, int private);
+
+typedef enum {
+ OP_32BIT,
+ OP_64BIT,
+ OP_128BIT
+} op_bit_t;
+
+/* Command line options */
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint32_t num_round;
+ int private;
+
+} test_options_t;
+
+/* Cache aligned atomics for private mode operation */
+typedef struct ODP_ALIGNED_CACHE test_atomic_t {
+ union {
+ odp_atomic_u32_t u32;
+ odp_atomic_u64_t u64;
+ odp_atomic_u128_t u128;
+ };
+} test_atomic_t;
+
+typedef struct test_global_t test_global_t;
+
+/* Worker thread context */
+typedef struct test_thread_ctx_t {
+ test_global_t *global;
+ test_fn_t func;
+ uint64_t nsec;
+ uint32_t idx;
+ op_bit_t type;
+
+} test_thread_ctx_t;
+
+/* Global data */
+struct test_global_t {
+ test_options_t test_options;
+ odp_barrier_t barrier;
+ union {
+ odp_atomic_u32_t atomic_u32;
+ odp_atomic_u64_t atomic_u64;
+ odp_atomic_u128_t atomic_u128;
+ };
+ odp_cpumask_t cpumask;
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_thread_ctx_t thread_ctx[ODP_THREAD_COUNT_MAX];
+ test_atomic_t atomic_private[ODP_THREAD_COUNT_MAX];
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ odp_u128_t u128;
+ } output[ODP_THREAD_COUNT_MAX];
+};
+
+typedef struct {
+ const char *name;
+ test_fn_t test_fn;
+ validate_fn_t validate_fn;
+ op_bit_t type;
+} test_case_t;
+
+static test_global_t *test_global;
+
+/* Benchmark odp_atomic_load_u32(): the loaded values are summed so the
+ * compiler cannot optimize the loads away; the sum is written to 'out'
+ * and later checked by the matching validation function. */
+static inline void test_atomic_load_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_u32(atomic_val);
+
+	*result = ret;
+}
+
+/* 64-bit variant of the atomic load benchmark */
+static inline void test_atomic_load_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_u64(atomic_val);
+
+	*result = ret;
+}
+
+/* 128-bit variant: both 64-bit halves are accumulated separately */
+static inline void test_atomic_load_u128(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u128_t *atomic_val = val;
+	odp_u128_t *result = out;
+	odp_u128_t ret;
+
+	ret.u64[0] = 0;
+	ret.u64[1] = 0;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		odp_u128_t cur_val = odp_atomic_load_u128(atomic_val);
+
+		ret.u64[0] += cur_val.u64[0];
+		ret.u64[1] += cur_val.u64[1];
+	}
+
+	*result = ret;
+}
+
+static inline int validate_atomic_init_val_u32(void *val, void *out, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED,
+ int private ODP_UNUSED)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+
+ return (odp_atomic_load_u32(atomic_val) != INIT_VAL) ||
+ (*result != (uint32_t)INIT_VAL * num_round);
+}
+
+static inline int validate_atomic_init_val_u64(void *val, void *out, uint32_t num_round,
+ uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+
+ return (odp_atomic_load_u64(atomic_val) != INIT_VAL) ||
+ (*result != (uint64_t)INIT_VAL * num_round);
+}
+
+static inline int validate_atomic_init_val_u128(void *val, void *out, uint32_t num_round,
+ uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ odp_u128_t atomic_val = odp_atomic_load_u128((odp_atomic_u128_t *)val);
+ odp_u128_t *result = out;
+
+ if (atomic_val.u64[0] != INIT_VAL || atomic_val.u64[1] != INIT_VAL)
+ return -1;
+
+ if (result->u64[0] != (uint64_t)INIT_VAL * num_round ||
+ result->u64[1] != (uint64_t)INIT_VAL * num_round)
+ return -1;
+
+ return 0;
+}
+
+static inline void test_atomic_store_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_store_u32(atomic_val, new_val++);
+}
+
+static inline void test_atomic_store_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_store_u64(atomic_val, new_val++);
+}
+
+static inline void test_atomic_store_u128(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u128_t *atomic_val = val;
+ odp_u128_t new_val;
+
+ new_val.u64[0] = INIT_VAL + 1;
+ new_val.u64[1] = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ odp_atomic_store_u128(atomic_val, new_val);
+ new_val.u64[0]++;
+ new_val.u64[1]++;
+ }
+}
+
+static inline int validate_atomic_num_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_num_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_num_round_u128(void *val, void *out ODP_UNUSED,
+ uint32_t num_round, uint32_t worker ODP_UNUSED,
+ int private ODP_UNUSED)
+{
+ odp_u128_t atomic_val = odp_atomic_load_u128((odp_atomic_u128_t *)val);
+
+ return (atomic_val.u64[0] != ((uint64_t)INIT_VAL + num_round) ||
+ atomic_val.u64[1] != ((uint64_t)INIT_VAL + num_round));
+}
+
+static inline void test_atomic_fetch_add_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_add_u32(atomic_val, 1);
+
+ *result = ret;
+}
+
+static inline void test_atomic_fetch_add_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_add_u64(atomic_val, 1);
+
+ *result = ret;
+}
+
+static inline int validate_atomic_add_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker, int private)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ if (private)
+ return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL + num_round);
+
+ return odp_atomic_load_u32(atomic_val) != (INIT_VAL + (num_worker * num_round));
+}
+
+static inline int validate_atomic_add_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker, int private)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ if (private)
+ return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL + num_round);
+
+ return odp_atomic_load_u64(atomic_val) != (INIT_VAL + ((uint64_t)num_worker * num_round));
+}
+
+static inline void test_atomic_add_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_add_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_add_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_add_u64(atomic_val, 1);
+}
+
+static inline void test_atomic_fetch_sub_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_sub_u32(atomic_val, 1);
+
+ *result = ret;
+}
+
+static inline void test_atomic_fetch_sub_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_sub_u64(atomic_val, 1);
+
+ *result = ret;
+}
+
+static inline int validate_atomic_sub_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker, int private)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ if (private)
+ return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL - num_round);
+
+ return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL - (num_worker * num_round));
+}
+
+static inline int validate_atomic_sub_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker, int private)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ if (private)
+ return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL - num_round);
+
+ return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL -
+ ((uint64_t)num_worker * num_round));
+}
+
+static inline void test_atomic_sub_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_sub_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_sub_u64(atomic_val, 1);
+}
+
+static inline void test_atomic_fetch_inc_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_inc_u32(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_fetch_inc_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_inc_u64(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_inc_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_inc_u32(atomic_val);
+}
+
+static inline void test_atomic_inc_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_inc_u64(atomic_val);
+}
+
+static inline void test_atomic_fetch_dec_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_dec_u32(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_fetch_dec_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_fetch_dec_u64(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_dec_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_dec_u32(atomic_val);
+}
+
+static inline void test_atomic_dec_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_dec_u64(atomic_val);
+}
+
+static inline void test_atomic_max_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_max = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_max_u32(atomic_val, new_max++);
+}
+
+static inline void test_atomic_max_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_max = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_max_u64(atomic_val, new_max++);
+}
+
+static inline int validate_atomic_max_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+ return (result != ((uint32_t)INIT_VAL + num_round)) && (result != UINT32_MAX);
+}
+
+static inline int validate_atomic_max_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+ return (result != ((uint64_t)INIT_VAL + num_round)) && (result != UINT64_MAX);
+}
+
+static inline void test_atomic_min_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_min = INIT_VAL - 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_min_u32(atomic_val, new_min--);
+}
+
+static inline void test_atomic_min_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_min = INIT_VAL - 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_min_u64(atomic_val, new_min--);
+}
+
+static inline int validate_atomic_min_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+ return result != ((uint32_t)INIT_VAL - num_round) && result != 0;
+}
+
+static inline int validate_atomic_min_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+ uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+ return result != ((uint64_t)INIT_VAL - num_round) && result != 0;
+}
+
+static inline void test_atomic_cas_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+ uint32_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_u32(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+ uint64_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_u64(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_u128(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u128_t *atomic_val = val;
+ odp_u128_t new_val;
+ odp_u128_t old_val;
+
+ new_val.u64[0] = INIT_VAL + 1;
+ new_val.u64[1] = INIT_VAL + 1;
+ old_val.u64[0] = INIT_VAL;
+ old_val.u64[1] = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_u128(atomic_val, &old_val, new_val)) {
+ old_val = new_val;
+ new_val.u64[0]++;
+ new_val.u64[1]++;
+ }
+ }
+}
+
+static inline int validate_atomic_cas_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private)
+{
+ uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+ if (private)
+ return result != ((uint32_t)INIT_VAL + num_round);
+
+ return result > ((uint32_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_cas_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private)
+{
+ uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+ if (private)
+ return result != ((uint64_t)INIT_VAL + num_round);
+
+ return result > ((uint64_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_cas_u128(void *val, void *out ODP_UNUSED, uint32_t num_round,
+ uint32_t num_worker ODP_UNUSED, int private)
+{
+ odp_u128_t result = odp_atomic_load_u128((odp_atomic_u128_t *)val);
+
+ if (private)
+ return (result.u64[0] != ((uint64_t)INIT_VAL + num_round) ||
+ result.u64[1] != ((uint64_t)INIT_VAL + num_round));
+
+ return (result.u64[0] > ((uint64_t)INIT_VAL + num_round) ||
+ result.u64[1] > ((uint64_t)INIT_VAL + num_round));
+}
+
+static inline void test_atomic_xchg_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_xchg_u32(atomic_val, new_val++);
+
+ *result = ret;
+}
+
+static inline void test_atomic_xchg_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_xchg_u64(atomic_val, new_val++);
+
+ *result = ret;
+}
+
+static inline void test_atomic_load_acq_u32(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t *result = out;
+ uint32_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_load_acq_u32(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_load_acq_u64(void *val, void *out, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t *result = out;
+ uint64_t ret = 0;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ ret += odp_atomic_load_acq_u64(atomic_val);
+
+ *result = ret;
+}
+
+static inline void test_atomic_store_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_store_rel_u32(atomic_val, new_val++);
+}
+
+static inline void test_atomic_store_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_store_rel_u64(atomic_val, new_val++);
+}
+
+static inline void test_atomic_add_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_add_rel_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_add_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_add_rel_u64(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_sub_rel_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+
+ for (uint32_t i = 0; i < num_round; i++)
+ odp_atomic_sub_rel_u64(atomic_val, 1);
+}
+
+static inline void test_atomic_cas_acq_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+ uint32_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_u32(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_acq_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+ uint64_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_u64(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_acq_u128(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u128_t *atomic_val = val;
+ odp_u128_t new_val;
+ odp_u128_t old_val;
+
+ new_val.u64[0] = INIT_VAL + 1;
+ new_val.u64[1] = INIT_VAL + 1;
+ old_val.u64[0] = INIT_VAL;
+ old_val.u64[1] = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_u128(atomic_val, &old_val, new_val)) {
+ old_val = new_val;
+ new_val.u64[0]++;
+ new_val.u64[1]++;
+ }
+ }
+}
+
+static inline void test_atomic_cas_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+ uint32_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_rel_u32(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+ uint64_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_rel_u64(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_rel_u128(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u128_t *atomic_val = val;
+ odp_u128_t new_val;
+ odp_u128_t old_val;
+
+ new_val.u64[0] = INIT_VAL + 1;
+ new_val.u64[1] = INIT_VAL + 1;
+ old_val.u64[0] = INIT_VAL;
+ old_val.u64[1] = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_rel_u128(atomic_val, &old_val, new_val)) {
+ old_val = new_val;
+ new_val.u64[0]++;
+ new_val.u64[1]++;
+ }
+ }
+}
+
+static inline void test_atomic_cas_acq_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u32_t *atomic_val = val;
+ uint32_t new_val = INIT_VAL + 1;
+ uint32_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_rel_u32(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_acq_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u64_t *atomic_val = val;
+ uint64_t new_val = INIT_VAL + 1;
+ uint64_t old_val = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_rel_u64(atomic_val, &old_val, new_val))
+ old_val = new_val++;
+ }
+}
+
+static inline void test_atomic_cas_acq_rel_u128(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+ odp_atomic_u128_t *atomic_val = val;
+ odp_u128_t new_val;
+ odp_u128_t old_val;
+
+ new_val.u64[0] = INIT_VAL + 1;
+ new_val.u64[1] = INIT_VAL + 1;
+ old_val.u64[0] = INIT_VAL;
+ old_val.u64[1] = INIT_VAL;
+
+ for (uint32_t i = 0; i < num_round; i++) {
+ if (odp_atomic_cas_acq_rel_u128(atomic_val, &old_val, new_val)) {
+ old_val = new_val;
+ new_val.u64[0]++;
+ new_val.u64[1]++;
+ }
+ }
+}
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Atomic operations performance test\n"
+ "\n"
+ "Usage: odp_atomic_perf [options]\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs (or max %d) (default)\n"
+ " -r, --num_round Number of rounds (default %u)\n"
+ " -p, --private 0: The same atomic variable is shared between threads (default)\n"
+ " 1: Atomic variables are private to each thread\n"
+ " -h, --help This help\n"
+ "\n", DEFAULT_MAX_WORKERS, NUM_ROUNDS);
+}
+
+static void print_info(test_options_t *test_options)
+{
+ odp_atomic_op_t atomic_ops;
+
+ printf("\nAtomic operations performance test configuration:\n");
+ printf(" num cpu %u\n", test_options->num_cpu);
+ printf(" num rounds %u\n", test_options->num_round);
+ printf(" private %i\n", test_options->private);
+ printf("\n");
+
+ atomic_ops.all_bits = 0;
+ odp_atomic_lock_free_u64(&atomic_ops);
+
+ printf("\nAtomic operations lock-free:\n");
+ printf(" odp_atomic_load_u64: %" PRIu32 "\n", atomic_ops.op.load);
+ printf(" odp_atomic_store_u64: %" PRIu32 "\n", atomic_ops.op.store);
+ printf(" odp_atomic_fetch_add_u64: %" PRIu32 "\n", atomic_ops.op.fetch_add);
+ printf(" odp_atomic_add_u64: %" PRIu32 "\n", atomic_ops.op.add);
+ printf(" odp_atomic_fetch_sub_u64: %" PRIu32 "\n", atomic_ops.op.fetch_sub);
+ printf(" odp_atomic_sub_u64: %" PRIu32 "\n", atomic_ops.op.sub);
+ printf(" odp_atomic_fetch_inc_u64: %" PRIu32 "\n", atomic_ops.op.fetch_inc);
+ printf(" odp_atomic_inc_u64: %" PRIu32 "\n", atomic_ops.op.inc);
+ printf(" odp_atomic_fetch_dec_u64: %" PRIu32 "\n", atomic_ops.op.fetch_dec);
+ printf(" odp_atomic_dec_u64: %" PRIu32 "\n", atomic_ops.op.dec);
+ printf(" odp_atomic_min_u64: %" PRIu32 "\n", atomic_ops.op.min);
+ printf(" odp_atomic_max_u64: %" PRIu32 "\n", atomic_ops.op.max);
+ printf(" odp_atomic_cas_u64: %" PRIu32 "\n", atomic_ops.op.cas);
+ printf(" odp_atomic_xchg_u64: %" PRIu32 "\n", atomic_ops.op.xchg);
+
+ atomic_ops.all_bits = 0;
+ odp_atomic_lock_free_u128(&atomic_ops);
+
+ printf(" odp_atomic_load_u128: %" PRIu32 "\n", atomic_ops.op.load);
+ printf(" odp_atomic_store_u128: %" PRIu32 "\n", atomic_ops.op.store);
+ printf(" odp_atomic_cas_u128: %" PRIu32 "\n", atomic_ops.op.cas);
+
+ printf("\n\n");
+}
+
+/*
+ * Parse command line options into 'test_options'.
+ *
+ * Defaults: all available CPUs, NUM_ROUNDS rounds, shared atomic variable.
+ * Returns 0 on success, -1 on bad input or unknown option (usage printed).
+ */
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+	int opt;
+	int long_index;
+	long long num;
+	int ret = 0;
+
+	static const struct option longopts[] = {
+		{"num_cpu", required_argument, NULL, 'c'},
+		{"num_round", required_argument, NULL, 'r'},
+		{"private", required_argument, NULL, 'p'},
+		{"help", no_argument, NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "+c:r:p:h";
+
+	memset(test_options, 0, sizeof(test_options_t));
+	test_options->num_cpu = 0;
+	test_options->num_round = NUM_ROUNDS;
+	test_options->private = 0;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 'c':
+			test_options->num_cpu = atoi(optarg);
+			break;
+		case 'r':
+			/* atol() into uint32_t would silently wrap a negative
+			 * argument (e.g. "-r -5") into a huge round count, so
+			 * range check before the narrowing assignment. */
+			num = atoll(optarg);
+			if (num < 1 || num > UINT32_MAX) {
+				ODPH_ERR("Invalid number of test rounds: %s\n", optarg);
+				return -1;
+			}
+			test_options->num_round = num;
+			break;
+		case 'p':
+			test_options->private = atoi(optarg);
+			break;
+		case 'h':
+			/* fall through */
+		default:
+			print_usage();
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Resolve the worker CPU count and build the worker cpumask.
+ *
+ * num_cpu == 0 selects all available worker CPUs, capped at
+ * DEFAULT_MAX_WORKERS. One thread slot is always reserved for the main
+ * thread. Also initializes the start barrier for the resolved worker
+ * count. Returns 0 on success, -1 on failure.
+ */
+static int set_num_cpu(test_global_t *global)
+{
+	int ret, max_num;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+
+	/* One thread used for the main thread */
+	if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+		ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+		return -1;
+	}
+
+	max_num = num_cpu;
+	if (num_cpu == 0) {
+		max_num = ODP_THREAD_COUNT_MAX - 1;
+		if (max_num > DEFAULT_MAX_WORKERS)
+			max_num = DEFAULT_MAX_WORKERS;
+	}
+
+	ret = odp_cpumask_default_worker(&global->cpumask, max_num);
+
+	/* With an explicit CPU count the mask must hold exactly that many */
+	if (num_cpu && ret != num_cpu) {
+		ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+		return -1;
+	}
+
+	/* Zero: all available workers */
+	if (num_cpu == 0) {
+		if (ret > max_num) {
+			ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
+			return -1;
+		}
+
+		num_cpu = ret;
+		test_options->num_cpu = num_cpu;
+	}
+
+	odp_barrier_init(&global->barrier, num_cpu);
+
+	return 0;
+}
+
+/*
+ * (Re)initialize test state before each test case: the shared atomic
+ * variable, every per-thread private atomic variable, and every
+ * per-thread output slot are reset to INIT_VAL / zero for the given
+ * operand width. Returns -1 on an unknown operand type.
+ */
+static int init_test(test_global_t *global, const char *name, op_bit_t type)
+{
+	odp_u128_t init_val;
+
+	init_val.u64[0] = INIT_VAL;
+	init_val.u64[1] = INIT_VAL;
+
+	printf("TEST: %s\n", name);
+
+	if (type == OP_32BIT)
+		odp_atomic_init_u32(&global->atomic_u32, INIT_VAL);
+	else if (type == OP_64BIT)
+		odp_atomic_init_u64(&global->atomic_u64, INIT_VAL);
+	else if (type == OP_128BIT)
+		odp_atomic_init_u128(&global->atomic_u128, init_val);
+	else
+		return -1;
+
+	/* Private variables are reset for all slots, not only active workers */
+	for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (type == OP_32BIT) {
+			global->output[i].u32 = 0;
+			odp_atomic_init_u32(&global->atomic_private[i].u32, INIT_VAL);
+		} else if (type == OP_64BIT) {
+			global->output[i].u64 = 0;
+			odp_atomic_init_u64(&global->atomic_private[i].u64, INIT_VAL);
+		} else {
+			global->output[i].u128.u64[0] = 0;
+			global->output[i].u128.u64[1] = 0;
+			odp_atomic_init_u128(&global->atomic_private[i].u128, init_val);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Worker thread entry point.
+ *
+ * Selects the shared or (in private mode) per-thread atomic variable and
+ * a stack-local output slot, waits on the barrier so all workers start at
+ * the same time, then times 'num_round' iterations of the benchmarked
+ * function. Elapsed nanoseconds and the function output are stored back
+ * into the global state for later validation and statistics.
+ */
+static int run_test(void *arg)
+{
+	uint64_t nsec;
+	odp_time_t t1, t2;
+	test_thread_ctx_t *thread_ctx = arg;
+	test_global_t *global = thread_ctx->global;
+	test_options_t *test_options = &global->test_options;
+	uint32_t num_round = test_options->num_round;
+	uint32_t idx = thread_ctx->idx;
+	test_fn_t test_func = thread_ctx->func;
+	op_bit_t type = thread_ctx->type;
+	void *val;
+	void *out;
+	uint32_t out_u32 = 0;
+	uint64_t out_u64 = 0;
+	odp_u128_t out_u128;
+
+	out_u128.u64[0] = 0;
+	out_u128.u64[1] = 0;
+
+	/* Default to the shared atomic variable for the operand width */
+	if (type == OP_32BIT) {
+		val = &global->atomic_u32;
+		out = &out_u32;
+	} else if (type == OP_64BIT) {
+		val = &global->atomic_u64;
+		out = &out_u64;
+	} else {
+		val = &global->atomic_u128;
+		out = &out_u128;
+	}
+
+	/* In private mode each worker operates on its own cache aligned copy */
+	if (global->test_options.private) {
+		if (type == OP_32BIT)
+			val = &global->atomic_private[idx].u32;
+		else if (type == OP_64BIT)
+			val = &global->atomic_private[idx].u64;
+		else
+			val = &global->atomic_private[idx].u128;
+	}
+
+	/* Start all workers at the same time */
+	odp_barrier_wait(&global->barrier);
+
+	t1 = odp_time_local();
+
+	test_func(val, out, num_round);
+
+	t2 = odp_time_local();
+
+	nsec = odp_time_diff_ns(t2, t1);
+
+	/* Update stats */
+	thread_ctx->nsec = nsec;
+	if (type == OP_32BIT)
+		global->output[idx].u32 = out_u32;
+	else if (type == OP_64BIT)
+		global->output[idx].u64 = out_u64;
+	else
+		global->output[idx].u128 = out_u128;
+
+	return 0;
+}
+
+/*
+ * Create one worker thread per configured CPU, each running run_test()
+ * with the given benchmark function and operand width in its context.
+ * Returns 0 on success, -1 if not all threads could be created.
+ */
+static int start_workers(test_global_t *global, odp_instance_t instance,
+			 test_fn_t func, op_bit_t type)
+{
+	odph_thread_common_param_t param;
+	int i, ret;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	odph_thread_param_t thr_param[num_cpu];
+
+	odph_thread_common_param_init(&param);
+	param.instance = instance;
+	param.cpumask = &global->cpumask;
+
+	for (i = 0; i < num_cpu; i++) {
+		test_thread_ctx_t *thread_ctx = &global->thread_ctx[i];
+
+		thread_ctx->global = global;
+		thread_ctx->idx = i;
+		thread_ctx->func = func;
+		thread_ctx->type = type;
+
+		odph_thread_param_init(&thr_param[i]);
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+		thr_param[i].start = run_test;
+		thr_param[i].arg = thread_ctx;
+	}
+
+	ret = odph_thread_create(global->thread_tbl, &param, thr_param, num_cpu);
+	if (ret != num_cpu) {
+		ODPH_ERR("Failed to create all threads %i\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Run the test case's validation function once per worker, passing the
+ * atomic variable the worker operated on (shared or private) and that
+ * worker's output slot. Returns -1 on the first validation failure.
+ */
+static int validate_results(test_global_t *global, validate_fn_t validate, op_bit_t type)
+{
+	int i;
+	test_options_t *test_options = &global->test_options;
+	uint32_t num_round = test_options->num_round;
+	int num_cpu = test_options->num_cpu;
+	int private = global->test_options.private;
+	void *val;
+	void *out;
+
+	for (i = 0; i < num_cpu; i++) {
+		if (type == OP_32BIT) {
+			out = &global->output[i].u32;
+			val = &global->atomic_u32;
+			if (private)
+				val = &global->atomic_private[i].u32;
+		} else if (type == OP_64BIT) {
+			out = &global->output[i].u64;
+			val = &global->atomic_u64;
+			if (private)
+				val = &global->atomic_private[i].u64;
+		} else {
+			out = &global->output[i].u128;
+			val = &global->atomic_u128;
+			if (private)
+				val = &global->atomic_private[i].u128;
+		}
+
+		if (validate(val, out, num_round, num_cpu, private))
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Print per-thread and average operation rates.
+ *
+ * Rates are millions of operations per second: operations divided by
+ * elapsed nanoseconds, scaled by 1000 (ops/us == Mops/s). Threads with a
+ * zero nsec count (unused slots) are skipped.
+ */
+static void print_stat(test_global_t *global)
+{
+	int i, num;
+	double nsec_ave;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	uint32_t num_round = test_options->num_round;
+	uint64_t nsec_sum = 0;
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+		nsec_sum += global->thread_ctx[i].nsec;
+
+	if (nsec_sum == 0) {
+		printf("No results.\n");
+		return;
+	}
+
+	/* Average in floating point: plain integer division would truncate
+	 * the per-thread average to whole nanoseconds before the assignment. */
+	nsec_ave = (double)nsec_sum / num_cpu;
+	num = 0;
+
+	printf("---------------------------------------------\n");
+	printf("Per thread results (Millions of ops per sec):\n");
+	printf("---------------------------------------------\n");
+	printf("          1        2        3        4        5        6        7        8        9       10");
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->thread_ctx[i].nsec) {
+			if ((num % 10) == 0)
+				printf("\n   ");
+
+			printf("%8.2f ", num_round / (global->thread_ctx[i].nsec / 1000.0));
+			num++;
+		}
+	}
+	printf("\n\n");
+
+	printf("Average results over %i threads:\n", num_cpu);
+	printf("---------------------------------------\n");
+	printf("  duration:          %8.2f  sec\n", nsec_ave / ODP_TIME_SEC_IN_NS);
+	printf("  operations per cpu: %8.2fM ops/sec\n", num_round / (nsec_ave / 1000.0));
+	printf("  total operations:   %8.2fM ops/sec\n",
+	       (num_cpu * num_round) / (nsec_ave / 1000.0));
+	printf("\n\n");
+}
+
+/**
+ * Test case table
+ *
+ * Each entry pairs one atomic API function benchmark with the validation
+ * callback and the operand width (32/64/128 bit) used to check its result.
+ */
+static test_case_t test_suite[] = {
+ TEST_INFO("odp_atomic_load_u32", test_atomic_load_u32,
+ validate_atomic_init_val_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_store_u32", test_atomic_store_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_add_u32", test_atomic_fetch_add_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_add_u32", test_atomic_add_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_sub_u32", test_atomic_fetch_sub_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_sub_u32", test_atomic_sub_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_inc_u32", test_atomic_fetch_inc_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_inc_u32", test_atomic_inc_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_dec_u32", test_atomic_fetch_dec_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_dec_u32", test_atomic_dec_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_max_u32", test_atomic_max_u32,
+ validate_atomic_max_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_min_u32", test_atomic_min_u32,
+ validate_atomic_min_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_u32", test_atomic_cas_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_xchg_u32", test_atomic_xchg_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_load_acq_u32", test_atomic_load_acq_u32,
+ validate_atomic_init_val_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_store_rel_u32", test_atomic_store_rel_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_add_rel_u32", test_atomic_add_rel_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_sub_rel_u32", test_atomic_sub_rel_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_acq_u32", test_atomic_cas_acq_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_rel_u32", test_atomic_cas_rel_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_acq_rel_u32", test_atomic_cas_acq_rel_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_load_u64", test_atomic_load_u64,
+ validate_atomic_init_val_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_store_u64", test_atomic_store_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_add_u64", test_atomic_fetch_add_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_add_u64", test_atomic_add_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_sub_u64", test_atomic_fetch_sub_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_sub_u64", test_atomic_sub_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_inc_u64", test_atomic_fetch_inc_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_inc_u64", test_atomic_inc_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_dec_u64", test_atomic_fetch_dec_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_dec_u64", test_atomic_dec_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_max_u64", test_atomic_max_u64,
+ validate_atomic_max_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_min_u64", test_atomic_min_u64,
+ validate_atomic_min_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_u64", test_atomic_cas_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_xchg_u64", test_atomic_xchg_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_load_acq_u64", test_atomic_load_acq_u64,
+ validate_atomic_init_val_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_store_rel_u64", test_atomic_store_rel_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_add_rel_u64", test_atomic_add_rel_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_sub_rel_u64", test_atomic_sub_rel_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_acq_u64", test_atomic_cas_acq_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_rel_u64", test_atomic_cas_rel_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_acq_rel_u64", test_atomic_cas_acq_rel_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_load_u128", test_atomic_load_u128,
+ validate_atomic_init_val_u128, OP_128BIT),
+ TEST_INFO("odp_atomic_store_u128", test_atomic_store_u128,
+ validate_atomic_num_round_u128, OP_128BIT),
+ TEST_INFO("odp_atomic_cas_u128", test_atomic_cas_u128,
+ validate_atomic_cas_u128, OP_128BIT),
+ TEST_INFO("odp_atomic_cas_acq_u128", test_atomic_cas_acq_u128,
+ validate_atomic_cas_u128, OP_128BIT),
+ TEST_INFO("odp_atomic_cas_rel_u128", test_atomic_cas_rel_u128,
+ validate_atomic_cas_u128, OP_128BIT),
+ TEST_INFO("odp_atomic_cas_acq_rel_u128", test_atomic_cas_acq_rel_u128,
+ validate_atomic_cas_u128, OP_128BIT),
+};
+
+/* Program entry point: initialize ODP, reserve shared memory for the global
+ * test state, then run every case in test_suite with the configured worker
+ * count, printing statistics and validating results after each case. Exits
+ * with failure on the first error. */
+int main(int argc, char **argv)
+{
+	odph_helper_options_t helper_options;
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm;
+	test_options_t test_options;
+	int num_tests, i;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (parse_options(argc, argv, &test_options))
+		exit(EXIT_FAILURE);
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.schedule = 1;
+	init.not_used.feat.stash = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		ODPH_ERR("Global init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("Local init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Reserve memory for global data from shared mem so that worker
+	 * threads/processes can access it */
+	shm = odp_shm_reserve("test_global", sizeof(test_global_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Shared memory reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	test_global = odp_shm_addr(shm);
+	if (test_global == NULL) {
+		ODPH_ERR("Shared memory alloc failed.\n");
+		exit(EXIT_FAILURE);
+	}
+	memset(test_global, 0, sizeof(test_global_t));
+	test_global->test_options = test_options;
+
+	odp_sys_info_print();
+
+	if (set_num_cpu(test_global))
+		exit(EXIT_FAILURE);
+
+	print_info(&test_global->test_options);
+
+	/* Loop all test cases */
+	num_tests = ODPH_ARRAY_SIZE(test_suite);
+
+	for (i = 0; i < num_tests; i++) {
+		/* Initialize test variables */
+		if (init_test(test_global, test_suite[i].name, test_suite[i].type)) {
+			ODPH_ERR("Failed to initialize atomics.\n");
+			exit(EXIT_FAILURE);
+		}
+
+		/* Start workers */
+		if (start_workers(test_global, instance, test_suite[i].test_fn, test_suite[i].type))
+			exit(EXIT_FAILURE);
+
+		/* Wait workers to exit */
+		odph_thread_join(test_global->thread_tbl, test_global->test_options.num_cpu);
+
+		print_stat(test_global);
+
+		/* Validate test results */
+		if (validate_results(test_global, test_suite[i].validate_fn, test_suite[i].type)) {
+			ODPH_ERR("Test %s result validation failed.\n", test_suite[i].name);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	/* Teardown in reverse order of setup */
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Shm free failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		ODPH_ERR("Local terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(instance)) {
+		ODPH_ERR("Global terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return 0;
+}
diff --git a/test/performance/odp_bench_buffer.c b/test/performance/odp_bench_buffer.c
new file mode 100644
index 000000000..ce14ec8b3
--- /dev/null
+++ b/test/performance/odp_bench_buffer.c
@@ -0,0 +1,896 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_bench_buffer.c
+ *
+ * Microbenchmark application for buffer API functions
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "bench_common.h"
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/** Default buffer size */
+#define TEST_BUF_SIZE 1024
+
+/** Default pool user area size in bytes */
+#define TEST_UAREA_SIZE 8
+
+/** Number of API function calls per test case */
+#define TEST_REPEAT_COUNT 1000
+
+/** Default number of rounds per test case */
+#define TEST_ROUNDS 100u
+
+/** Maximum burst size for *_multi operations */
+#define TEST_MAX_BURST 64
+
+/** Default burst size for *_multi operations */
+#define TEST_DEF_BURST 8
+
+/** Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+ strrchr((file_name), '/') + 1 : (file_name))
+
+#define BENCH_INFO(run_fn, init_fn, term_fn, alt_name) \
+ {.name = #run_fn, .run = run_fn, .init = init_fn, .term = term_fn, .desc = alt_name}
+
+#define BENCH_INFO_COND(run_fn, init_fn, term_fn, alt_name, cond_fn) \
+ {.name = #run_fn, .run = run_fn, .init = init_fn, .term = term_fn, .desc = alt_name, \
+ .cond = cond_fn}
+
+/**
+ * Parsed command line arguments
+ */
+typedef struct {
+	int bench_idx; /**< Benchmark index to run indefinitely */
+	int burst_size; /**< Burst size for *_multi operations */
+	int cache_size; /**< Pool cache size */
+	int time; /**< Measure time vs. CPU cycles */
+	uint32_t rounds; /**< Rounds per test case */
+} appl_args_t;
+
+/**
+ * Grouping of all global data (lives in ODP shared memory, see main())
+ */
+typedef struct {
+	/** Application (parsed) arguments */
+	appl_args_t appl;
+	/** Common benchmark suite data */
+	bench_suite_t suite;
+	/** Buffer pool */
+	odp_pool_t pool;
+	/** Buffer size */
+	uint32_t buf_size;
+	/** Buffer user area size */
+	uint32_t uarea_size;
+	/** Max flow id (0 when scheduler is not flow aware) */
+	uint32_t max_flow_id;
+	/** Array for storing test buffers */
+	odp_buffer_t buf_tbl[TEST_REPEAT_COUNT * TEST_MAX_BURST];
+	/** Array for storing test event */
+	odp_event_t event_tbl[TEST_REPEAT_COUNT * TEST_MAX_BURST];
+	/** Array for storing test pointers */
+	void *ptr_tbl[TEST_REPEAT_COUNT];
+	/** Array for storing test pool handles */
+	odp_pool_t pool_tbl[TEST_REPEAT_COUNT];
+	/** Array for storing test event types */
+	odp_event_type_t event_type_tbl[TEST_REPEAT_COUNT * TEST_MAX_BURST];
+	/** Array for storing test event subtypes */
+	odp_event_subtype_t event_subtype_tbl[TEST_REPEAT_COUNT * TEST_MAX_BURST];
+	/** CPU mask as string */
+	char cpumask_str[ODP_CPUMASK_STR_SIZE];
+} args_t;
+
+/** Global pointer to args */
+static args_t *gbl_args;
+
+/* SIGINT handler: ask the worker to stop by raising the suite exit flag.
+ * Guards against delivery before gbl_args has been mapped. */
+static void sig_handler(int signo ODP_UNUSED)
+{
+	if (gbl_args == NULL)
+		return;
+	odp_atomic_store_u32(&gbl_args->suite.exit_worker, 1);
+}
+
+/* Allocate exactly 'num' buffers from the global test pool into buf[].
+ * Retries until the pool has delivered all of them; aborts the process on
+ * an allocation error since the benchmark cannot continue without them. */
+static void allocate_test_buffers(odp_buffer_t buf[], int num)
+{
+	int num_buf = 0;
+
+	while (num_buf < num) {
+		int ret;
+
+		ret = odp_buffer_alloc_multi(gbl_args->pool, &buf[num_buf], num - num_buf);
+		if (ret < 0)
+			ODPH_ABORT("Allocating test buffers failed\n");
+
+		num_buf += ret;
+	}
+}
+
+/* Benchmark init: allocate one buffer per test repeat */
+static void create_buffers(void)
+{
+	allocate_test_buffers(gbl_args->buf_tbl, TEST_REPEAT_COUNT);
+}
+
+/* Benchmark init: allocate a burst of buffers per test repeat */
+static void create_buffers_multi(void)
+{
+	allocate_test_buffers(gbl_args->buf_tbl, TEST_REPEAT_COUNT * gbl_args->appl.burst_size);
+}
+
+/* Benchmark init: allocate buffers and store their event handles */
+static void create_events(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+
+	allocate_test_buffers(gbl_args->buf_tbl, TEST_REPEAT_COUNT);
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		gbl_args->event_tbl[i] = odp_buffer_to_event(buf_tbl[i]);
+}
+
+/* Benchmark init: allocate bursts of buffers and store their event handles */
+static void create_events_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+
+	allocate_test_buffers(gbl_args->buf_tbl,
+			      TEST_REPEAT_COUNT * gbl_args->appl.burst_size);
+
+	for (int i = 0; i < TEST_REPEAT_COUNT * gbl_args->appl.burst_size; i++)
+		gbl_args->event_tbl[i] = odp_buffer_to_event(buf_tbl[i]);
+}
+
+/* Benchmark teardown: release the buffers created by create_buffers()/
+ * create_events() */
+static void free_buffers(void)
+{
+	odp_buffer_free_multi(gbl_args->buf_tbl, TEST_REPEAT_COUNT);
+}
+
+/* Benchmark teardown: release the buffers created by the *_multi variants */
+static void free_buffers_multi(void)
+{
+	odp_buffer_free_multi(gbl_args->buf_tbl, TEST_REPEAT_COUNT * gbl_args->appl.burst_size);
+}
+
+/* Condition callback: run user-area benchmarks only when the pool has a
+ * non-zero user area */
+static int check_uarea(void)
+{
+	return !!gbl_args->uarea_size;
+}
+
+/* Condition callback: run flow-id benchmarks only when the scheduler was
+ * configured flow aware (see main()) */
+static int check_flow_aware(void)
+{
+	return !!gbl_args->max_flow_id;
+}
+
+/* Benchmark odp_buffer_from_event(): one call per repeat */
+static int buffer_from_event(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		buf_tbl[i] = odp_buffer_from_event(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_from_event_multi(): one burst per repeat */
+static int buffer_from_event_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_buffer_from_event_multi(&buf_tbl[i * burst_size],
+					    &event_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_to_event(): one call per repeat */
+static int buffer_to_event(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		event_tbl[i] = odp_buffer_to_event(buf_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_to_event_multi(): one burst per repeat */
+static int buffer_to_event_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_buffer_to_event_multi(&buf_tbl[i * burst_size],
+					  &event_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_addr(); results are stored so the compiler cannot
+ * optimize the calls away */
+static int buffer_addr(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	void **ptr_tbl = gbl_args->ptr_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		ptr_tbl[i] = odp_buffer_addr(buf_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_size(); sizes are summed to keep the calls live */
+static int buffer_size(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	uint32_t ret = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		ret += odp_buffer_size(buf_tbl[i]);
+
+	return ret;
+}
+
+/* Benchmark odp_buffer_user_area() (run only when the pool has a user area) */
+static int buffer_user_area(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	void **ptr_tbl = gbl_args->ptr_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		ptr_tbl[i] = odp_buffer_user_area(buf_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_pool() */
+static int buffer_pool(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_pool_t *pool_tbl = gbl_args->pool_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		pool_tbl[i] = odp_buffer_pool(buf_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_alloc(); freeing is done by the term hook */
+static int buffer_alloc(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_pool_t pool = gbl_args->pool;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		buf_tbl[i] = odp_buffer_alloc(pool);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_alloc_multi(); returns the total number allocated
+ * so a short allocation is reported as failure by the framework */
+static int buffer_alloc_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_pool_t pool = gbl_args->pool;
+	int burst_size = gbl_args->appl.burst_size;
+	int num = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		num += odp_buffer_alloc_multi(pool, &buf_tbl[num], burst_size);
+
+	return num;
+}
+
+/* Benchmark odp_buffer_free(); buffers come from the init hook */
+static int buffer_free(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_buffer_free(buf_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_buffer_free_multi(); bursts come from the init hook */
+static int buffer_free_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_buffer_free_multi(&buf_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark an alloc+free round trip; returns 0 (failure) if the pool
+ * runs dry */
+static int buffer_alloc_free(void)
+{
+	odp_pool_t pool = gbl_args->pool;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+		odp_buffer_t buf = odp_buffer_alloc(pool);
+
+		if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+			return 0;
+
+		odp_buffer_free(buf);
+	}
+	return i;
+}
+
+/* Benchmark a multi alloc+free round trip; returns 0 (failure) if the pool
+ * runs dry */
+static int buffer_alloc_free_multi(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	odp_pool_t pool = gbl_args->pool;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+		int num = odp_buffer_alloc_multi(pool, buf_tbl, burst_size);
+
+		if (odp_unlikely(num < 1))
+			return 0;
+
+		/* Free only what was actually allocated */
+		odp_buffer_free_multi(buf_tbl, num);
+	}
+	return i;
+}
+
+/* Benchmark odp_buffer_is_valid(); results are summed to keep the calls live */
+static int buffer_is_valid(void)
+{
+	odp_buffer_t *buf_tbl = gbl_args->buf_tbl;
+	uint32_t ret = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		ret += odp_buffer_is_valid(buf_tbl[i]);
+
+	return ret;
+}
+
+/* Benchmark odp_event_type() */
+static int event_type(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_type_t *event_type_tbl = gbl_args->event_type_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		event_type_tbl[i] = odp_event_type(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_subtype() */
+static int event_subtype(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_subtype_t *event_subtype_tbl = gbl_args->event_subtype_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		event_subtype_tbl[i] = odp_event_subtype(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_types(): type and subtype in one call */
+static int event_types(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_type_t *event_type_tbl = gbl_args->event_type_tbl;
+	odp_event_subtype_t *event_subtype_tbl = gbl_args->event_subtype_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		event_type_tbl[i] = odp_event_types(event_tbl[i], &event_subtype_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_types_multi() with subtype output */
+static int event_types_multi(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_type_t *event_type_tbl = gbl_args->event_type_tbl;
+	odp_event_subtype_t *event_subtype_tbl = gbl_args->event_subtype_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_types_multi(&event_tbl[i * burst_size],
+				      &event_type_tbl[i * burst_size],
+				      &event_subtype_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_event_types_multi() with subtype output disabled (NULL) */
+static int event_types_multi_no_sub(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_type_t *event_type_tbl = gbl_args->event_type_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_types_multi(&event_tbl[i * burst_size],
+				      &event_type_tbl[i * burst_size], NULL, burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_event_type_multi(): counts of leading same-type events
+ * are summed to keep the calls live */
+static int event_type_multi(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_event_type_t *event_type_tbl = gbl_args->event_type_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	uint32_t ret = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		ret += odp_event_type_multi(&event_tbl[i * burst_size], burst_size,
+					    &event_type_tbl[i]);
+
+	return ret;
+}
+
+/* Benchmark odp_event_pool() */
+static int event_pool(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	odp_pool_t *pool_tbl = gbl_args->pool_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		pool_tbl[i] = odp_event_pool(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_user_area() (run only when the pool has a user area) */
+static int event_user_area(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	void **ptr_tbl = gbl_args->ptr_tbl;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		ptr_tbl[i] = odp_event_user_area(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_user_area_and_flag(); flags are summed to keep the
+ * calls live */
+static int event_user_area_and_flag(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	void **ptr_tbl = gbl_args->ptr_tbl;
+	int ret = 0;
+	int flag;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++) {
+		ptr_tbl[i] = odp_event_user_area_and_flag(event_tbl[i], &flag);
+		ret += flag;
+	}
+
+	return ret;
+}
+
+/* Benchmark odp_event_is_valid(); results are summed to keep the calls live */
+static int event_is_valid(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+
+	uint32_t ret = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		ret += odp_event_is_valid(event_tbl[i]);
+
+	return ret;
+}
+
+/* Benchmark odp_event_free(); events come from the init hook */
+static int event_free(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_free(event_tbl[i]);
+
+	return i;
+}
+
+/* Benchmark odp_event_free_multi() */
+static int event_free_multi(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_free_multi(&event_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_event_free_sp(): all events in a burst are from the same
+ * pool, as that API requires */
+static int event_free_sp(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int burst_size = gbl_args->appl.burst_size;
+	int i;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_free_sp(&event_tbl[i * burst_size], burst_size);
+
+	return i;
+}
+
+/* Benchmark odp_event_flow_id(). NOTE(review): returns !ret, i.e. nonzero
+ * only when every flow id was 0 — presumably the untouched default;
+ * confirm against odp_event_flow_id() semantics. */
+static int event_flow_id(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	uint32_t ret = 0;
+
+	for (int i = 0; i < TEST_REPEAT_COUNT; i++)
+		ret += odp_event_flow_id(event_tbl[i]);
+
+	return !ret;
+}
+
+/* Benchmark odp_event_flow_id_set() with flow id 0 */
+static int event_flow_id_set(void)
+{
+	odp_event_t *event_tbl = gbl_args->event_tbl;
+	int i = 0;
+
+	for (i = 0; i < TEST_REPEAT_COUNT; i++)
+		odp_event_flow_id_set(event_tbl[i], 0);
+
+	return i;
+}
+
+/**
+ * Print usage information
+ *
+ * Keep the option list in sync with parse_args()/longopts below.
+ */
+static void usage(char *progname)
+{
+	printf("\n"
+	       "OpenDataPlane Buffer/Event API microbenchmarks.\n"
+	       "\n"
+	       "Usage: %s OPTIONS\n"
+	       " E.g. %s\n"
+	       "\n"
+	       "Optional OPTIONS:\n"
+	       " -b, --burst <num> Test burst size.\n"
+	       " -c, --cache_size <num> Pool cache size.\n"
+	       " -i, --index <idx> Benchmark index to run indefinitely.\n"
+	       " -r, --rounds <num> Run each test case 'num' times (default %u).\n"
+	       " -t, --time <opt> Time measurement. 0: measure CPU cycles (default), 1: measure time\n"
+	       " -h, --help Display help and exit.\n\n"
+	       "\n", NO_PATH(progname), NO_PATH(progname), TEST_ROUNDS);
+}
+
+/**
+ * Parse and store the command line arguments
+ *
+ * Exits the process on invalid burst size or round count.
+ *
+ * @param argc argument count
+ * @param argv[] argument vector
+ * @param appl_args Store application arguments here
+ */
+static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
+{
+	int opt;
+	int long_index;
+	static const struct option longopts[] = {
+		{"burst", required_argument, NULL, 'b'},
+		{"cache_size", required_argument, NULL, 'c'},
+		{"index", required_argument, NULL, 'i'},
+		{"rounds", required_argument, NULL, 'r'},
+		{"time", required_argument, NULL, 't'},
+		{"help", no_argument, NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "c:b:i:r:t:h";
+
+	appl_args->bench_idx = 0; /* Run all benchmarks */
+	appl_args->burst_size = TEST_DEF_BURST;
+	appl_args->cache_size = -1;
+	appl_args->rounds = TEST_ROUNDS;
+	appl_args->time = 0;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;	/* No more options */
+
+		switch (opt) {
+		case 'c':
+			appl_args->cache_size = atoi(optarg);
+			break;
+		case 'b':
+			appl_args->burst_size = atoi(optarg);
+			break;
+		case 'h':
+			usage(argv[0]);
+			exit(EXIT_SUCCESS);
+			break;
+		case 'i':
+			appl_args->bench_idx = atoi(optarg);
+			break;
+		case 'r':
+			appl_args->rounds = atoi(optarg);
+			break;
+		case 't':
+			appl_args->time = atoi(optarg);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (appl_args->burst_size < 1 ||
+	    appl_args->burst_size > TEST_MAX_BURST) {
+		printf("Invalid burst size (max %d)\n", TEST_MAX_BURST);
+		exit(EXIT_FAILURE);
+	}
+
+	if (appl_args->rounds < 1) {
+		/* 'rounds' is uint32_t: "%u" is the matching format
+		 * specifier ("%d" with an unsigned argument is undefined
+		 * behavior per C11 fprintf). */
+		printf("Invalid number of test rounds: %u\n", appl_args->rounds);
+		exit(EXIT_FAILURE);
+	}
+
+	optind = 1; /* Reset 'extern optind' from the getopt lib */
+}
+
+/**
+ * Print system and application info
+ *
+ * Called once after option parsing and capability checks, before the
+ * benchmark pool is created.
+ */
+static void print_info(void)
+{
+	odp_sys_info_print();
+
+	printf("\n"
+	       "odp_bench_buffer options\n"
+	       "------------------------\n");
+
+	printf("Burst size: %d\n", gbl_args->appl.burst_size);
+	/* buf_size is uint32_t: use "%u", not "%d" (mismatched printf
+	 * conversion specifier is undefined behavior per C11). */
+	printf("Buffer size: %u\n", gbl_args->buf_size);
+	printf("CPU mask: %s\n", gbl_args->cpumask_str);
+	if (gbl_args->appl.cache_size < 0)
+		printf("Pool cache size: default\n");
+	else
+		printf("Pool cache size: %d\n", gbl_args->appl.cache_size);
+	printf("Measurement unit: %s\n", gbl_args->appl.time ? "nsec" : "CPU cycles");
+	printf("Test rounds: %u\n", gbl_args->appl.rounds);
+	printf("\n");
+}
+
+/**
+ * Benchmark table: one entry per buffer/event API function under test.
+ *
+ * Each entry names the benchmark, its optional init/term hooks and an
+ * optional condition callback that can skip it at run time.
+ * Declared static: the table is referenced only from main() below and
+ * should not pollute the global namespace.
+ */
+static bench_info_t test_suite[] = {
+	BENCH_INFO(buffer_from_event, create_events, free_buffers, NULL),
+	BENCH_INFO(buffer_from_event_multi, create_events_multi, free_buffers_multi, NULL),
+	BENCH_INFO(buffer_to_event, create_buffers, free_buffers, NULL),
+	BENCH_INFO(buffer_to_event_multi, create_buffers_multi, free_buffers_multi, NULL),
+	BENCH_INFO(buffer_addr, create_buffers, free_buffers, NULL),
+	BENCH_INFO(buffer_size, create_buffers, free_buffers, NULL),
+	BENCH_INFO_COND(buffer_user_area, create_buffers, free_buffers, NULL, check_uarea),
+	BENCH_INFO(buffer_pool, create_buffers, free_buffers, NULL),
+	BENCH_INFO(buffer_alloc, NULL, free_buffers, NULL),
+	BENCH_INFO(buffer_alloc_multi, NULL, free_buffers_multi, NULL),
+	BENCH_INFO(buffer_free, create_buffers, NULL, NULL),
+	BENCH_INFO(buffer_free_multi, create_buffers_multi, NULL, NULL),
+	BENCH_INFO(buffer_alloc_free, NULL, NULL, NULL),
+	BENCH_INFO(buffer_alloc_free_multi, NULL, NULL, NULL),
+	BENCH_INFO(buffer_is_valid, create_buffers, free_buffers, NULL),
+	BENCH_INFO(event_type, create_events, free_buffers, NULL),
+	BENCH_INFO(event_subtype, create_events, free_buffers, NULL),
+	BENCH_INFO(event_types, create_events, free_buffers, NULL),
+	BENCH_INFO(event_types_multi, create_events_multi, free_buffers_multi, NULL),
+	BENCH_INFO(event_types_multi_no_sub, create_events_multi, free_buffers_multi,
+		   "event_types_multi (no sub)"),
+	BENCH_INFO(event_type_multi, create_events_multi, free_buffers_multi, NULL),
+	BENCH_INFO(event_pool, create_events, free_buffers, NULL),
+	BENCH_INFO_COND(event_user_area, create_events, free_buffers, NULL, check_uarea),
+	BENCH_INFO_COND(event_user_area_and_flag, create_events, free_buffers, NULL, check_uarea),
+	BENCH_INFO(event_is_valid, create_events, free_buffers, NULL),
+	BENCH_INFO(event_free, create_events, NULL, NULL),
+	BENCH_INFO(event_free_multi, create_events_multi, NULL, NULL),
+	BENCH_INFO(event_free_sp, create_events_multi, NULL, NULL),
+	BENCH_INFO_COND(event_flow_id, create_events, free_buffers, NULL, check_flow_aware),
+	BENCH_INFO_COND(event_flow_id_set, create_events, free_buffers, NULL, check_flow_aware),
+};
+
+/**
+ * ODP buffer microbenchmark application
+ *
+ * Initializes ODP, sizes the buffer pool from capabilities and command line
+ * options, then runs the benchmark suite on a single worker thread and
+ * returns the suite's result code.
+ */
+int main(int argc, char *argv[])
+{
+	odph_helper_options_t helper_options;
+	odph_thread_t worker_thread;
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param;
+	int cpu;
+	odp_shm_t shm;
+	odp_cpumask_t cpumask, default_mask;
+	odp_schedule_capability_t sched_capa;
+	odp_pool_capability_t capa;
+	odp_pool_param_t params;
+	odp_instance_t instance;
+	odp_init_t init_param;
+	uint32_t buf_num;
+	/* NOTE(review): suite.retval is narrowed into uint8_t here — values
+	 * outside 0..255 would be truncated; confirm retval range in
+	 * bench_common.h. */
+	uint8_t ret;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: reading ODP helper options failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_init_param_init(&init_param);
+	init_param.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init_param, NULL)) {
+		ODPH_ERR("Error: ODP global init failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("Error: ODP local init failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Reserve memory for args from shared mem */
+	shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: shared mem reserve failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	gbl_args = odp_shm_addr(shm);
+	if (gbl_args == NULL) {
+		ODPH_ERR("Error: shared mem alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memset(gbl_args, 0, sizeof(args_t));
+
+	/* Parse and store the application arguments */
+	parse_args(argc, argv, &gbl_args->appl);
+
+	/* Hand the parsed configuration to the common bench framework */
+	bench_suite_init(&gbl_args->suite);
+	gbl_args->suite.bench = test_suite;
+	gbl_args->suite.num_bench = ODPH_ARRAY_SIZE(test_suite);
+	gbl_args->suite.indef_idx = gbl_args->appl.bench_idx;
+	gbl_args->suite.rounds = gbl_args->appl.rounds;
+	gbl_args->suite.repeat_count = TEST_REPEAT_COUNT;
+	gbl_args->suite.measure_time = !!gbl_args->appl.time;
+
+	/* Get default worker cpumask (exactly one worker is used) */
+	if (odp_cpumask_default_worker(&default_mask, 1) != 1) {
+		ODPH_ERR("Error: unable to allocate worker thread\n");
+		exit(EXIT_FAILURE);
+	}
+	(void)odp_cpumask_to_str(&default_mask, gbl_args->cpumask_str,
+				 sizeof(gbl_args->cpumask_str));
+
+	if (odp_schedule_capability(&sched_capa)) {
+		ODPH_ERR("Error: schedule capability failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Enable flow awareness when supported so flow-id benchmarks can run */
+	gbl_args->max_flow_id = 0;
+	if (sched_capa.max_flow_id) {
+		odp_schedule_config_t sched_config;
+
+		odp_schedule_config_init(&sched_config);
+		sched_config.max_flow_id = 1;
+
+		if (odp_schedule_config(&sched_config)) {
+			ODPH_ERR("Error: schedule config failed\n");
+			exit(EXIT_FAILURE);
+		}
+		gbl_args->max_flow_id = 1;
+	}
+
+	if (odp_pool_capability(&capa)) {
+		ODPH_ERR("Error: unable to query pool capability\n");
+		exit(EXIT_FAILURE);
+	}
+
+	buf_num = gbl_args->appl.burst_size * TEST_REPEAT_COUNT;
+
+	if (capa.buf.max_num && capa.buf.max_num < buf_num) {
+		ODPH_ERR("Error: pool size not supported (max %" PRIu32 ")\n", capa.buf.max_num);
+		exit(EXIT_FAILURE);
+	} else if (gbl_args->appl.cache_size > (int)capa.buf.max_cache_size) {
+		ODPH_ERR("Error: cache size not supported (max %" PRIu32 ")\n",
+			 capa.buf.max_cache_size);
+		exit(EXIT_FAILURE);
+	}
+
+	/* Clamp buffer and user area sizes to pool capabilities */
+	gbl_args->buf_size = TEST_BUF_SIZE;
+	if (capa.buf.max_size && capa.buf.max_size < TEST_BUF_SIZE)
+		gbl_args->buf_size = capa.buf.max_size;
+
+	gbl_args->uarea_size = TEST_UAREA_SIZE < capa.buf.max_uarea_size ?
+			TEST_UAREA_SIZE : capa.buf.max_uarea_size;
+
+	print_info();
+
+	/* Create buffer pool */
+	odp_pool_param_init(&params);
+	params.buf.size = gbl_args->buf_size;
+	params.buf.num = buf_num;
+	params.buf.uarea_size = gbl_args->uarea_size;
+	if (gbl_args->appl.cache_size >= 0)
+		params.buf.cache_size = gbl_args->appl.cache_size;
+	params.type = ODP_POOL_BUFFER;
+
+	gbl_args->pool = odp_pool_create("microbench", &params);
+	if (gbl_args->pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Error: pool create failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_pool_print(gbl_args->pool);
+
+	memset(&worker_thread, 0, sizeof(odph_thread_t));
+
+	/* Allow Ctrl-C to stop a long run cleanly */
+	signal(SIGINT, sig_handler);
+
+	/* Create worker thread pinned to the first worker CPU */
+	cpu = odp_cpumask_first(&default_mask);
+
+	odp_cpumask_zero(&cpumask);
+	odp_cpumask_set(&cpumask, cpu);
+
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = instance;
+	thr_common.cpumask = &cpumask;
+	thr_common.share_param = 1;
+
+	odph_thread_param_init(&thr_param);
+	thr_param.start = bench_run;
+	thr_param.arg = &gbl_args->suite;
+	thr_param.thr_type = ODP_THREAD_WORKER;
+
+	/* NOTE(review): return value of odph_thread_create() is not checked
+	 * here — a failed create would make the join below undefined. */
+	odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+	odph_thread_join(&worker_thread, 1);
+
+	ret = gbl_args->suite.retval;
+
+	/* Teardown in reverse order of setup */
+	if (odp_pool_destroy(gbl_args->pool)) {
+		ODPH_ERR("Error: pool destroy\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Error: shm free\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		ODPH_ERR("Error: term local\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(instance)) {
+		ODPH_ERR("Error: term global\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return ret;
+}
diff --git a/test/performance/odp_bench_misc.c b/test/performance/odp_bench_misc.c
new file mode 100644
index 000000000..61afdc398
--- /dev/null
+++ b/test/performance/odp_bench_misc.c
@@ -0,0 +1,1063 @@
+/* Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_bench_misc.c
+ *
+ * Microbenchmark application for miscellaneous API functions
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* Needed for sigaction */
+#endif
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "bench_common.h"
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Number of API function calls per test case */
+#define REPEAT_COUNT 1024
+
+/* Default number of rounds per test case */
+#define ROUNDS 1000u
+
+#define BENCH_INFO(run_fn, init_fn, max, alt_name) \
+ {.name = #run_fn, .run = run_fn, .init = init_fn, .max_rounds = max, .desc = alt_name}
+
+typedef struct {
+ /* Measure time vs CPU cycles */
+ int time;
+
+ /* Benchmark index to run indefinitely */
+ int bench_idx;
+
+ /* Rounds per test case */
+ uint32_t rounds;
+
+} appl_args_t;
+
+/* Global data */
+typedef struct {
+ appl_args_t appl;
+
+ /* Common benchmark suite data */
+ bench_suite_t suite;
+
+ /* Time stamp 1 */
+ odp_time_t t1[REPEAT_COUNT];
+ /* Time stamp 2 */
+ odp_time_t t2[REPEAT_COUNT];
+ /* Resulting time stamp */
+ odp_time_t t3[REPEAT_COUNT];
+
+ odp_time_t global_short[REPEAT_COUNT];
+ odp_time_t global_long[REPEAT_COUNT];
+
+ /* Integer input / output data */
+ uint64_t a1[REPEAT_COUNT];
+ uint64_t a2[REPEAT_COUNT];
+ uint32_t b1[REPEAT_COUNT];
+ uint32_t b2[REPEAT_COUNT];
+ uint16_t c1[REPEAT_COUNT];
+ uint16_t c2[REPEAT_COUNT];
+
+ /* CPU mask as string */
+ char cpumask_str[ODP_CPUMASK_STR_SIZE];
+
+} gbl_args_t;
+
+static gbl_args_t *gbl_args;
+
+static void sig_handler(int signo ODP_UNUSED)
+{
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->suite.exit_worker, 1);
+}
+
+static int setup_sig_handler(void)
+{
+ struct sigaction action;
+
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = sig_handler;
+
+ /* No additional signals blocked. By default, the signal which triggered
+ * the handler is blocked. */
+ if (sigemptyset(&action.sa_mask))
+ return -1;
+
+ if (sigaction(SIGINT, &action, NULL))
+ return -1;
+
+ return 0;
+}
+
+static void init_time_global(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_global();
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t2[i] = odp_time_global();
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_global_ns();
+}
+
+static void init_time_local(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_local();
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t2[i] = odp_time_local();
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_local_ns();
+}
+
+static void init_cpu_cycles(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_cycles();
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a2[i] = odp_cpu_cycles();
+}
+
+static int time_local(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_local();
+
+ return i;
+}
+
+static int time_local_strict(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_local_strict();
+
+ return i;
+}
+
+static int time_local_ns(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_local_ns();
+
+ return i;
+}
+
+static int time_local_strict_ns(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_local_strict_ns();
+
+ return i;
+}
+
+static int time_global(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_global();
+
+ return i;
+}
+
+static int time_global_strict(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_global_strict();
+
+ return i;
+}
+
+static int time_global_ns(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_global_ns();
+
+ return i;
+}
+
+static int time_global_strict_ns(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_global_strict_ns();
+
+ return i;
+}
+
+static int time_diff(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ odp_time_t *t3 = gbl_args->t3;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t3[i] = odp_time_diff(t2[i], t1[i]);
+
+ return i;
+}
+
+static int time_diff_ns(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ uint64_t res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_time_diff_ns(t2[i], t1[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int time_add_ns(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t3 = gbl_args->t3;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t3[i] = odp_time_add_ns(t1[i], a1[i]);
+
+ return i;
+}
+
+static int time_sum(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ odp_time_t *t3 = gbl_args->t3;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t3[i] = odp_time_sum(t1[i], t2[i]);
+
+ return i;
+}
+
+static int time_to_ns_short(void)
+{
+ int i;
+ odp_time_t *t = gbl_args->global_short;
+ uint64_t res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_time_to_ns(t[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int time_to_ns_long(void)
+{
+ int i;
+ odp_time_t *t = gbl_args->global_long;
+ uint64_t res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_time_to_ns(t[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int time_local_from_ns(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_local_from_ns(a1[i]);
+
+ return i;
+}
+
+static int time_global_from_ns(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t1[i] = odp_time_global_from_ns(a1[i]);
+
+ return i;
+}
+
+static int time_cmp(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t2 = gbl_args->t2;
+ int res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_time_cmp(t1[i], t2[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int time_local_res(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_local_res();
+
+ return i;
+}
+
+static int time_global_res(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_time_global_res();
+
+ return i;
+}
+
+static int time_startup(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ odp_time_startup_t startup;
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ odp_time_startup(&startup);
+ a1[i] = startup.global_ns;
+ }
+
+ return i;
+}
+
+static int cpu_id(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_id();
+
+ return i;
+}
+
+static int cpu_count(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_count();
+
+ return i;
+}
+
+static int cpu_hz(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_hz();
+
+ return i;
+}
+
+static int cpu_hz_id(void)
+{
+ int i;
+ const int id = odp_cpu_id();
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_hz_id(id);
+
+ return i;
+}
+
+static int cpu_hz_max(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_hz_max();
+
+ return i;
+}
+
+static int cpu_hz_max_id(void)
+{
+ int i;
+ const int id = odp_cpu_id();
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_hz_max_id(id);
+
+ return i;
+}
+
+static int cpu_cycles(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_cycles();
+
+ return i;
+}
+
+static int cpu_cycles_diff(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+ uint64_t res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_cpu_cycles_diff(a2[i], a1[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int cpu_cycles_max(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_cycles_max();
+
+ return i;
+}
+
+static int cpu_cycles_resolution(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_cpu_cycles_resolution();
+
+ return i;
+}
+
+static int cpu_pause(void)
+{
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ odp_cpu_pause();
+
+ return i;
+}
+
+static int thread_id(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_thread_id();
+
+ return i;
+}
+
+static int thread_count(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_thread_count();
+
+ return i;
+}
+
+static int thread_count_max(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_thread_count_max();
+
+ return i;
+}
+
+static int thread_type(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = (int)odp_thread_type();
+
+ return i;
+}
+
+static int be_to_cpu_64(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a2[i] = odp_be_to_cpu_64(a1[i]);
+
+ return i;
+}
+
+static int be_to_cpu_32(void)
+{
+ int i;
+ uint32_t *b1 = gbl_args->b1;
+ uint32_t *b2 = gbl_args->b2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ b2[i] = odp_be_to_cpu_32(b1[i]);
+
+ return i;
+}
+
+static int be_to_cpu_16(void)
+{
+ int i;
+ uint16_t *c1 = gbl_args->c1;
+ uint16_t *c2 = gbl_args->c2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ c2[i] = odp_be_to_cpu_16(c1[i]);
+
+ return i;
+}
+
+static int cpu_to_be_64(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a2[i] = odp_cpu_to_be_64(a1[i]);
+
+ return i;
+}
+
+static int cpu_to_be_32(void)
+{
+ int i;
+ uint32_t *b1 = gbl_args->b1;
+ uint32_t *b2 = gbl_args->b2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ b2[i] = odp_cpu_to_be_32(b1[i]);
+
+ return i;
+}
+
+static int cpu_to_be_16(void)
+{
+ int i;
+ uint16_t *c1 = gbl_args->c1;
+ uint16_t *c2 = gbl_args->c2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ c2[i] = odp_cpu_to_be_16(c1[i]);
+
+ return i;
+}
+
+static int le_to_cpu_64(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a2[i] = odp_le_to_cpu_64(a1[i]);
+
+ return i;
+}
+
+static int le_to_cpu_32(void)
+{
+ int i;
+ uint32_t *b1 = gbl_args->b1;
+ uint32_t *b2 = gbl_args->b2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ b2[i] = odp_le_to_cpu_32(b1[i]);
+
+ return i;
+}
+
+static int le_to_cpu_16(void)
+{
+ int i;
+ uint16_t *c1 = gbl_args->c1;
+ uint16_t *c2 = gbl_args->c2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ c2[i] = odp_le_to_cpu_16(c1[i]);
+
+ return i;
+}
+
+static int cpu_to_le_64(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t *a2 = gbl_args->a2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a2[i] = odp_cpu_to_le_64(a1[i]);
+
+ return i;
+}
+
+static int cpu_to_le_32(void)
+{
+ int i;
+ uint32_t *b1 = gbl_args->b1;
+ uint32_t *b2 = gbl_args->b2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ b2[i] = odp_cpu_to_le_32(b1[i]);
+
+ return i;
+}
+
+static int cpu_to_le_16(void)
+{
+ int i;
+ uint16_t *c1 = gbl_args->c1;
+ uint16_t *c2 = gbl_args->c2;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ c2[i] = odp_cpu_to_le_16(c1[i]);
+
+ return i;
+}
+
+static int mb_release(void)
+{
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ odp_mb_release();
+
+ return i;
+}
+
+static int mb_acquire(void)
+{
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ odp_mb_acquire();
+
+ return i;
+}
+
+static int mb_full(void)
+{
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ odp_mb_full();
+
+ return i;
+}
+
+static int prefetch(void)
+{
+ uint64_t *a1 = gbl_args->a1;
+ uint32_t index = 0;
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ odp_prefetch(&a1[index]);
+
+ /* Prefetch every 64B */
+ index += 8;
+ if (odp_unlikely(index >= REPEAT_COUNT))
+ index = 0;
+ }
+
+ return i;
+}
+
+static int prefetch_store(void)
+{
+ uint64_t *a1 = gbl_args->a1;
+ uint32_t index = 0;
+ int i;
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ odp_prefetch_store(&a1[index]);
+
+ /* Prefetch every 64B */
+ index += 8;
+ if (odp_unlikely(index >= REPEAT_COUNT))
+ index = 0;
+ }
+
+ return i;
+}
+
+bench_info_t test_suite[] = {
+ BENCH_INFO(time_local, NULL, 0, NULL),
+ BENCH_INFO(time_local_strict, NULL, 0, NULL),
+ BENCH_INFO(time_local_ns, NULL, 0, NULL),
+ BENCH_INFO(time_local_strict_ns, NULL, 0, NULL),
+ BENCH_INFO(time_global, NULL, 0, NULL),
+ BENCH_INFO(time_global_strict, NULL, 0, NULL),
+ BENCH_INFO(time_global_ns, NULL, 0, NULL),
+ BENCH_INFO(time_global_strict_ns, NULL, 0, NULL),
+ BENCH_INFO(time_diff, init_time_global, 0, "time_diff (global)"),
+ BENCH_INFO(time_diff, init_time_local, 0, "time_diff (local)"),
+ BENCH_INFO(time_diff_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_add_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_sum, init_time_global, 0, NULL),
+ BENCH_INFO(time_to_ns_short, NULL, 0, "time_to_ns (short)"),
+ BENCH_INFO(time_to_ns_long, NULL, 0, "time_to_ns (long)"),
+ BENCH_INFO(time_local_from_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_global_from_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_cmp, init_time_global, 0, NULL),
+ BENCH_INFO(time_local_res, NULL, 0, NULL),
+ BENCH_INFO(time_global_res, NULL, 0, NULL),
+ BENCH_INFO(time_startup, NULL, 0, NULL),
+ BENCH_INFO(cpu_id, NULL, 0, NULL),
+ BENCH_INFO(cpu_count, NULL, 0, NULL),
+ BENCH_INFO(cpu_hz, NULL, 1, NULL),
+ BENCH_INFO(cpu_hz_id, NULL, 1, NULL),
+ BENCH_INFO(cpu_hz_max, NULL, 0, NULL),
+ BENCH_INFO(cpu_hz_max_id, NULL, 0, NULL),
+ BENCH_INFO(cpu_cycles, NULL, 0, NULL),
+ BENCH_INFO(cpu_cycles_diff, init_cpu_cycles, 0, NULL),
+ BENCH_INFO(cpu_cycles_max, NULL, 0, NULL),
+ BENCH_INFO(cpu_cycles_resolution, NULL, 0, NULL),
+ BENCH_INFO(cpu_pause, NULL, 0, NULL),
+ BENCH_INFO(thread_id, NULL, 0, NULL),
+ BENCH_INFO(thread_count, NULL, 0, NULL),
+ BENCH_INFO(thread_count_max, NULL, 0, NULL),
+ BENCH_INFO(thread_type, NULL, 0, NULL),
+ BENCH_INFO(be_to_cpu_64, NULL, 0, NULL),
+ BENCH_INFO(be_to_cpu_32, NULL, 0, NULL),
+ BENCH_INFO(be_to_cpu_16, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_be_64, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_be_32, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_be_16, NULL, 0, NULL),
+ BENCH_INFO(le_to_cpu_64, NULL, 0, NULL),
+ BENCH_INFO(le_to_cpu_32, NULL, 0, NULL),
+ BENCH_INFO(le_to_cpu_16, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_le_64, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_le_32, NULL, 0, NULL),
+ BENCH_INFO(cpu_to_le_16, NULL, 0, NULL),
+ BENCH_INFO(mb_release, NULL, 0, NULL),
+ BENCH_INFO(mb_acquire, NULL, 0, NULL),
+ BENCH_INFO(mb_full, NULL, 0, NULL),
+ BENCH_INFO(prefetch, NULL, 0, NULL),
+ BENCH_INFO(prefetch_store, NULL, 0, NULL),
+};
+
+/* Print usage information */
+static void usage(void)
+{
+ printf("\n"
+ "ODP miscellaneous API micro benchmarks\n"
+ "\n"
+ "Options:\n"
+ " -t, --time <opt> Time measurement. 0: measure CPU cycles (default), 1: measure time\n"
+ " -i, --index <idx> Benchmark index to run indefinitely.\n"
+ " -r, --rounds <num> Run each test case 'num' times (default %u).\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n", ROUNDS);
+}
+
+/* Parse command line arguments */
+static int parse_args(int argc, char *argv[])
+{
+ int opt;
+ int long_index;
+ appl_args_t *appl_args = &gbl_args->appl;
+ static const struct option longopts[] = {
+ {"time", required_argument, NULL, 't'},
+ {"index", required_argument, NULL, 'i'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "t:i:r:h";
+
+ appl_args->time = 0; /* Measure CPU cycles */
+ appl_args->bench_idx = 0; /* Run all benchmarks */
+ appl_args->rounds = ROUNDS;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 't':
+ appl_args->time = atoi(optarg);
+ break;
+ case 'i':
+ appl_args->bench_idx = atoi(optarg);
+ break;
+ case 'r':
+ appl_args->rounds = atoi(optarg);
+ break;
+ case 'h':
+ usage();
+ return 1;
+ default:
+ ODPH_ERR("Bad option. Use -h for help.\n");
+ return -1;
+ }
+ }
+
+ if (appl_args->rounds < 1) {
+ ODPH_ERR("Invalid test cycle repeat count: %u\n", appl_args->rounds);
+ return -1;
+ }
+
+ if (appl_args->bench_idx < 0 || appl_args->bench_idx > (int)ODPH_ARRAY_SIZE(test_suite)) {
+ ODPH_ERR("Bad bench index %i\n", appl_args->bench_idx);
+ return -1;
+ }
+
+ optind = 1; /* Reset 'extern optind' from the getopt lib */
+
+ return 0;
+}
+
+/* Print system and application info */
+static void print_info(void)
+{
+ odp_sys_info_print();
+
+ printf("\n"
+ "odp_bench_misc options\n"
+ "----------------------\n");
+
+ printf("CPU mask: %s\n", gbl_args->cpumask_str);
+ printf("Measurement unit: %s\n", gbl_args->appl.time ? "nsec" : "CPU cycles");
+ printf("Test rounds: %u\n", gbl_args->appl.rounds);
+ printf("\n");
+}
+
+int main(int argc, char *argv[])
+{
+ odph_helper_options_t helper_options;
+ odph_thread_t worker_thread;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ int cpu, i;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask, default_mask;
+ odp_instance_t instance;
+ odp_init_t init_param;
+ int ret = 0;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("Global init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (setup_sig_handler()) {
+ ODPH_ERR("Signal handler setup failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(gbl_args_t), ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared mem reserve failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ gbl_args = odp_shm_addr(shm);
+ if (gbl_args == NULL) {
+ ODPH_ERR("Shared mem alloc failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(gbl_args, 0, sizeof(gbl_args_t));
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ gbl_args->t1[i] = ODP_TIME_NULL;
+ gbl_args->t2[i] = ODP_TIME_NULL;
+ gbl_args->t3[i] = ODP_TIME_NULL;
+ gbl_args->global_short[i] = odp_time_global_from_ns(ODP_TIME_MSEC_IN_NS);
+ gbl_args->global_long[i] = odp_time_global_from_ns(10 * ODP_TIME_SEC_IN_NS);
+ gbl_args->a1[i] = i;
+ gbl_args->a2[i] = i;
+ gbl_args->b1[i] = i;
+ gbl_args->b2[i] = i;
+ gbl_args->c1[i] = i;
+ gbl_args->c2[i] = i;
+ }
+
+ /* Parse and store the application arguments */
+ ret = parse_args(argc, argv);
+ if (ret)
+ goto exit;
+
+ bench_suite_init(&gbl_args->suite);
+ gbl_args->suite.bench = test_suite;
+ gbl_args->suite.num_bench = ODPH_ARRAY_SIZE(test_suite);
+ gbl_args->suite.measure_time = !!gbl_args->appl.time;
+ gbl_args->suite.indef_idx = gbl_args->appl.bench_idx;
+ gbl_args->suite.rounds = gbl_args->appl.rounds;
+ gbl_args->suite.repeat_count = REPEAT_COUNT;
+
+ /* Get default worker cpumask */
+ if (odp_cpumask_default_worker(&default_mask, 1) != 1) {
+ ODPH_ERR("Unable to allocate worker thread\n");
+ ret = -1;
+ goto exit;
+ }
+
+ (void)odp_cpumask_to_str(&default_mask, gbl_args->cpumask_str,
+ sizeof(gbl_args->cpumask_str));
+
+ print_info();
+
+ memset(&worker_thread, 0, sizeof(odph_thread_t));
+
+ /* Create worker thread */
+ cpu = odp_cpumask_first(&default_mask);
+
+ odp_cpumask_zero(&cpumask);
+ odp_cpumask_set(&cpumask, cpu);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = bench_run;
+ thr_param.arg = &gbl_args->suite;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+ odph_thread_join(&worker_thread, 1);
+
+ ret = gbl_args->suite.retval;
+
+exit:
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shared mem free failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (ret < 0)
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
diff --git a/test/common_plat/performance/odp_bench_packet.c b/test/performance/odp_bench_packet.c
index 8a1333276..cb9e3ca03 100644
--- a/test/common_plat/performance/odp_bench_packet.c
+++ b/test/performance/odp_bench_packet.c
@@ -1,13 +1,16 @@
-/* Copyright (c) 2017, Linaro Limited
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2022-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
- * @file
+ * @example odp_bench_packet.c
*
- * @example odp_bench_packet.c Microbenchmarks for packet functions
+ * Microbenchmark application for packet API functions
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
*/
#include <stdlib.h>
@@ -17,16 +20,13 @@
#include <inttypes.h>
#include <signal.h>
-#include <test_debug.h>
+#include <test_packet_ipv4.h>
+#include <test_packet_ipv6.h>
#include <odp_api.h>
-#include <odp/helper/threads.h>
-#include <odp/helper/eth.h>
-#include <odp/helper/ip.h>
-#include <odp/helper/udp.h>
+#include <odp/helper/odph_api.h>
-/** Minimum number of packet data bytes in the first segment */
-#define PKT_POOL_SEG_LEN 128
+#include "bench_common.h"
/** Packet user area size in bytes */
#define PKT_POOL_UAREA_SIZE 8
@@ -37,11 +37,11 @@
/** Maximum test packet size */
#define TEST_MAX_PKT_SIZE 2048
-/** Number of test runs per individual benchmark */
+/** Number of API function calls per test case */
#define TEST_REPEAT_COUNT 1000
-/** Number of times to run tests for each packet size */
-#define TEST_SIZE_RUN_COUNT 10
+/** Number of rounds per test case */
+#define TEST_ROUNDS 2u
/** Maximum burst size for *_multi operations */
#define TEST_MAX_BURST 64
@@ -67,17 +67,14 @@
#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
strrchr((file_name), '/') + 1 : (file_name))
-#define BENCH_INFO(run, init, term, name) \
- {#run, run, init, term, name}
+#define BENCH_INFO(run_fn, init_fn, term_fn, alt_name) \
+ {.name = #run_fn, .run = run_fn, .init = init_fn, .term = term_fn, .desc = alt_name}
ODP_STATIC_ASSERT((TEST_ALIGN_OFFSET + TEST_ALIGN_LEN) <= TEST_MIN_PKT_SIZE,
"Invalid_alignment");
-/** Warm up round packet size */
-#define WARM_UP TEST_MIN_PKT_SIZE
-
/** Test packet sizes */
-const uint32_t test_packet_len[] = {WARM_UP, TEST_MIN_PKT_SIZE, 128, 256, 512,
+const uint32_t test_packet_len[] = {TEST_MIN_PKT_SIZE, 128, 256, 512,
1024, 1518, TEST_MAX_PKT_SIZE};
/**
@@ -86,48 +83,21 @@ const uint32_t test_packet_len[] = {WARM_UP, TEST_MIN_PKT_SIZE, 128, 256, 512,
typedef struct {
int bench_idx; /** Benchmark index to run indefinitely */
int burst_size; /** Burst size for *_multi operations */
+ int cache_size; /** Pool cache size */
+ int time; /** Measure time vs. CPU cycles */
+ uint32_t rounds; /** Rounds per test case */
} appl_args_t;
/**
- * Initialize benchmark resources
- */
-typedef void (*bench_init_fn_t)(void);
-
-/**
- * Run benchmark
- *
- * @retval >0 on success
- * */
-typedef int (*bench_run_fn_t)(void);
-
-/**
- * Release benchmark resources
- */
-typedef void (*bench_term_fn_t)(void);
-
-/**
- * Benchmark data
- */
-typedef struct {
- const char *name;
- bench_run_fn_t run;
- bench_init_fn_t init;
- bench_term_fn_t term;
- const char *desc;
-} bench_info_t;
-
-/**
* Grouping of all global data
*/
typedef struct {
/** Application (parsed) arguments */
appl_args_t appl;
+ /** Common benchmark suite data */
+ bench_suite_t suite;
/** Packet pool */
odp_pool_t pool;
- /** Benchmark functions */
- bench_info_t *bench;
- /** Number of benchmark functions */
- int num_bench;
struct {
/** Test packet length */
uint32_t len;
@@ -143,7 +113,7 @@ typedef struct {
/** Array for storing test packets */
odp_packet_t pkt2_tbl[TEST_REPEAT_COUNT];
/** Array for storing test event */
- odp_event_t event_tbl[TEST_REPEAT_COUNT];
+ odp_event_t event_tbl[TEST_REPEAT_COUNT * TEST_MAX_BURST];
/** Array for storing test pointers */
void *ptr_tbl[TEST_REPEAT_COUNT];
/** Array for storing test segments */
@@ -158,48 +128,16 @@ typedef struct {
odp_time_t ts_tbl[TEST_REPEAT_COUNT];
/** Array for storing test data */
uint8_t data_tbl[TEST_REPEAT_COUNT][TEST_MAX_PKT_SIZE];
- /** Benchmark run failed */
- uint8_t bench_failed;
} args_t;
/** Global pointer to args */
static args_t *gbl_args;
-/** Global barrier to synchronize main and worker */
-static odp_barrier_t barrier;
-/** Break worker loop if set to 1 */
-static int exit_thread;
static void sig_handler(int signo ODP_UNUSED)
{
- exit_thread = 1;
-}
-
-/**
- * Run given benchmark indefinitely
- */
-static void run_indef(args_t *args, int idx)
-{
- const char *desc;
-
- desc = args->bench[idx].desc != NULL ?
- args->bench[idx].desc : args->bench[idx].name;
-
- printf("Running %s() indefinitely\n", desc);
-
- while (!exit_thread) {
- int ret;
-
- if (args->bench[idx].init != NULL)
- args->bench[idx].init();
-
- ret = args->bench[idx].run();
-
- if (args->bench[idx].term != NULL)
- args->bench[idx].term();
-
- if (!ret)
- LOG_ABORT("Benchmark %s failed\n", desc);
- }
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->suite.exit_worker, 1);
}
/**
@@ -207,96 +145,38 @@ static void run_indef(args_t *args, int idx)
*/
static int run_benchmarks(void *arg)
{
- int i, j, k;
+ int i;
args_t *args = arg;
- int num_sizes = sizeof(test_packet_len) / sizeof(test_packet_len[0]);
- double results[gbl_args->num_bench][num_sizes];
+ bench_suite_t *suite = &args->suite;
+ int num_sizes = ODPH_ARRAY_SIZE(test_packet_len);
+ double results[num_sizes][suite->num_bench];
memset(results, 0, sizeof(results));
- printf("\nRunning benchmarks (cycles per call)\n"
- "------------------------------------\n");
-
for (i = 0; i < num_sizes; i++) {
- uint64_t tot_cycles = 0;
-
- printf("\nPacket length: %6d bytes\n"
- "---------------------------\n", test_packet_len[i]);
+ printf("Packet length: %6d bytes", test_packet_len[i]);
gbl_args->pkt.len = test_packet_len[i];
- for (j = 0, k = 1; j < gbl_args->num_bench; k++) {
- int ret;
- uint64_t c1, c2;
- const char *desc;
-
- if (args->appl.bench_idx &&
- (j + 1) != args->appl.bench_idx) {
- j++;
- continue;
- } else if (args->appl.bench_idx &&
- (j + 1) == args->appl.bench_idx) {
- run_indef(args, j);
- return 0;
- }
-
- desc = args->bench[j].desc != NULL ?
- args->bench[j].desc :
- args->bench[j].name;
-
- if (args->bench[j].init != NULL)
- args->bench[j].init();
-
- c1 = odp_cpu_cycles();
- ret = args->bench[j].run();
- c2 = odp_cpu_cycles();
-
- if (args->bench[j].term != NULL)
- args->bench[j].term();
-
- if (!ret) {
- LOG_ERR("Benchmark %s failed\n", desc);
- args->bench_failed = 1;
- return -1;
- }
-
- tot_cycles += odp_cpu_cycles_diff(c2, c1);
-
- if (k >= TEST_SIZE_RUN_COUNT) {
- double cycles;
-
- /** Each benchmark runs internally
- * TEST_REPEAT_COUNT times. */
- cycles = ((double)tot_cycles) /
- (TEST_SIZE_RUN_COUNT *
- TEST_REPEAT_COUNT);
- results[j][i] = cycles;
-
- printf("%-30s: %8.1f\n", desc, cycles);
-
- j++;
- k = 0;
- tot_cycles = 0;
- }
- }
- }
- printf("\n%-30s", "Benchmark / packet_size [B]");
- for (i = 0; i < num_sizes; i++) {
- if (i == 0)
- printf(" WARM UP ");
- else
- printf("%8.1d ", test_packet_len[i]);
+ suite->result = results[i];
+
+ bench_run(suite);
}
+
+ printf("\n%-35s", "Benchmark / packet_size [B]");
+ for (i = 0; i < num_sizes; i++)
+ printf("%8.1d ", test_packet_len[i]);
+
printf("\n---------------------------------");
for (i = 0; i < num_sizes; i++)
printf("----------");
- for (i = 0; i < gbl_args->num_bench; i++) {
- printf("\n[%02d] %-30s", i + 1, args->bench[i].desc != NULL ?
- args->bench[i].desc : args->bench[i].name);
+ for (i = 0; i < suite->num_bench; i++) {
+ printf("\n[%02d] odp_%-26s", i + 1, suite->bench[i].desc != NULL ?
+ suite->bench[i].desc : suite->bench[i].name);
- for (j = 0; j < num_sizes; j++)
- printf("%8.1f ", results[i][j]);
+ for (int j = 0; j < num_sizes; j++)
+ printf("%8.1f ", results[j][i]);
}
printf("\n\n");
return 0;
@@ -312,7 +192,7 @@ static void allocate_test_packets(uint32_t len, odp_packet_t pkt[], int num)
ret = odp_packet_alloc_multi(gbl_args->pool, len, &pkt[pkts],
num - pkts);
if (ret < 0)
- LOG_ABORT("Allocating test packets failed\n");
+ ODPH_ABORT("Allocating test packets failed\n");
pkts += ret;
}
@@ -338,6 +218,21 @@ static void alloc_concat_packets(void)
TEST_REPEAT_COUNT);
}
+static void alloc_ref_packets(void)
+{
+ int i;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ odp_packet_t *ref_tbl = gbl_args->pkt2_tbl;
+
+ allocate_test_packets(gbl_args->pkt.len, pkt_tbl, TEST_REPEAT_COUNT);
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ ref_tbl[i] = odp_packet_ref(pkt_tbl[i], TEST_MIN_PKT_SIZE / 2);
+ if (ref_tbl[i] == ODP_PACKET_INVALID)
+ ODPH_ABORT("Allocating packet reference failed\n");
+ }
+}
+
static void alloc_packets_twice(void)
{
allocate_test_packets(gbl_args->pkt.len, gbl_args->pkt_tbl,
@@ -346,6 +241,77 @@ static void alloc_packets_twice(void)
TEST_REPEAT_COUNT);
}
+static void alloc_parse_packets(const void *pkt_data, uint32_t len)
+{
+ int i;
+
+ allocate_test_packets(len, gbl_args->pkt_tbl, TEST_REPEAT_COUNT);
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ if (odp_packet_copy_from_mem(gbl_args->pkt_tbl[i], 0, len,
+ pkt_data))
+ ODPH_ABORT("Copying test packet failed\n");
+ }
+}
+
+static void alloc_parse_packets_ipv4_tcp(void)
+{
+ alloc_parse_packets(test_packet_ipv4_tcp, sizeof(test_packet_ipv4_tcp));
+}
+
+static void alloc_parse_packets_ipv4_udp(void)
+{
+ alloc_parse_packets(test_packet_ipv4_udp, sizeof(test_packet_ipv4_udp));
+}
+
+static void alloc_parse_packets_ipv6_tcp(void)
+{
+ alloc_parse_packets(test_packet_ipv6_tcp, sizeof(test_packet_ipv6_tcp));
+}
+
+static void alloc_parse_packets_ipv6_udp(void)
+{
+ alloc_parse_packets(test_packet_ipv6_udp, sizeof(test_packet_ipv6_udp));
+}
+
+static void alloc_parse_packets_multi(const void *pkt_data, uint32_t len)
+{
+ int i;
+
+ allocate_test_packets(len, gbl_args->pkt_tbl,
+ TEST_REPEAT_COUNT * gbl_args->appl.burst_size);
+
+ for (i = 0; i < TEST_REPEAT_COUNT * gbl_args->appl.burst_size; i++) {
+ if (odp_packet_copy_from_mem(gbl_args->pkt_tbl[i], 0, len,
+ pkt_data))
+ ODPH_ABORT("Copying test packet failed\n");
+ }
+}
+
+static void alloc_parse_packets_multi_ipv4_tcp(void)
+{
+ alloc_parse_packets_multi(test_packet_ipv4_tcp,
+ sizeof(test_packet_ipv4_tcp));
+}
+
+static void alloc_parse_packets_multi_ipv4_udp(void)
+{
+ alloc_parse_packets_multi(test_packet_ipv4_udp,
+ sizeof(test_packet_ipv4_udp));
+}
+
+static void alloc_parse_packets_multi_ipv6_tcp(void)
+{
+ alloc_parse_packets_multi(test_packet_ipv6_tcp,
+ sizeof(test_packet_ipv6_tcp));
+}
+
+static void alloc_parse_packets_multi_ipv6_udp(void)
+{
+ alloc_parse_packets_multi(test_packet_ipv6_udp,
+ sizeof(test_packet_ipv6_udp));
+}
+
static void create_packets(void)
{
int i;
@@ -382,7 +348,7 @@ static void create_packets(void)
if (odp_packet_l2_offset_set(pkt_tbl[i], TEST_L2_OFFSET) ||
odp_packet_l3_offset_set(pkt_tbl[i], TEST_L3_OFFSET) ||
odp_packet_l4_offset_set(pkt_tbl[i], TEST_L4_OFFSET))
- LOG_ABORT("Setting test packet offsets failed\n");
+ ODPH_ABORT("Setting test packet offsets failed\n");
odp_packet_flow_hash_set(pkt_tbl[i], i);
odp_packet_ts_set(pkt_tbl[i], odp_time_local());
@@ -403,6 +369,18 @@ static void create_events(void)
gbl_args->event_tbl[i] = odp_packet_to_event(pkt_tbl[i]);
}
+static void create_events_multi(void)
+{
+ int i;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+
+ allocate_test_packets(gbl_args->pkt.len, gbl_args->pkt_tbl,
+ TEST_REPEAT_COUNT * gbl_args->appl.burst_size);
+
+ for (i = 0; i < TEST_REPEAT_COUNT * gbl_args->appl.burst_size; i++)
+ gbl_args->event_tbl[i] = odp_packet_to_event(pkt_tbl[i]);
+}
+
static void free_packets(void)
{
odp_packet_free_multi(gbl_args->pkt_tbl, TEST_REPEAT_COUNT);
@@ -420,17 +398,7 @@ static void free_packets_twice(void)
odp_packet_free_multi(gbl_args->pkt2_tbl, TEST_REPEAT_COUNT);
}
-static int bench_empty(void)
-{
- int i;
-
- for (i = 0; i < TEST_REPEAT_COUNT; i++)
- gbl_args->output_tbl[i] = i;
-
- return i;
-}
-
-static int bench_packet_alloc(void)
+static int packet_alloc(void)
{
int i;
@@ -445,7 +413,7 @@ static int bench_packet_alloc(void)
return i;
}
-static int bench_packet_alloc_multi(void)
+static int packet_alloc_multi(void)
{
int i;
int pkts = 0;
@@ -458,7 +426,7 @@ static int bench_packet_alloc_multi(void)
return pkts;
}
-static int bench_packet_free(void)
+static int packet_free(void)
{
int i;
@@ -468,7 +436,7 @@ static int bench_packet_free(void)
return i;
}
-static int bench_packet_free_multi(void)
+static int packet_free_multi(void)
{
int i;
@@ -481,7 +449,20 @@ static int bench_packet_free_multi(void)
return i;
}
-static int bench_packet_alloc_free(void)
+static int packet_free_sp(void)
+{
+ int i;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ int pkt_idx = i * gbl_args->appl.burst_size;
+
+ odp_packet_free_sp(&gbl_args->pkt_tbl[pkt_idx],
+ gbl_args->appl.burst_size);
+ }
+ return i;
+}
+
+static int packet_alloc_free(void)
{
int i;
@@ -495,7 +476,7 @@ static int bench_packet_alloc_free(void)
return i;
}
-static int bench_packet_alloc_free_multi(void)
+static int packet_alloc_free_multi(void)
{
int i;
int pkts;
@@ -504,12 +485,16 @@ static int bench_packet_alloc_free_multi(void)
pkts = odp_packet_alloc_multi(gbl_args->pool, gbl_args->pkt.len,
gbl_args->pkt_tbl,
gbl_args->appl.burst_size);
+
+ if (pkts < 0)
+ ODPH_ABORT("Packet alloc failed\n");
+
odp_packet_free_multi(gbl_args->pkt_tbl, pkts);
}
return i;
}
-static int bench_packet_reset(void)
+static int packet_reset(void)
{
int i;
int ret = 0;
@@ -520,7 +505,7 @@ static int bench_packet_reset(void)
return !ret;
}
-static int bench_packet_from_event(void)
+static int packet_from_event(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -531,7 +516,21 @@ static int bench_packet_from_event(void)
return i;
}
-static int bench_packet_to_event(void)
+static int packet_from_event_multi(void)
+{
+ int i;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ int idx = i * gbl_args->appl.burst_size;
+
+ odp_packet_from_event_multi(&gbl_args->pkt_tbl[idx],
+ &gbl_args->event_tbl[idx],
+ gbl_args->appl.burst_size);
+ }
+ return i;
+}
+
+static int packet_to_event(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -542,7 +541,21 @@ static int bench_packet_to_event(void)
return i;
}
-static int bench_packet_head(void)
+static int packet_to_event_multi(void)
+{
+ int i;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ int idx = i * gbl_args->appl.burst_size;
+
+ odp_packet_to_event_multi(&gbl_args->pkt_tbl[idx],
+ &gbl_args->event_tbl[idx],
+ gbl_args->appl.burst_size);
+ }
+ return i;
+}
+
+static int packet_head(void)
{
int i;
@@ -552,7 +565,7 @@ static int bench_packet_head(void)
return i;
}
-static int bench_packet_buf_len(void)
+static int packet_buf_len(void)
{
int i;
uint32_t ret = 0;
@@ -563,7 +576,7 @@ static int bench_packet_buf_len(void)
return ret;
}
-static int bench_packet_data(void)
+static int packet_data(void)
{
int i;
@@ -573,7 +586,19 @@ static int bench_packet_data(void)
return i;
}
-static int bench_packet_seg_len(void)
+static int packet_data_seg_len(void)
+{
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ uint32_t *output_tbl = gbl_args->output_tbl;
+ int i;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ gbl_args->ptr_tbl[i] = odp_packet_data_seg_len(pkt_tbl[i],
+ &output_tbl[i]);
+ return i;
+}
+
+static int packet_seg_len(void)
{
int i;
uint32_t ret = 0;
@@ -584,7 +609,7 @@ static int bench_packet_seg_len(void)
return ret;
}
-static int bench_packet_len(void)
+static int packet_len(void)
{
int i;
uint32_t ret = 0;
@@ -595,7 +620,7 @@ static int bench_packet_len(void)
return ret;
}
-static int bench_packet_headroom(void)
+static int packet_headroom(void)
{
int i;
uint32_t ret = 0;
@@ -603,10 +628,10 @@ static int bench_packet_headroom(void)
for (i = 0; i < TEST_REPEAT_COUNT; i++)
ret += odp_packet_headroom(gbl_args->pkt_tbl[i]);
- return i;
+ return i + ret;
}
-static int bench_packet_tailroom(void)
+static int packet_tailroom(void)
{
int i;
uint32_t ret = 0;
@@ -614,10 +639,10 @@ static int bench_packet_tailroom(void)
for (i = 0; i < TEST_REPEAT_COUNT; i++)
ret += odp_packet_tailroom(gbl_args->pkt_tbl[i]);
- return i;
+ return i + ret;
}
-static int bench_packet_tail(void)
+static int packet_tail(void)
{
int i;
@@ -627,7 +652,7 @@ static int bench_packet_tail(void)
return i;
}
-static int bench_packet_offset(void)
+static int packet_offset(void)
{
int i;
uint32_t offset = gbl_args->pkt.len / 2;
@@ -638,7 +663,7 @@ static int bench_packet_offset(void)
return i;
}
-static int bench_packet_prefetch(void)
+static int packet_prefetch(void)
{
int i;
@@ -648,7 +673,7 @@ static int bench_packet_prefetch(void)
return i;
}
-static int bench_packet_push_head(void)
+static int packet_push_head(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -660,7 +685,7 @@ static int bench_packet_push_head(void)
return i;
}
-static int bench_packet_pull_head(void)
+static int packet_pull_head(void)
{
int i;
uint32_t len = gbl_args->pkt.seg_len - 1;
@@ -672,7 +697,7 @@ static int bench_packet_pull_head(void)
return i;
}
-static int bench_packet_push_tail(void)
+static int packet_push_tail(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -684,7 +709,7 @@ static int bench_packet_push_tail(void)
return i;
}
-static int bench_packet_pull_tail(void)
+static int packet_pull_tail(void)
{
int i;
uint32_t len = gbl_args->pkt.seg_len - 1;
@@ -696,7 +721,7 @@ static int bench_packet_pull_tail(void)
return i;
}
-static int bench_packet_extend_head(void)
+static int packet_extend_head(void)
{
int i;
int ret = 0;
@@ -706,12 +731,12 @@ static int bench_packet_extend_head(void)
uint32_t *data_tbl = gbl_args->output_tbl;
for (i = 0; i < TEST_REPEAT_COUNT; i++)
- ret += odp_packet_extend_head(&pkt_tbl[i], len, ptr_tbl[i],
+ ret += odp_packet_extend_head(&pkt_tbl[i], len, &ptr_tbl[i],
&data_tbl[i]);
return ret >= 0;
}
-static int bench_packet_trunc_head(void)
+static int packet_trunc_head(void)
{
int i;
int ret = 0;
@@ -721,12 +746,12 @@ static int bench_packet_trunc_head(void)
uint32_t *data_tbl = gbl_args->output_tbl;
for (i = 0; i < TEST_REPEAT_COUNT; i++)
- ret += odp_packet_trunc_head(&pkt_tbl[i], len, ptr_tbl[i],
+ ret += odp_packet_trunc_head(&pkt_tbl[i], len, &ptr_tbl[i],
&data_tbl[i]);
return ret >= 0;
}
-static int bench_packet_extend_tail(void)
+static int packet_extend_tail(void)
{
int i;
int ret = 0;
@@ -736,12 +761,12 @@ static int bench_packet_extend_tail(void)
uint32_t *data_tbl = gbl_args->output_tbl;
for (i = 0; i < TEST_REPEAT_COUNT; i++)
- ret += odp_packet_extend_tail(&pkt_tbl[i], len, ptr_tbl[i],
+ ret += odp_packet_extend_tail(&pkt_tbl[i], len, &ptr_tbl[i],
&data_tbl[i]);
return ret >= 0;
}
-static int bench_packet_trunc_tail(void)
+static int packet_trunc_tail(void)
{
int i;
int ret = 0;
@@ -751,12 +776,12 @@ static int bench_packet_trunc_tail(void)
uint32_t *data_tbl = gbl_args->output_tbl;
for (i = 0; i < TEST_REPEAT_COUNT; i++)
- ret += odp_packet_trunc_tail(&pkt_tbl[i], len, ptr_tbl[i],
+ ret += odp_packet_trunc_tail(&pkt_tbl[i], len, &ptr_tbl[i],
&data_tbl[i]);
return ret >= 0;
}
-static int bench_packet_add_data(void)
+static int packet_add_data(void)
{
int i;
int ret = 0;
@@ -769,7 +794,7 @@ static int bench_packet_add_data(void)
return ret >= 0;
}
-static int bench_packet_rem_data(void)
+static int packet_rem_data(void)
{
int i;
int ret = 0;
@@ -782,7 +807,7 @@ static int bench_packet_rem_data(void)
return ret >= 0;
}
-static int bench_packet_align(void)
+static int packet_align(void)
{
int i;
int ret = 0;
@@ -794,7 +819,7 @@ static int bench_packet_align(void)
return ret >= 0;
}
-static int bench_packet_is_segmented(void)
+static int packet_is_segmented(void)
{
int i;
uint32_t ret = 0;
@@ -805,7 +830,7 @@ static int bench_packet_is_segmented(void)
return (ret == 0) ? 1 : ret;
}
-static int bench_packet_num_segs(void)
+static int packet_num_segs(void)
{
int i;
uint32_t ret = 0;
@@ -816,7 +841,7 @@ static int bench_packet_num_segs(void)
return ret;
}
-static int bench_packet_first_seg(void)
+static int packet_first_seg(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -827,7 +852,7 @@ static int bench_packet_first_seg(void)
return i;
}
-static int bench_packet_last_seg(void)
+static int packet_last_seg(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -838,7 +863,7 @@ static int bench_packet_last_seg(void)
return i;
}
-static int bench_packet_next_seg(void)
+static int packet_next_seg(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -850,7 +875,7 @@ static int bench_packet_next_seg(void)
return i;
}
-static int bench_packet_seg_data(void)
+static int packet_seg_data(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -862,7 +887,7 @@ static int bench_packet_seg_data(void)
return i;
}
-static int bench_packet_seg_data_len(void)
+static int packet_seg_data_len(void)
{
int i;
uint32_t ret = 0;
@@ -875,7 +900,7 @@ static int bench_packet_seg_data_len(void)
return ret;
}
-static int bench_packet_concat(void)
+static int packet_concat(void)
{
int i;
int ret = 0;
@@ -888,7 +913,7 @@ static int bench_packet_concat(void)
return ret >= 0;
}
-static int bench_packet_split(void)
+static int packet_split(void)
{
int i;
int ret = 0;
@@ -904,7 +929,7 @@ static int bench_packet_split(void)
return ret >= 0;
}
-static int bench_packet_copy(void)
+static int packet_copy(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -916,7 +941,7 @@ static int bench_packet_copy(void)
return i;
}
-static int bench_packet_copy_part(void)
+static int packet_copy_part(void)
{
int i;
uint32_t len = gbl_args->pkt.len / 2;
@@ -929,7 +954,7 @@ static int bench_packet_copy_part(void)
return i;
}
-static int bench_packet_copy_to_mem(void)
+static int packet_copy_to_mem(void)
{
int i;
uint32_t ret = 0;
@@ -942,7 +967,7 @@ static int bench_packet_copy_to_mem(void)
return !ret;
}
-static int bench_packet_copy_from_mem(void)
+static int packet_copy_from_mem(void)
{
int i;
uint32_t ret = 0;
@@ -955,7 +980,7 @@ static int bench_packet_copy_from_mem(void)
return !ret;
}
-static int bench_packet_copy_from_pkt(void)
+static int packet_copy_from_pkt(void)
{
int i;
uint32_t ret = 0;
@@ -969,7 +994,7 @@ static int bench_packet_copy_from_pkt(void)
return !ret;
}
-static int bench_packet_copy_data(void)
+static int packet_copy_data(void)
{
int i;
uint32_t ret = 0;
@@ -982,7 +1007,7 @@ static int bench_packet_copy_data(void)
return !ret;
}
-static int bench_packet_move_data(void)
+static int packet_move_data(void)
{
int i;
uint32_t ret = 0;
@@ -996,7 +1021,7 @@ static int bench_packet_move_data(void)
return !ret;
}
-static int bench_packet_pool(void)
+static int packet_pool(void)
{
int i;
@@ -1006,7 +1031,7 @@ static int bench_packet_pool(void)
return i;
}
-static int bench_packet_input(void)
+static int packet_input(void)
{
int i;
@@ -1016,7 +1041,7 @@ static int bench_packet_input(void)
return i;
}
-static int bench_packet_input_index(void)
+static int packet_input_index(void)
{
int i;
int ret = 0;
@@ -1027,7 +1052,7 @@ static int bench_packet_input_index(void)
return (ret == 0) ? 1 : ret;
}
-static int bench_packet_user_ptr(void)
+static int packet_user_ptr(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -1038,7 +1063,7 @@ static int bench_packet_user_ptr(void)
return i;
}
-static int bench_packet_user_ptr_set(void)
+static int packet_user_ptr_set(void)
{
int i;
@@ -1049,7 +1074,7 @@ static int bench_packet_user_ptr_set(void)
return i;
}
-static int bench_packet_user_area(void)
+static int packet_user_area(void)
{
int i;
odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
@@ -1060,7 +1085,7 @@ static int bench_packet_user_area(void)
return i;
}
-static int bench_packet_user_area_size(void)
+static int packet_user_area_size(void)
{
int i;
uint32_t ret = 0;
@@ -1071,7 +1096,28 @@ static int bench_packet_user_area_size(void)
return ret;
}
-static int bench_packet_l2_ptr(void)
+static int packet_user_flag(void)
+{
+ int i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ ret += !odp_packet_user_flag(gbl_args->pkt_tbl[i]);
+
+ return ret;
+}
+
+static int packet_user_flag_set(void)
+{
+ int i;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ odp_packet_user_flag_set(gbl_args->pkt_tbl[i], 1);
+
+ return i;
+}
+
+static int packet_l2_ptr(void)
{
int i;
@@ -1081,7 +1127,7 @@ static int bench_packet_l2_ptr(void)
return i;
}
-static int bench_packet_l2_offset(void)
+static int packet_l2_offset(void)
{
int i;
int ret = 0;
@@ -1092,7 +1138,7 @@ static int bench_packet_l2_offset(void)
return ret >= 0;
}
-static int bench_packet_l2_offset_set(void)
+static int packet_l2_offset_set(void)
{
int i;
uint32_t ret = 0;
@@ -1104,7 +1150,7 @@ static int bench_packet_l2_offset_set(void)
return !ret;
}
-static int bench_packet_l3_ptr(void)
+static int packet_l3_ptr(void)
{
int i;
@@ -1114,7 +1160,7 @@ static int bench_packet_l3_ptr(void)
return i;
}
-static int bench_packet_l3_offset(void)
+static int packet_l3_offset(void)
{
int i;
int ret = 0;
@@ -1125,7 +1171,7 @@ static int bench_packet_l3_offset(void)
return ret >= 0;
}
-static int bench_packet_l3_offset_set(void)
+static int packet_l3_offset_set(void)
{
int i;
uint32_t ret = 0;
@@ -1137,7 +1183,7 @@ static int bench_packet_l3_offset_set(void)
return !ret;
}
-static int bench_packet_l4_ptr(void)
+static int packet_l4_ptr(void)
{
int i;
@@ -1147,7 +1193,7 @@ static int bench_packet_l4_ptr(void)
return i;
}
-static int bench_packet_l4_offset(void)
+static int packet_l4_offset(void)
{
int i;
int ret = 0;
@@ -1158,7 +1204,7 @@ static int bench_packet_l4_offset(void)
return ret >= 0;
}
-static int bench_packet_l4_offset_set(void)
+static int packet_l4_offset_set(void)
{
int i;
uint32_t ret = 0;
@@ -1170,7 +1216,7 @@ static int bench_packet_l4_offset_set(void)
return !ret;
}
-static int bench_packet_flow_hash(void)
+static int packet_flow_hash(void)
{
int i;
uint32_t ret = 0;
@@ -1181,7 +1227,7 @@ static int bench_packet_flow_hash(void)
return ret;
}
-static int bench_packet_flow_hash_set(void)
+static int packet_flow_hash_set(void)
{
int i;
@@ -1191,7 +1237,7 @@ static int bench_packet_flow_hash_set(void)
return i;
}
-static int bench_packet_ts(void)
+static int packet_ts(void)
{
int i;
@@ -1201,7 +1247,7 @@ static int bench_packet_ts(void)
return i;
}
-static int bench_packet_ts_set(void)
+static int packet_ts_set(void)
{
int i;
odp_time_t ts = odp_time_local();
@@ -1212,8 +1258,116 @@ static int bench_packet_ts_set(void)
return i;
}
+static int packet_ref_static(void)
+{
+ int i;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ odp_packet_t *ref_tbl = gbl_args->pkt2_tbl;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ ref_tbl[i] = odp_packet_ref_static(pkt_tbl[i]);
+
+ return i;
+}
+
+static int packet_ref(void)
+{
+ int i;
+ uint32_t offset = TEST_MIN_PKT_SIZE / 2;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ odp_packet_t *ref_tbl = gbl_args->pkt2_tbl;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ ref_tbl[i] = odp_packet_ref(pkt_tbl[i], offset);
+
+ return i;
+}
+
+static int packet_ref_pkt(void)
+{
+ int i;
+ uint32_t offset = TEST_MIN_PKT_SIZE / 2;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ odp_packet_t *hdr_tbl = gbl_args->pkt2_tbl;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ hdr_tbl[i] = odp_packet_ref_pkt(pkt_tbl[i], offset, hdr_tbl[i]);
+
+ return i;
+}
+
+static int packet_has_ref(void)
+{
+ int i;
+ uint32_t ret = 0;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ ret += odp_packet_has_ref(pkt_tbl[i]);
+
+ return i + ret;
+}
+
+static int packet_subtype(void)
+{
+ int i;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ gbl_args->output_tbl[i] = odp_packet_subtype(pkt_tbl[i]);
+
+ return i;
+}
+
+static int packet_parse(void)
+{
+ odp_packet_parse_param_t param;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ int ret = 0;
+ int i;
+
+ memset(&param, 0, sizeof(odp_packet_parse_param_t));
+ param.proto = ODP_PROTO_ETH;
+ param.last_layer = ODP_PROTO_LAYER_ALL;
+ param.chksums.chksum.ipv4 = 1;
+ param.chksums.chksum.tcp = 1;
+ param.chksums.chksum.udp = 1;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++)
+ ret += odp_packet_parse(pkt_tbl[i], 0, &param);
+
+ return !ret;
+}
+
+static int packet_parse_multi(void)
+{
+ int burst_size = gbl_args->appl.burst_size;
+ int ret = 0;
+ int i;
+ odp_packet_parse_param_t param;
+ odp_packet_t *pkt_tbl = gbl_args->pkt_tbl;
+ uint32_t offsets[burst_size];
+
+ memset(&offsets, 0, sizeof(offsets));
+
+ memset(&param, 0, sizeof(odp_packet_parse_param_t));
+ param.proto = ODP_PROTO_ETH;
+ param.last_layer = ODP_PROTO_LAYER_ALL;
+ param.chksums.chksum.ipv4 = 1;
+ param.chksums.chksum.tcp = 1;
+ param.chksums.chksum.udp = 1;
+
+ for (i = 0; i < TEST_REPEAT_COUNT; i++) {
+ int idx = i * burst_size;
+
+ ret += odp_packet_parse_multi(&pkt_tbl[idx], offsets,
+ burst_size, &param);
+ }
+ return (ret == TEST_REPEAT_COUNT * burst_size);
+}
+
/**
- * Prinf usage information
+ * Print usage information
*/
static void usage(char *progname)
{
@@ -1224,10 +1378,13 @@ static void usage(char *progname)
" E.g. %s\n"
"\n"
"Optional OPTIONS:\n"
- " -b, --burst Test packet burst size.\n"
- " -i, --index Benchmark index to run indefinitely.\n"
- " -h, --help Display help and exit.\n\n"
- "\n", NO_PATH(progname), NO_PATH(progname));
+ " -b, --burst <num> Test packet burst size.\n"
+ " -c, --cache_size <num> Pool cache size.\n"
+ " -i, --index <idx> Benchmark index to run indefinitely.\n"
+ " -r, --rounds <num> Run each test case 'num' times (default %u).\n"
+ " -t, --time <opt> Time measurement. 0: measure CPU cycles (default), 1: measure time\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n", NO_PATH(progname), NO_PATH(progname), TEST_ROUNDS);
}
/**
@@ -1243,20 +1400,21 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
int long_index;
static const struct option longopts[] = {
{"burst", required_argument, NULL, 'b'},
- {"help", no_argument, NULL, 'h'},
+ {"cache_size", required_argument, NULL, 'c'},
{"index", required_argument, NULL, 'i'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"time", required_argument, NULL, 't'},
+ {"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "b:i:h";
-
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
-
- opterr = 0; /* Do not issue errors on helper options */
+ static const char *shortopts = "c:b:i:r:t:h";
appl_args->bench_idx = 0; /* Run all benchmarks */
appl_args->burst_size = TEST_DEF_BURST;
+ appl_args->cache_size = -1;
+ appl_args->rounds = TEST_ROUNDS;
+ appl_args->time = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1265,18 +1423,28 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
break; /* No more options */
switch (opt) {
+ case 'c':
+ appl_args->cache_size = atoi(optarg);
+ break;
case 'b':
appl_args->burst_size = atoi(optarg);
break;
+ case 'i':
+ appl_args->bench_idx = atoi(optarg);
+ break;
+ case 'r':
+ appl_args->rounds = atoi(optarg);
+ break;
+ case 't':
+ appl_args->time = atoi(optarg);
+ break;
case 'h':
usage(argv[0]);
exit(EXIT_SUCCESS);
break;
- case 'i':
- appl_args->bench_idx = atoi(optarg);
- break;
default:
- break;
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
}
}
@@ -1286,6 +1454,11 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
exit(EXIT_FAILURE);
}
+ if (appl_args->rounds < 1) {
+ printf("Invalid number test rounds: %d\n", appl_args->rounds);
+ exit(EXIT_FAILURE);
+ }
+
optind = 1; /* Reset 'extern optind' from the getopt lib */
}
@@ -1294,19 +1467,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
*/
static void print_info(char *progname, appl_args_t *appl_args ODP_UNUSED)
{
- printf("\n"
- "ODP system info\n"
- "---------------\n"
- "ODP API version: %s\n"
- "ODP impl name: %s\n"
- "CPU model: %s\n"
- "CPU freq (hz): %" PRIu64 "\n"
- "Cache line size: %i\n"
- "CPU count: %i\n"
- "\n",
- odp_version_api_str(), odp_version_impl_name(),
- odp_cpu_model_str(), odp_cpu_hz_max(),
- odp_sys_cache_line_size(), odp_cpu_count());
+ odp_sys_info_print();
printf("Running ODP appl: \"%s\"\n"
"-----------------\n", progname);
@@ -1317,134 +1478,99 @@ static void print_info(char *progname, appl_args_t *appl_args ODP_UNUSED)
* Test functions
*/
bench_info_t test_suite[] = {
- BENCH_INFO(bench_empty, NULL, NULL, NULL),
- BENCH_INFO(bench_packet_alloc, NULL, free_packets, NULL),
- BENCH_INFO(bench_packet_alloc_multi, NULL, free_packets_multi,
- NULL),
- BENCH_INFO(bench_packet_free, create_packets, NULL, NULL),
- BENCH_INFO(bench_packet_free_multi, alloc_packets_multi, NULL,
- NULL),
- BENCH_INFO(bench_packet_alloc_free, NULL, NULL, NULL),
- BENCH_INFO(bench_packet_alloc_free_multi, NULL, NULL, NULL),
- BENCH_INFO(bench_packet_reset, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_from_event, create_events, free_packets,
- NULL),
- BENCH_INFO(bench_packet_to_event, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_head, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_buf_len, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_data, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_seg_len, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_len, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_headroom, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_tailroom, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_tail, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_offset, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_prefetch, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_push_head, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_pull_head, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_push_tail, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_pull_tail, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_extend_head, alloc_packets_half,
- free_packets, NULL),
- BENCH_INFO(bench_packet_trunc_head, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_extend_tail, alloc_packets_half,
- free_packets, NULL),
- BENCH_INFO(bench_packet_trunc_tail, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_add_data, alloc_packets_half,
- free_packets, NULL),
- BENCH_INFO(bench_packet_rem_data, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_align, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_is_segmented, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_num_segs, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_first_seg, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_last_seg, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_next_seg, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_seg_data, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_seg_data_len, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_concat, alloc_concat_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_split, create_packets,
- free_packets_twice, NULL),
- BENCH_INFO(bench_packet_copy, create_packets,
- free_packets_twice, NULL),
- BENCH_INFO(bench_packet_copy_part, create_packets,
- free_packets_twice, NULL),
- BENCH_INFO(bench_packet_copy_to_mem, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_copy_from_mem, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_copy_from_pkt, alloc_packets_twice,
- free_packets_twice, NULL),
- BENCH_INFO(bench_packet_copy_data, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_move_data, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_pool, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_input, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_input_index, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_user_ptr, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_user_ptr_set, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_user_area, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_user_area_size, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_l2_ptr, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l2_offset, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l2_offset_set, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_l3_ptr, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l3_offset, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l3_offset_set, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_l4_ptr, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l4_offset, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_l4_offset_set, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_flow_hash, create_packets, free_packets,
- NULL),
- BENCH_INFO(bench_packet_flow_hash_set, create_packets,
- free_packets, NULL),
- BENCH_INFO(bench_packet_ts, create_packets, free_packets, NULL),
- BENCH_INFO(bench_packet_ts_set, create_packets, free_packets,
- NULL),
+ BENCH_INFO(packet_alloc, NULL, free_packets, NULL),
+ BENCH_INFO(packet_alloc_multi, NULL, free_packets_multi, NULL),
+ BENCH_INFO(packet_free, create_packets, NULL, NULL),
+ BENCH_INFO(packet_free_multi, alloc_packets_multi, NULL, NULL),
+ BENCH_INFO(packet_free_sp, alloc_packets_multi, NULL, NULL),
+ BENCH_INFO(packet_alloc_free, NULL, NULL, NULL),
+ BENCH_INFO(packet_alloc_free_multi, NULL, NULL, NULL),
+ BENCH_INFO(packet_reset, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_from_event, create_events, free_packets, NULL),
+ BENCH_INFO(packet_from_event_multi, create_events_multi, free_packets_multi, NULL),
+ BENCH_INFO(packet_to_event, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_to_event_multi, alloc_packets_multi, free_packets_multi, NULL),
+ BENCH_INFO(packet_head, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_buf_len, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_data, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_data_seg_len, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_seg_len, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_len, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_headroom, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_tailroom, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_tail, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_offset, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_prefetch, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_push_head, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_pull_head, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_push_tail, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_pull_tail, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_extend_head, alloc_packets_half, free_packets, NULL),
+ BENCH_INFO(packet_trunc_head, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_extend_tail, alloc_packets_half, free_packets, NULL),
+ BENCH_INFO(packet_trunc_tail, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_add_data, alloc_packets_half, free_packets, NULL),
+ BENCH_INFO(packet_rem_data, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_align, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_is_segmented, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_num_segs, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_first_seg, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_last_seg, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_next_seg, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_seg_data, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_seg_data_len, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_concat, alloc_concat_packets, free_packets, NULL),
+ BENCH_INFO(packet_split, create_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_copy, create_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_copy_part, create_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_copy_to_mem, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_copy_from_mem, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_copy_from_pkt, alloc_packets_twice, free_packets_twice, NULL),
+ BENCH_INFO(packet_copy_data, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_move_data, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_pool, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_input, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_input_index, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_ptr, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_ptr_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_area, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_area_size, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_flag, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_user_flag_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l2_ptr, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l2_offset, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l2_offset_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l3_ptr, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l3_offset, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l3_offset_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l4_ptr, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l4_offset, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_l4_offset_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_flow_hash, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_flow_hash_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_ts, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_ts_set, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_ref_static, create_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_ref, create_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_ref_pkt, alloc_packets_twice, free_packets_twice, NULL),
+ BENCH_INFO(packet_has_ref, alloc_ref_packets, free_packets_twice, NULL),
+ BENCH_INFO(packet_subtype, create_packets, free_packets, NULL),
+ BENCH_INFO(packet_parse, alloc_parse_packets_ipv4_tcp, free_packets,
+ "packet_parse ipv4/tcp"),
+ BENCH_INFO(packet_parse, alloc_parse_packets_ipv4_udp, free_packets,
+ "packet_parse ipv4/udp"),
+ BENCH_INFO(packet_parse, alloc_parse_packets_ipv6_tcp, free_packets,
+ "packet_parse ipv6/tcp"),
+ BENCH_INFO(packet_parse, alloc_parse_packets_ipv6_udp, free_packets,
+ "packet_parse ipv6/udp"),
+ BENCH_INFO(packet_parse_multi, alloc_parse_packets_multi_ipv4_tcp, free_packets_multi,
+ "packet_parse_multi ipv4/tcp"),
+ BENCH_INFO(packet_parse_multi, alloc_parse_packets_multi_ipv4_udp, free_packets_multi,
+ "packet_parse_multi ipv4/udp"),
+ BENCH_INFO(packet_parse_multi, alloc_parse_packets_multi_ipv6_tcp, free_packets_multi,
+ "packet_parse_multi ipv6/tcp"),
+ BENCH_INFO(packet_parse_multi, alloc_parse_packets_multi_ipv6_udp, free_packets_multi,
+ "packet_parse_multi ipv6/udp"),
};
/**
@@ -1452,7 +1578,10 @@ bench_info_t test_suite[] = {
*/
int main(int argc, char *argv[])
{
- odph_odpthread_t worker_thread;
+ odph_helper_options_t helper_options;
+ odph_thread_t worker_thread;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
int cpu;
odp_shm_t shm;
odp_cpumask_t cpumask;
@@ -1460,18 +1589,29 @@ int main(int argc, char *argv[])
odp_pool_capability_t capa;
odp_pool_param_t params;
odp_instance_t instance;
- uint32_t pkt_num;
+ odp_init_t init_param;
+ uint32_t pkt_num, seg_len;
uint8_t ret;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
- LOG_ERR("Error: ODP global init failed.\n");
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
/* Init this thread */
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- LOG_ERR("Error: ODP local init failed.\n");
+ ODPH_ERR("Error: ODP local init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1480,31 +1620,36 @@ int main(int argc, char *argv[])
ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- LOG_ERR("Error: shared mem reserve failed.\n");
+ ODPH_ERR("Error: shared mem reserve failed.\n");
exit(EXIT_FAILURE);
}
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL) {
- LOG_ERR("Error: shared mem alloc failed.\n");
+ ODPH_ERR("Error: shared mem alloc failed.\n");
exit(EXIT_FAILURE);
}
memset(gbl_args, 0, sizeof(args_t));
- gbl_args->bench = test_suite;
- gbl_args->num_bench = sizeof(test_suite) / sizeof(test_suite[0]);
-
/* Parse and store the application arguments */
parse_args(argc, argv, &gbl_args->appl);
+ bench_suite_init(&gbl_args->suite);
+ gbl_args->suite.bench = test_suite;
+ gbl_args->suite.num_bench = ODPH_ARRAY_SIZE(test_suite);
+ gbl_args->suite.indef_idx = gbl_args->appl.bench_idx;
+ gbl_args->suite.rounds = gbl_args->appl.rounds;
+ gbl_args->suite.repeat_count = TEST_REPEAT_COUNT;
+ gbl_args->suite.measure_time = !!gbl_args->appl.time;
+
/* Print both system and application information */
print_info(NO_PATH(argv[0]), &gbl_args->appl);
/* Get default worker cpumask */
if (odp_cpumask_default_worker(&cpumask, 1) != 1) {
- LOG_ERR("Error: unable to allocate worker thread.\n");
+ ODPH_ERR("Error: unable to allocate worker thread.\n");
exit(EXIT_FAILURE);
}
@@ -1512,7 +1657,7 @@ int main(int argc, char *argv[])
/* Check pool capability */
if (odp_pool_capability(&capa)) {
- LOG_ERR("Error: unable to query pool capability.\n");
+ ODPH_ERR("Error: unable to query pool capability.\n");
exit(EXIT_FAILURE);
}
@@ -1522,47 +1667,65 @@ int main(int argc, char *argv[])
2 * TEST_REPEAT_COUNT;
if (capa.pkt.max_num && capa.pkt.max_num < pkt_num) {
- LOG_ERR("Error: packet pool size not supported.\n");
+ ODPH_ERR("Error: packet pool size not supported.\n");
printf("MAX: %" PRIu32 "\n", capa.pkt.max_num);
exit(EXIT_FAILURE);
- } else if (capa.pkt.max_len && capa.pkt.max_len < TEST_MAX_PKT_SIZE) {
- LOG_ERR("Error: packet length not supported.\n");
- exit(EXIT_FAILURE);
- } else if (capa.pkt.max_seg_len &&
- capa.pkt.max_seg_len < PKT_POOL_SEG_LEN) {
- LOG_ERR("Error: segment length not supported.\n");
+ } else if (capa.pkt.max_len &&
+ capa.pkt.max_len < 2 * TEST_MAX_PKT_SIZE) {
+ ODPH_ERR("Error: packet length not supported.\n");
exit(EXIT_FAILURE);
} else if (capa.pkt.max_uarea_size &&
capa.pkt.max_uarea_size < PKT_POOL_UAREA_SIZE) {
- LOG_ERR("Error: user area size not supported.\n");
+ ODPH_ERR("Error: user area size not supported.\n");
exit(EXIT_FAILURE);
+ } else if (gbl_args->appl.cache_size > (int)capa.pkt.max_cache_size) {
+ ODPH_ERR("Error: cache size not supported (max %" PRIu32 ")\n",
+ capa.pkt.max_cache_size);
+ exit(EXIT_FAILURE);
+ }
+
+ seg_len = TEST_MAX_PKT_SIZE;
+ if (capa.pkt.max_seg_len && capa.pkt.max_seg_len < seg_len) {
+ seg_len = capa.pkt.max_seg_len;
+ printf("\nWarn: allocated packets may be segmented (min seg_len=%" PRIu32 ")\n\n",
+ seg_len);
}
/* Create packet pool */
odp_pool_param_init(&params);
- params.pkt.seg_len = PKT_POOL_SEG_LEN;
- params.pkt.len = TEST_MAX_PKT_SIZE;
+ params.pkt.seg_len = seg_len;
+ /* Using packet length as twice the TEST_MAX_PKT_SIZE as some
+ * test cases (packet_ref_pkt) might allocate a bigger
+ * packet than TEST_MAX_PKT_SIZE.
+ */
+ params.pkt.len = 2 * TEST_MAX_PKT_SIZE;
params.pkt.num = pkt_num;
params.pkt.uarea_size = PKT_POOL_UAREA_SIZE;
+ if (gbl_args->appl.cache_size >= 0)
+ params.pkt.cache_size = gbl_args->appl.cache_size;
params.type = ODP_POOL_PACKET;
gbl_args->pool = odp_pool_create("packet pool", &params);
if (gbl_args->pool == ODP_POOL_INVALID) {
- LOG_ERR("Error: packet pool create failed.\n");
+ ODPH_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
}
- printf("CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("CPU mask: %s\n", cpumaskstr);
- printf("Burst size: %d\n", gbl_args->appl.burst_size);
- printf("Bench repeat: %d\n", TEST_REPEAT_COUNT);
+ printf("CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("CPU mask: %s\n", cpumaskstr);
+ printf("Burst size: %d\n", gbl_args->appl.burst_size);
+ printf("Bench repeat: %d\n", TEST_REPEAT_COUNT);
+ printf("Measurement unit: %s\n", gbl_args->appl.time ? "nsec" : "CPU cycles");
+ printf("Test rounds: %u\n", gbl_args->appl.rounds);
+ if (gbl_args->appl.cache_size < 0)
+ printf("Pool cache size: default\n");
+ else
+ printf("Pool cache size: %d\n", gbl_args->appl.cache_size);
odp_pool_print(gbl_args->pool);
- memset(&worker_thread, 0, sizeof(odph_odpthread_t));
-
- odp_barrier_init(&barrier, 1 + 1);
+ memset(&worker_thread, 0, sizeof(odph_thread_t));
signal(SIGINT, sig_handler);
@@ -1570,40 +1733,45 @@ int main(int argc, char *argv[])
cpu = odp_cpumask_first(&cpumask);
odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_benchmarks;
- thr_params.arg = gbl_args;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
odp_cpumask_zero(&thd_mask);
odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&worker_thread, &thd_mask,
- &thr_params);
- odph_odpthreads_join(&worker_thread);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &thd_mask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_benchmarks;
+ thr_param.arg = gbl_args;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+ odph_thread_join(&worker_thread, 1);
- ret = gbl_args->bench_failed;
+ ret = gbl_args->suite.retval;
if (odp_pool_destroy(gbl_args->pool)) {
- LOG_ERR("Error: pool destroy\n");
+ ODPH_ERR("Error: pool destroy\n");
exit(EXIT_FAILURE);
}
+ gbl_args = NULL;
+ odp_mb_full();
if (odp_shm_free(shm)) {
- LOG_ERR("Error: shm free\n");
+ ODPH_ERR("Error: shm free\n");
exit(EXIT_FAILURE);
}
if (odp_term_local()) {
- LOG_ERR("Error: term local\n");
+ ODPH_ERR("Error: term local\n");
exit(EXIT_FAILURE);
}
if (odp_term_global(instance)) {
- LOG_ERR("Error: term global\n");
+ ODPH_ERR("Error: term global\n");
exit(EXIT_FAILURE);
}
diff --git a/test/performance/odp_bench_pktio_sp.c b/test/performance/odp_bench_pktio_sp.c
new file mode 100644
index 000000000..017e7565f
--- /dev/null
+++ b/test/performance/odp_bench_pktio_sp.c
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+/**
+ * @example odp_bench_pktio_sp.c
+ *
+ * Microbenchmark application for packet IO slow path functions
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* Needed for sigaction */
+#endif
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "bench_common.h"
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Default number of rounds per test case */
+#define ROUNDS 100u
+
+/* Maximum interface name length */
+#define MAX_NAME_LEN 128
+
+#define BENCH_INFO(run_fn, init_fn, term_fn, cond_fn, rounds) \
+ {.name = #run_fn, .run = run_fn, .init = init_fn, .term = term_fn, .cond = cond_fn,\
+ .max_rounds = rounds}
+
+typedef struct {
+ /* Command line options */
+ struct {
+ /* Rounds per test case */
+ uint32_t rounds;
+
+ /* Test case index to run */
+ uint32_t case_idx;
+
+ /* Interface name */
+ char name[MAX_NAME_LEN];
+
+ /* Packet input mode */
+ odp_pktin_mode_t in_mode;
+
+ /* Packet output mode */
+ odp_pktout_mode_t out_mode;
+
+ /* Number of packet input queues */
+ uint32_t num_input_queues;
+
+ /* Number of packet output queues */
+ uint32_t num_output_queues;
+
+ /* Number of PMRs */
+ uint32_t num_pmr;
+ } opt;
+
+ /* Packet IO device */
+ odp_pktio_t pktio;
+
+	/* Packet IO capability */
+ odp_pktio_capability_t capa;
+
+ /* Packet pool */
+ odp_pool_t pool;
+
+ /* Packet IO statistics */
+ odp_pktio_stats_t stats;
+
+ /* Input queue statistics */
+ odp_pktin_queue_stats_t pktin_queue_stats;
+
+ /* Output queue statistics */
+ odp_pktout_queue_stats_t pktout_queue_stats;
+
+ /* Data for cls_pmr_create() test */
+ struct {
+ /* Term used to create PMRs */
+ odp_cls_pmr_term_t term;
+
+ /* Is test enabled */
+ odp_bool_t enabled;
+
+ } cls_pmr_create;
+
+ /* Common benchmark suite data */
+ bench_tm_suite_t suite;
+
+ /* CPU mask as string */
+ char cpumask_str[ODP_CPUMASK_STR_SIZE];
+
+} appl_args_t;
+
+static appl_args_t *gbl_args;
+
+static void sig_handler(int signo ODP_UNUSED)
+{
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->suite.exit_worker, 1);
+}
+
+static int setup_sig_handler(void)
+{
+ struct sigaction action;
+
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = sig_handler;
+
+ /* No additional signals blocked. By default, the signal which triggered
+ * the handler is blocked. */
+ if (sigemptyset(&action.sa_mask))
+ return -1;
+
+ if (sigaction(SIGINT, &action, NULL))
+ return -1;
+
+ return 0;
+}
+
+static void clean_pending_events(void)
+{
+ while (1) {
+ odp_event_t event = odp_schedule(NULL, odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS));
+
+ if (event != ODP_EVENT_INVALID) {
+ odp_event_free(event);
+ continue;
+ }
+ break;
+ };
+}
+
+static odp_pool_t create_packet_pool(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_param_t param;
+ odp_pool_t pool;
+
+ if (odp_pool_capability(&capa))
+ ODPH_ABORT("Reading pool capabilities failed\n");
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = 512;
+ param.pkt.len = 2048;
+
+ if (capa.pkt.max_num && capa.pkt.max_num < param.pkt.num)
+ param.pkt.num = capa.pkt.max_num;
+
+ if (capa.pkt.max_len && capa.pkt.max_len < param.pkt.len)
+ param.pkt.len = capa.pkt.max_len;
+
+ pool = odp_pool_create("pktio_pool", &param);
+ if (pool == ODP_POOL_INVALID)
+ ODPH_ABORT("Creating packet pool failed\n");
+
+ return pool;
+}
+
+static void pktio_setup_param(odp_pktin_mode_t in_mode, odp_pktout_mode_t out_mode, odp_bool_t cls)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_param_t param;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktout_queue_param_t pktout_param;
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ int ret;
+
+ pool = create_packet_pool();
+
+ odp_pktio_param_init(&param);
+ param.in_mode = in_mode;
+ param.out_mode = out_mode;
+
+ pktio = odp_pktio_open(appl_args->opt.name, pool, &param);
+ if (pktio == ODP_PKTIO_INVALID)
+ ODPH_ABORT("Opening pktio failed\n");
+
+ odp_pktin_queue_param_init(&pktin_param);
+ pktin_param.num_queues = appl_args->opt.num_input_queues;
+
+ if (cls) {
+ pktin_param.classifier_enable = true;
+ } else {
+ if (pktin_param.num_queues > 1) {
+ pktin_param.hash_enable = true;
+ pktin_param.hash_proto.proto.ipv4_udp = 1;
+ }
+ }
+
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.num_queues = appl_args->opt.num_output_queues;
+
+ ret = odp_pktin_queue_config(pktio, &pktin_param);
+ if (ret)
+ ODPH_ABORT("Configuring packet input queues failed: %d\n", ret);
+
+ ret = odp_pktout_queue_config(pktio, &pktout_param);
+ if (ret)
+ ODPH_ABORT("Configuring packet output queues failed: %d\n", ret);
+
+ ret = odp_pktio_start(pktio);
+ if (ret)
+ ODPH_ABORT("Starting pktio failed: %d\n", ret);
+
+ appl_args->pool = pool;
+ appl_args->pktio = pktio;
+}
+
+static void pktio_setup(void)
+{
+ pktio_setup_param(gbl_args->opt.in_mode, gbl_args->opt.out_mode, false);
+}
+
+static void pktio_setup_direct_rx(void)
+{
+ pktio_setup_param(ODP_PKTIN_MODE_DIRECT, gbl_args->opt.out_mode, false);
+}
+
+static void pktio_setup_sched_rx(void)
+{
+ pktio_setup_param(ODP_PKTIN_MODE_SCHED, gbl_args->opt.out_mode, false);
+}
+
+static void pktio_setup_cls(void)
+{
+ pktio_setup_param(ODP_PKTIN_MODE_SCHED, gbl_args->opt.out_mode, true);
+}
+
+static void pktio_setup_direct_tx(void)
+{
+ pktio_setup_param(gbl_args->opt.in_mode, ODP_PKTOUT_MODE_DIRECT, false);
+}
+
+static void pktio_setup_queue_tx(void)
+{
+ pktio_setup_param(gbl_args->opt.in_mode, ODP_PKTOUT_MODE_QUEUE, false);
+}
+
+static void pktio_clean_param(odp_pktin_mode_t in_mode)
+{
+ appl_args_t *appl_args = gbl_args;
+ int ret;
+
+ ret = odp_pktio_stop(appl_args->pktio);
+ if (ret)
+ ODPH_ABORT("Stopping pktio failed: %d\n", ret);
+
+ /* Clean possible pre-scheduled packets */
+ if (in_mode == ODP_PKTIN_MODE_SCHED)
+ clean_pending_events();
+
+ ret = odp_pktio_close(appl_args->pktio);
+ if (ret)
+ ODPH_ABORT("Closing pktio failed: %d\n", ret);
+
+ ret = odp_pool_destroy(appl_args->pool);
+ if (ret)
+ ODPH_ABORT("Destroying pktio pool failed: %d\n", ret);
+}
+
+static void pktio_clean(void)
+{
+ pktio_clean_param(gbl_args->opt.in_mode);
+}
+
+static void pktio_clean_direct_rx(void)
+{
+ pktio_clean_param(ODP_PKTIN_MODE_DIRECT);
+}
+
+static void pktio_clean_sched_rx(void)
+{
+ pktio_clean_param(ODP_PKTIN_MODE_SCHED);
+}
+
+static int pktio_capability(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_capability_t *capa = &appl_args->capa;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktio_capability()");
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktio_capability(pktio, capa);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio capa failed: %d\n", ret);
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktio_lookup(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ const char *name = appl_args->opt.name;
+ odp_pktio_t pktio;
+ odp_time_t t1, t2;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktio_lookup()");
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ pktio = odp_pktio_lookup(name);
+ t2 = odp_time_local_strict();
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Pktio lookup failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktio_open_start_stop_close(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_param_t param;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktout_queue_param_t pktout_param;
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ odp_time_t t1, t2, t3, t4, t5, t6, t7, t8;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktio_open()");
+ uint8_t id2 = bench_tm_func_register(res, "odp_pktin_queue_config()");
+ uint8_t id3 = bench_tm_func_register(res, "odp_pktout_queue_config()");
+ uint8_t id4 = bench_tm_func_register(res, "odp_pktio_start()");
+ uint8_t id5 = bench_tm_func_register(res, "odp_pktio_stop()");
+ uint8_t id6 = bench_tm_func_register(res, "odp_pktio_close()");
+
+ pool = create_packet_pool();
+
+ odp_pktio_param_init(&param);
+ param.in_mode = appl_args->opt.in_mode;
+ param.out_mode = appl_args->opt.out_mode;
+
+ odp_pktin_queue_param_init(&pktin_param);
+ pktin_param.num_queues = appl_args->opt.num_input_queues;
+ if (pktin_param.num_queues > 1) {
+ pktin_param.hash_enable = true;
+ pktin_param.hash_proto.proto.ipv4_udp = 1;
+ }
+
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.num_queues = appl_args->opt.num_output_queues;
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ pktio = odp_pktio_open(appl_args->opt.name, pool, &param);
+ t2 = odp_time_local_strict();
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Opening pktio failed\n");
+ return -1;
+ }
+
+ ret = odp_pktin_queue_config(pktio, &pktin_param);
+ t3 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Configuring packet input queues failed: %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_pktout_queue_config(pktio, &pktout_param);
+ t4 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Configuring packet output queues failed: %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_pktio_start(pktio);
+ t5 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Starting pktio failed: %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_pktio_stop(pktio);
+ t6 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Stopping pktio failed: %d\n", ret);
+ return -1;
+ }
+
+ /* Clean possible pre-scheduled packets */
+ if (appl_args->opt.in_mode == ODP_PKTIN_MODE_SCHED)
+ clean_pending_events();
+
+ t7 = odp_time_local_strict();
+ ret = odp_pktio_close(pktio);
+ t8 = odp_time_local_strict();
+ if (ret) {
+ ODPH_ERR("Closing pktio failed: %d\n", ret);
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ bench_tm_func_record(t3, t2, res, id2);
+ bench_tm_func_record(t4, t3, res, id3);
+ bench_tm_func_record(t5, t4, res, id4);
+ bench_tm_func_record(t6, t5, res, id5);
+ bench_tm_func_record(t8, t7, res, id6);
+ }
+
+ ret = odp_pool_destroy(pool);
+ if (ret) {
+ ODPH_ERR("Destroying pktio pool failed: %d\n", ret);
+ return -1;
+ }
+ return 0;
+}
+
+static int pktio_stats(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_stats_t *stats = &appl_args->stats;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktio_stats()");
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktio_stats(pktio, stats);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktio_stats_reset(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_time_t t1, t2;
+ int ret;
+ int id1 = bench_tm_func_register(res, "odp_pktio_stats_reset()");
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktio_stats_reset(pktio);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Resetting pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktin_queue_stats(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktin_queue_stats_t *stats = &appl_args->pktin_queue_stats;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_pktin_queue_t queue;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktin_queue_stats()");
+
+ ret = odp_pktin_queue(pktio, &queue, 1);
+ if (ret < 1) {
+ ODPH_ERR("Reading pktio input queue failed\n");
+ return -1;
+ }
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktin_queue_stats(queue, stats);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktin_event_queue_stats(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktin_queue_stats_t *stats = &appl_args->pktin_queue_stats;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_queue_t queue;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktin_event_queue_stats()");
+
+ ret = odp_pktin_event_queue(pktio, &queue, 1);
+ if (ret < 1) {
+ ODPH_ERR("Reading pktio input queue failed\n");
+ return -1;
+ }
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktin_event_queue_stats(pktio, queue, stats);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktout_queue_stats(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktout_queue_stats_t *stats = &appl_args->pktout_queue_stats;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_pktout_queue_t queue;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktout_queue_stats()");
+
+ ret = odp_pktout_queue(pktio, &queue, 1);
+ if (ret < 1) {
+		ODPH_ERR("Reading pktio output queue failed\n");
+ return -1;
+ }
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktout_queue_stats(queue, stats);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int pktout_event_queue_stats(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktout_queue_stats_t *stats = &appl_args->pktout_queue_stats;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_queue_t queue;
+ odp_time_t t1, t2;
+ int ret;
+ uint8_t id1 = bench_tm_func_register(res, "odp_pktout_event_queue_stats()");
+
+ ret = odp_pktout_event_queue(pktio, &queue, 1);
+ if (ret < 1) {
+		ODPH_ERR("Reading pktio output queue failed\n");
+ return -1;
+ }
+
+ for (int i = 0; i < repeat_count; i++) {
+ t1 = odp_time_local_strict();
+ ret = odp_pktout_event_queue_stats(pktio, queue, stats);
+ t2 = odp_time_local_strict();
+
+ if (ret) {
+ ODPH_ERR("Reading pktio stats failed\n");
+ return -1;
+ }
+
+ bench_tm_func_record(t2, t1, res, id1);
+ }
+ return 0;
+}
+
+static int find_first_supported_l3_pmr(const odp_cls_capability_t *capa, odp_cls_pmr_term_t *term)
+{
+ *term = ODP_PMR_TCP_DPORT;
+
+ if (capa->supported_terms.bit.udp_sport)
+ *term = ODP_PMR_UDP_SPORT;
+ else if (capa->supported_terms.bit.udp_dport)
+ *term = ODP_PMR_UDP_DPORT;
+ else if (capa->supported_terms.bit.tcp_sport)
+ *term = ODP_PMR_TCP_SPORT;
+ else if (capa->supported_terms.bit.tcp_dport)
+ *term = ODP_PMR_TCP_DPORT;
+ else
+ return 0;
+
+ return 1;
+}
+
+/* Capabilities required for cls_pmr_create() test */
+static int check_cls_capa(void)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_cls_capability_t cls_capa;
+ odp_schedule_capability_t sched_capa;
+ int ret;
+
+ ret = odp_cls_capability(&cls_capa);
+ if (ret) {
+ ODPH_ERR("Reading classifier capa failed: %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_schedule_capability(&sched_capa);
+ if (ret) {
+ ODPH_ERR("Reading scheduler capa failed: %d\n", ret);
+ return -1;
+ }
+
+ if (!find_first_supported_l3_pmr(&cls_capa, &appl_args->cls_pmr_create.term)) {
+		ODPH_ERR("Implementation doesn't support any TCP/UDP PMRs\n");
+ return 0;
+ }
+
+ /* One extra CoS and queue required for the default CoS */
+ if (appl_args->opt.num_pmr + 1 > cls_capa.max_cos) {
+ ODPH_ERR("Not enough CoSes supported for PMR test: %u/%u\n",
+ appl_args->opt.num_pmr + 1, cls_capa.max_cos);
+ return 0;
+ }
+
+ if (appl_args->opt.num_pmr + 1 > sched_capa.max_queues) {
+ ODPH_ERR("Not enough queues supported for PMR test: %u/%u\n",
+ appl_args->opt.num_pmr + 1, sched_capa.max_queues);
+ return 0;
+ }
+
+ appl_args->cls_pmr_create.enabled = true;
+
+ return 1;
+}
+
+static int check_cls_cond(void)
+{
+ return gbl_args->cls_pmr_create.enabled;
+}
+
+static int cls_pmr_create(bench_tm_result_t *res, int repeat_count)
+{
+ appl_args_t *appl_args = gbl_args;
+ odp_pktio_t pktio = appl_args->pktio;
+ odp_cls_cos_param_t cos_param;
+ odp_queue_param_t queue_param;
+ odp_pmr_param_t pmr_param;
+ odp_cos_t default_cos;
+ uint32_t num_cos = appl_args->opt.num_pmr + 1;
+ uint32_t num_pmr = num_cos - 1;
+ uint32_t cos_created = 0;
+ uint32_t queue_created = 0;
+ uint16_t val = 1024;
+ uint16_t mask = 0xffff;
+ int ret = 0;
+ odp_time_t t1, t2;
+ odp_cos_t cos[num_cos];
+ odp_queue_t queue[num_cos];
+ odp_pmr_t pmr[num_pmr];
+ uint8_t id1 = bench_tm_func_register(res, "odp_cls_pmr_create()");
+ uint8_t id2 = bench_tm_func_register(res, "odp_cls_pmr_destroy()");
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+
+ odp_cls_cos_param_init(&cos_param);
+
+ for (uint32_t i = 0; i < num_cos; i++) {
+ queue[i] = odp_queue_create(NULL, &queue_param);
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("odp_queue_create() failed %u / %u\n", i + 1, num_cos);
+ break;
+ }
+
+ cos_param.queue = queue[i];
+ queue_created++;
+
+ cos[i] = odp_cls_cos_create(NULL, &cos_param);
+ if (cos[i] == ODP_COS_INVALID) {
+ ODPH_ERR("odp_cls_cos_create() failed %u / %u\n", i + 1, num_cos);
+ break;
+ }
+ cos_created++;
+ }
+
+ if (queue_created != num_cos)
+ ODPH_ERR("Unable to create all queues: %u/%u\n", queue_created, num_cos);
+
+ if (cos_created != num_cos) {
+ ODPH_ERR("Unable to create all CoSes: %u/%u\n", cos_created, num_cos);
+ goto destroy_cos;
+ }
+
+ default_cos = cos[0];
+
+ ret = odp_pktio_default_cos_set(pktio, default_cos);
+ if (ret) {
+ ODPH_ERR("Setting default CoS failed: %d\n", ret);
+ goto destroy_cos;
+ }
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = appl_args->cls_pmr_create.term;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ for (uint32_t i = 0; i < (uint32_t)repeat_count; i++) {
+ uint32_t pmr_created = 0;
+
+ for (uint32_t j = 0; j < num_pmr; j++) {
+ t1 = odp_time_local_strict();
+ pmr[j] = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos[j + 1]);
+ t2 = odp_time_local_strict();
+
+ if (pmr[j] == ODP_PMR_INVALID)
+ break;
+ bench_tm_func_record(t2, t1, res, id1);
+
+ val++;
+ pmr_created++;
+ }
+
+ for (uint32_t j = 0; j < pmr_created; j++) {
+ t1 = odp_time_local_strict();
+ ret = odp_cls_pmr_destroy(pmr[j]);
+ t2 = odp_time_local_strict();
+
+ if (ret)
+ ODPH_ABORT("Destroying PMR failed: %d\n", ret);
+
+ bench_tm_func_record(t2, t1, res, id2);
+ }
+
+ if (i == 0)
+ ODPH_DBG("Created %u PMRs\n", pmr_created);
+ }
+
+ ret = odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+
+destroy_cos:
+ for (uint32_t i = 0; i < cos_created; i++)
+ ret = odp_cos_destroy(cos[i]);
+
+ for (uint32_t i = 0; i < queue_created; i++)
+ ret = odp_queue_destroy(queue[i]);
+
+ return ret;
+}
+
+bench_tm_info_t test_suite[] = {
+ BENCH_INFO(pktio_capability, pktio_setup, pktio_clean, NULL, 0),
+ BENCH_INFO(pktio_lookup, pktio_setup, pktio_clean, NULL, 0),
+ BENCH_INFO(pktio_open_start_stop_close, NULL, NULL, NULL, 0),
+ BENCH_INFO(pktio_stats, pktio_setup, pktio_clean, NULL, 0),
+ BENCH_INFO(pktin_queue_stats, pktio_setup_direct_rx, pktio_clean_direct_rx, NULL, 0),
+ BENCH_INFO(pktin_event_queue_stats, pktio_setup_sched_rx, pktio_clean_sched_rx, NULL, 0),
+ BENCH_INFO(pktout_queue_stats, pktio_setup_direct_tx, pktio_clean, NULL, 0),
+ BENCH_INFO(pktout_event_queue_stats, pktio_setup_queue_tx, pktio_clean, NULL, 0),
+ BENCH_INFO(pktio_stats_reset, pktio_setup, pktio_clean, NULL, 0),
+ BENCH_INFO(cls_pmr_create, pktio_setup_cls, pktio_clean_sched_rx, check_cls_cond, 0)
+};
+
+/* Print usage information */
+static void usage(void)
+{
+ printf("\n"
+ "ODP pktio API slow path micro benchmarks\n"
+ "\n"
+ "Options:\n"
+ " -i, --interface <name> Ethernet interface name (default loop).\n"
+ " -m, --in_mode <arg> Packet input mode\n"
+ " 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
+ " 1: Scheduler mode with parallel queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
+ " -o, --out_mode <arg> Packet output mode\n"
+ " 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
+ " 1: Queue mode: PKTOUT_MODE_QUEUE\n"
+ " -p, --pmr <num> Number of PMRs to create/destroy per round (default 1)\n"
+ " -q, --rx_queues <num> Number of packet input queues (default 1)\n"
+ " -t, --tx_queues <num> Number of packet output queues (default 1)\n"
+ " -r, --rounds <num> Run each test case 'num' times (default %u).\n"
+ " -s, --select <idx> Run only selected test case.\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n", ROUNDS);
+}
+
+static int parse_interface(appl_args_t *appl_args, const char *optarg)
+{
+ if (strlen(optarg) + 1 > MAX_NAME_LEN) {
+ ODPH_ERR("Unable to store interface name (MAX_NAME_LEN=%d)\n", MAX_NAME_LEN);
+ return -1;
+ }
+ strncpy(appl_args->opt.name, optarg, MAX_NAME_LEN);
+ return 0;
+}
+
+/* Parse command line arguments */
+static int parse_args(int argc, char *argv[])
+{
+ int i;
+ int opt;
+ int long_index;
+ static const struct option longopts[] = {
+ {"interface", required_argument, NULL, 'i'},
+ {"in_mode", required_argument, NULL, 'm'},
+ {"out_mode", required_argument, NULL, 'o'},
+ {"pmr", required_argument, NULL, 'p'},
+ {"rx_queues", required_argument, NULL, 'q'},
+ {"tx_queues", required_argument, NULL, 't'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"select", required_argument, NULL, 's'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "i:m:o:p:q:r:s:t:h";
+
+ strncpy(gbl_args->opt.name, "loop", MAX_NAME_LEN);
+ gbl_args->opt.rounds = ROUNDS;
+ gbl_args->opt.in_mode = ODP_PKTIN_MODE_DIRECT;
+ gbl_args->opt.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ gbl_args->opt.num_input_queues = 1;
+ gbl_args->opt.num_output_queues = 1;
+ gbl_args->opt.num_pmr = 1;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'i':
+ if (parse_interface(gbl_args, optarg))
+ return -1;
+ break;
+ case 'm':
+ i = atoi(optarg);
+ if (i == 1)
+ gbl_args->opt.in_mode = ODP_PKTIN_MODE_SCHED;
+ else
+ gbl_args->opt.in_mode = ODP_PKTIN_MODE_DIRECT;
+ break;
+ case 'o':
+ i = atoi(optarg);
+ if (i == 1)
+ gbl_args->opt.out_mode = ODP_PKTOUT_MODE_QUEUE;
+ else
+ gbl_args->opt.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ break;
+ case 'p':
+ gbl_args->opt.num_pmr = atoi(optarg);
+ break;
+ case 'q':
+ gbl_args->opt.num_input_queues = atoi(optarg);
+ break;
+ case 'r':
+ gbl_args->opt.rounds = atoi(optarg);
+ break;
+ case 's':
+ gbl_args->opt.case_idx = atoi(optarg);
+ break;
+ case 't':
+ gbl_args->opt.num_output_queues = atoi(optarg);
+ break;
+ case 'h':
+ usage();
+ return 1;
+ default:
+ ODPH_ERR("Bad option. Use -h for help.\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Validate parsed options against the capabilities of the selected interface
+ * and the classifier. The interface is opened temporarily only to query its
+ * capability limits and is closed again before returning.
+ * Returns 0 on success, -1 on invalid arguments or capability mismatch.
+ *
+ * NOTE(review): on the error paths after odp_pktio_open() succeeds, the pktio
+ * handle and the packet pool are not released before returning. Harmless in
+ * practice since the process exits on failure, but worth confirming. */
+static int check_args(appl_args_t *appl_args)
+{
+ odp_pktio_param_t param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ int ret;
+
+ if (gbl_args->opt.rounds < 1) {
+ ODPH_ERR("Invalid test repeat count: %u\n", gbl_args->opt.rounds);
+ return -1;
+ }
+
+ /* case_idx is presumably 1-based with 0 meaning "run all" (like bench_idx
+ * in the sibling benchmarks), so index == array size is accepted - confirm */
+ if (gbl_args->opt.case_idx > ODPH_ARRAY_SIZE(test_suite)) {
+ ODPH_ERR("Invalid test case index: %u\n", gbl_args->opt.case_idx);
+ return -1;
+ }
+
+ pool = create_packet_pool();
+
+ /* Open the interface with the requested modes just to read its limits */
+ odp_pktio_param_init(&param);
+ param.in_mode = appl_args->opt.in_mode;
+ param.out_mode = appl_args->opt.out_mode;
+
+ pktio = odp_pktio_open(appl_args->opt.name, pool, &param);
+ if (pktio == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Opening pktio failed\n");
+ return -1;
+ }
+
+ ret = odp_pktio_capability(pktio, &capa);
+ if (ret) {
+ ODPH_ERR("Reading pktio capa failed: %d\n", ret);
+ return -1;
+ }
+
+ if (appl_args->opt.num_input_queues > capa.max_input_queues) {
+ ODPH_ERR("Too many input queues: %u/%u\n", appl_args->opt.num_input_queues,
+ capa.max_input_queues);
+ return -1;
+ }
+
+ if (appl_args->opt.num_output_queues > capa.max_output_queues) {
+ ODPH_ERR("Too many output queues: %u/%u\n", appl_args->opt.num_output_queues,
+ capa.max_output_queues);
+ return -1;
+ }
+
+ ret = odp_pktio_close(pktio);
+ if (ret) {
+ ODPH_ERR("Closing pktio failed: %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_pool_destroy(pool);
+ if (ret) {
+ ODPH_ERR("Destroying pktio pool failed: %d\n", ret);
+ return -1;
+ }
+
+ if (check_cls_capa() < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Print system information and the effective test configuration.
+ * Note: appl_args and gbl_args refer to the same shared-memory object;
+ * the function mixes both names. */
+static void print_info(appl_args_t *appl_args)
+{
+ odp_sys_info_print();
+
+ printf("\n"
+ "odp_bench_pktio_sp options\n"
+ "--------------------------\n");
+
+ printf("CPU mask: %s\n", gbl_args->cpumask_str);
+ printf("Interface: %s\n", gbl_args->opt.name);
+
+ printf("Input mode: ");
+ if (appl_args->opt.in_mode == ODP_PKTIN_MODE_SCHED)
+ printf("sched\n");
+ else
+ printf("direct\n");
+
+ /* ODP_PKTOUT_MODE_QUEUE is reported as "plain" (plain event queue) */
+ printf("Output mode: ");
+ if (appl_args->opt.out_mode == ODP_PKTOUT_MODE_QUEUE)
+ printf("plain\n");
+ else
+ printf("direct\n");
+
+ printf("Input queues: %u\n", gbl_args->opt.num_input_queues);
+ printf("Output queues: %u\n", gbl_args->opt.num_output_queues);
+ printf("PMRs: %u\n", gbl_args->opt.num_pmr);
+ /* NOTE(review): rounds is printed with %u in check_args(); %d here looks
+ * like a signedness mismatch - confirm the field type */
+ printf("Test rounds: %d\n", gbl_args->opt.rounds);
+ printf("\n");
+}
+
+/* Program entry point: initialize ODP, parse and validate options, run the
+ * benchmark suite in a single worker thread, then tear everything down.
+ * Returns EXIT_SUCCESS or EXIT_FAILURE. */
+int main(int argc, char *argv[])
+{
+ odph_helper_options_t helper_options;
+ odph_thread_t worker_thread;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask, default_mask;
+ odp_instance_t instance;
+ odp_init_t init_param;
+ int cpu;
+ int ret;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("Global init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = odp_schedule_config(NULL);
+ if (ret) {
+ ODPH_ERR("Schedule config failed: %d\n", ret);
+ exit(EXIT_FAILURE);
+ }
+
+ if (setup_sig_handler()) {
+ ODPH_ERR("Signal handler setup failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(appl_args_t), ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared mem reserve failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ gbl_args = odp_shm_addr(shm);
+ if (gbl_args == NULL) {
+ ODPH_ERR("Shared mem alloc failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(gbl_args, 0, sizeof(appl_args_t));
+
+ /* Parse and store the application arguments.
+ * ret == 1 means help was printed; cleanup still runs and the process
+ * exits successfully (only ret < 0 maps to EXIT_FAILURE below). */
+ ret = parse_args(argc, argv);
+ if (ret)
+ goto exit;
+
+ if (check_args(gbl_args))
+ goto exit;
+
+ /* Hand the test case table and options to the common suite runner */
+ bench_tm_suite_init(&gbl_args->suite);
+ gbl_args->suite.bench = test_suite;
+ gbl_args->suite.num_bench = ODPH_ARRAY_SIZE(test_suite);
+ gbl_args->suite.rounds = gbl_args->opt.rounds;
+ gbl_args->suite.bench_idx = gbl_args->opt.case_idx;
+
+ /* Get default worker cpumask */
+ if (odp_cpumask_default_worker(&default_mask, 1) != 1) {
+ ODPH_ERR("Unable to allocate worker thread\n");
+ ret = -1;
+ goto exit;
+ }
+
+ (void)odp_cpumask_to_str(&default_mask, gbl_args->cpumask_str,
+ sizeof(gbl_args->cpumask_str));
+
+ print_info(gbl_args);
+
+ memset(&worker_thread, 0, sizeof(odph_thread_t));
+
+ /* Create worker thread pinned to the first CPU of the worker mask */
+ cpu = odp_cpumask_first(&default_mask);
+
+ odp_cpumask_zero(&cpumask);
+ odp_cpumask_set(&cpumask, cpu);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = bench_tm_run;
+ thr_param.arg = &gbl_args->suite;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+ odph_thread_join(&worker_thread, 1);
+
+ /* Suite result becomes the process exit status */
+ ret = gbl_args->suite.retval;
+
+exit:
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shared mem free failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (ret < 0)
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
diff --git a/test/performance/odp_bench_timer.c b/test/performance/odp_bench_timer.c
new file mode 100644
index 000000000..65c7a9168
--- /dev/null
+++ b/test/performance/odp_bench_timer.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_bench_timer.c
+ *
+ * Microbenchmark application for timer API functions
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* Needed for sigaction */
+#endif
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "bench_common.h"
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Number of API function calls per test case */
+#define REPEAT_COUNT 1000
+
+/* Default number of rounds per test case */
+#define ROUNDS 1000u
+
+/** User area size in bytes */
+#define UAREA_SIZE 8
+
+/** Timer duration in nsec */
+#define TIMER_NSEC 50000000
+
+/* Build a bench_info_t entry: the printed name defaults to the function
+ * name; 'max' and 'alt_name' presumably override rounds and the printed
+ * description - confirm against bench_common */
+#define BENCH_INFO(run_fn, max, alt_name) \
+ {.name = #run_fn, .run = run_fn, .max_rounds = max, .desc = alt_name}
+
+/* All application state, allocated from shared memory */
+typedef struct {
+ /* Command line options */
+ struct {
+ /* Clock source to be used */
+ int clk_src;
+
+ /* Measure time vs CPU cycles */
+ int time;
+
+ /* Benchmark index to run indefinitely */
+ int bench_idx;
+
+ /* Rounds per test case */
+ uint32_t rounds;
+
+ } opt;
+
+ /* Common benchmark suite data */
+ bench_suite_t suite;
+
+ /* Timer resources created by create_timer(), shared by all test cases */
+ odp_timer_pool_t timer_pool;
+ odp_timer_t timer;
+ odp_queue_t queue;
+ odp_pool_t pool;
+ /* Expired timeout and its event, set up by wait_timer() */
+ odp_timeout_t timeout;
+ odp_event_t event;
+ /* Timer duration actually used (TIMER_NSEC clamped to pool limits) */
+ uint64_t timer_nsec;
+ /* Sample tick/nsec values used as conversion inputs */
+ uint64_t tick;
+ uint64_t nsec;
+ /* Measured tick frequency, for the info print only */
+ double tick_hz;
+ /* Non-zero when a plain queue is used instead of a scheduled one */
+ int plain_queue;
+
+ /* Test case input / output data */
+ uint64_t a1[REPEAT_COUNT];
+ odp_event_t ev[REPEAT_COUNT];
+ odp_timeout_t tmo[REPEAT_COUNT];
+ odp_timer_t tim[REPEAT_COUNT];
+
+ /* CPU mask as string */
+ char cpumask_str[ODP_CPUMASK_STR_SIZE];
+
+} gbl_args_t;
+
+/* Pointer to the global state in shared memory */
+static gbl_args_t *gbl_args;
+
+/* SIGINT handler: request the worker to stop via the shared exit flag */
+static void sig_handler(int signo ODP_UNUSED)
+{
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->suite.exit_worker, 1);
+}
+
+/* Install the SIGINT handler. Returns 0 on success, -1 on failure. */
+static int setup_sig_handler(void)
+{
+ struct sigaction action;
+
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = sig_handler;
+
+ /* No additional signals blocked. By default, the signal which triggered
+ * the handler is blocked. */
+ if (sigemptyset(&action.sa_mask))
+ return -1;
+
+ if (sigaction(SIGINT, &action, NULL))
+ return -1;
+
+ return 0;
+}
+
+/* Test cases: each calls one timer API function REPEAT_COUNT times and
+ * returns the number of calls made. Results are written to the global
+ * arrays, and for handle conversions one result is also folded into
+ * suite.dummy so the calls cannot be optimized away. */
+
+/* Measure odp_timer_current_tick() */
+static int timer_current_tick(void)
+{
+ int i;
+ odp_timer_pool_t timer_pool = gbl_args->timer_pool;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timer_current_tick(timer_pool);
+
+ return i;
+}
+
+/* Measure odp_timer_tick_to_ns() */
+static int timer_tick_to_ns(void)
+{
+ int i;
+ odp_timer_pool_t timer_pool = gbl_args->timer_pool;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t tick = gbl_args->tick;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timer_tick_to_ns(timer_pool, tick);
+
+ return i;
+}
+
+/* Measure odp_timer_ns_to_tick() */
+static int timer_ns_to_tick(void)
+{
+ int i;
+ odp_timer_pool_t timer_pool = gbl_args->timer_pool;
+ uint64_t *a1 = gbl_args->a1;
+ uint64_t nsec = gbl_args->nsec;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timer_ns_to_tick(timer_pool, nsec);
+
+ return i;
+}
+
+/* Measure odp_timeout_to_event() */
+static int timeout_to_event(void)
+{
+ int i;
+ odp_event_t *ev = gbl_args->ev;
+ odp_timeout_t timeout = gbl_args->timeout;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ ev[i] = odp_timeout_to_event(timeout);
+
+ gbl_args->suite.dummy += odp_event_to_u64(ev[0]);
+
+ return i;
+}
+
+/* Measure odp_timeout_from_event() */
+static int timeout_from_event(void)
+{
+ int i;
+ odp_event_t ev = gbl_args->event;
+ odp_timeout_t *tmo = gbl_args->tmo;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ tmo[i] = odp_timeout_from_event(ev);
+
+ gbl_args->suite.dummy += odp_timeout_to_u64(tmo[0]);
+
+ return i;
+}
+
+/* Measure odp_timeout_timer() */
+static int timeout_timer(void)
+{
+ int i;
+ odp_timeout_t timeout = gbl_args->timeout;
+ odp_timer_t *tim = gbl_args->tim;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ tim[i] = odp_timeout_timer(timeout);
+
+ gbl_args->suite.dummy += odp_timer_to_u64(tim[0]);
+
+ return i;
+}
+
+/* Measure odp_timeout_tick() */
+static int timeout_tick(void)
+{
+ int i;
+ odp_timeout_t timeout = gbl_args->timeout;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timeout_tick(timeout);
+
+ return i;
+}
+
+/* Measure odp_timeout_user_ptr() */
+static int timeout_user_ptr(void)
+{
+ int i;
+ odp_timeout_t timeout = gbl_args->timeout;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = (uintptr_t)odp_timeout_user_ptr(timeout);
+
+ return i;
+}
+
+/* Measure odp_timeout_user_area() */
+static int timeout_user_area(void)
+{
+ int i;
+ odp_timeout_t timeout = gbl_args->timeout;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = (uintptr_t)odp_timeout_user_area(timeout);
+
+ return i;
+}
+
+/* Measure odp_timeout_to_u64() */
+static int timeout_to_u64(void)
+{
+ int i;
+ odp_timeout_t timeout = gbl_args->timeout;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timeout_to_u64(timeout);
+
+ return i;
+}
+
+/* Measure odp_timer_to_u64() */
+static int timer_to_u64(void)
+{
+ int i;
+ odp_timer_t timer = gbl_args->timer;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timer_to_u64(timer);
+
+ return i;
+}
+
+/* Measure odp_timer_pool_to_u64() */
+static int timer_pool_to_u64(void)
+{
+ int i;
+ odp_timer_pool_t tp = gbl_args->timer_pool;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ a1[i] = odp_timer_pool_to_u64(tp);
+
+ return i;
+}
+
+/* Table of benchmarked functions. All entries use max_rounds 0 (no
+ * per-case override) and the default printed name.
+ * NOTE(review): could be static; it is only referenced within this file. */
+bench_info_t test_suite[] = {
+ BENCH_INFO(timer_current_tick, 0, NULL),
+ BENCH_INFO(timer_tick_to_ns, 0, NULL),
+ BENCH_INFO(timer_ns_to_tick, 0, NULL),
+ BENCH_INFO(timeout_to_event, 0, NULL),
+ BENCH_INFO(timeout_from_event, 0, NULL),
+ BENCH_INFO(timeout_timer, 0, NULL),
+ BENCH_INFO(timeout_tick, 0, NULL),
+ BENCH_INFO(timeout_user_ptr, 0, NULL),
+ BENCH_INFO(timeout_user_area, 0, NULL),
+ BENCH_INFO(timeout_to_u64, 0, NULL),
+ BENCH_INFO(timer_to_u64, 0, NULL),
+ BENCH_INFO(timer_pool_to_u64, 0, NULL),
+};
+
+/* Print usage information.
+ * The -s values map directly to odp_timer_clk_src_t enumerators. */
+static void usage(void)
+{
+ printf("\n"
+ "ODP timer API micro benchmarks\n"
+ "\n"
+ "Options:\n"
+ " -s, --clk_src Clock source select (default 0):\n"
+ " 0: ODP_CLOCK_DEFAULT\n"
+ " 1: ODP_CLOCK_SRC_1, ...\n"
+ " -t, --time <opt> Time measurement. 0: measure CPU cycles (default), 1: measure time\n"
+ " -i, --index <idx> Benchmark index to run indefinitely.\n"
+ " -r, --rounds <num> Run each test case 'num' times (default %u).\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n", ROUNDS);
+}
+
+/* Parse command line arguments.
+ * Returns 0 on success, 1 if help was requested (caller exits
+ * successfully), -1 on bad input.
+ * NOTE(review): numeric options use atoi(), which cannot report parse
+ * errors; strtol() would reject garbage input. */
+static int parse_args(int argc, char *argv[])
+{
+ int opt;
+ int long_index;
+ static const struct option longopts[] = {
+ {"clk_src", required_argument, NULL, 's'},
+ {"time", required_argument, NULL, 't'},
+ {"index", required_argument, NULL, 'i'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "s:t:i:r:h";
+
+ /* Defaults before option processing */
+ gbl_args->opt.clk_src = ODP_CLOCK_DEFAULT;
+ gbl_args->opt.time = 0; /* Measure CPU cycles */
+ gbl_args->opt.bench_idx = 0; /* Run all benchmarks */
+ gbl_args->opt.rounds = ROUNDS;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 's':
+ gbl_args->opt.clk_src = atoi(optarg);
+ break;
+ case 't':
+ gbl_args->opt.time = atoi(optarg);
+ break;
+ case 'i':
+ gbl_args->opt.bench_idx = atoi(optarg);
+ break;
+ case 'r':
+ gbl_args->opt.rounds = atoi(optarg);
+ break;
+ case 'h':
+ usage();
+ return 1;
+ default:
+ ODPH_ERR("Bad option. Use -h for help.\n");
+ return -1;
+ }
+ }
+
+ if (gbl_args->opt.rounds < 1) {
+ ODPH_ERR("Invalid test cycle repeat count: %u\n", gbl_args->opt.rounds);
+ return -1;
+ }
+
+ /* bench_idx is 1-based; 0 means run all, index == array size is valid */
+ if (gbl_args->opt.bench_idx < 0 ||
+ gbl_args->opt.bench_idx > (int)ODPH_ARRAY_SIZE(test_suite)) {
+ ODPH_ERR("Bad bench index %i\n", gbl_args->opt.bench_idx);
+ return -1;
+ }
+
+ optind = 1; /* Reset 'extern optind' from the getopt lib */
+
+ return 0;
+}
+
+/* Print system and application info */
+static void print_info(void)
+{
+ odp_sys_info_print();
+
+ printf("\n"
+ "odp_bench_timer options\n"
+ "-----------------------\n");
+
+ printf("CPU mask: %s\n", gbl_args->cpumask_str);
+ printf("Clock source: %i\n", gbl_args->opt.clk_src);
+ printf("Measurement unit: %s\n", gbl_args->opt.time ? "nsec" : "CPU cycles");
+ printf("Test rounds: %u\n", gbl_args->opt.rounds);
+ printf("Timer duration: %" PRIu64 " nsec\n", gbl_args->timer_nsec);
+ /* tick_hz is an estimate measured over ~200 ms in create_timer() */
+ printf("Timer tick freq: %.2f Hz\n", gbl_args->tick_hz);
+ printf("\n");
+}
+
+/* Create the timer pool, timeout pool, queue and the single timer used
+ * by the test cases, and measure the timer tick frequency for the info
+ * print. Returns 0 on success, -1 on failure; partially created
+ * resources are released by main()'s exit path. */
+static int create_timer(void)
+{
+ odp_pool_capability_t pool_capa;
+ odp_timer_capability_t timer_capa;
+ odp_timer_clk_src_t clk_src;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp;
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_timeout_t tmo;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ odp_timer_t timer;
+ uint64_t t1, t2, diff, tick1, tick2;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capa failed\n");
+ return -1;
+ }
+
+ clk_src = gbl_args->opt.clk_src;
+ if (odp_timer_capability(clk_src, &timer_capa)) {
+ ODPH_ERR("Timer capa failed\n");
+ return -1;
+ }
+
+ /* Use the pool parameters reported for the maximum resolution */
+ odp_timer_pool_param_init(&tp_param);
+ tp_param.clk_src = clk_src;
+ tp_param.res_ns = timer_capa.max_res.res_ns;
+ tp_param.min_tmo = timer_capa.max_res.min_tmo;
+ tp_param.max_tmo = timer_capa.max_res.max_tmo;
+ tp_param.num_timers = 10;
+
+ tp = odp_timer_pool_create("bench_timer", &tp_param);
+
+ if (tp == ODP_TIMER_POOL_INVALID) {
+ ODPH_ERR("Timer pool create failed\n");
+ return -1;
+ }
+
+ if (odp_timer_pool_start_multi(&tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
+
+ gbl_args->timer_pool = tp;
+
+ /* Clamp the requested timer duration to the pool's timeout limits */
+ gbl_args->timer_nsec = TIMER_NSEC;
+ if (TIMER_NSEC < tp_param.min_tmo)
+ gbl_args->timer_nsec = tp_param.min_tmo;
+ else if (TIMER_NSEC > tp_param.max_tmo)
+ gbl_args->timer_nsec = tp_param.max_tmo;
+
+ /* Timeout pool; user area clamped to the implementation's maximum */
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = 10;
+ pool_param.tmo.uarea_size = UAREA_SIZE;
+ if (UAREA_SIZE > pool_capa.tmo.max_uarea_size)
+ pool_param.tmo.uarea_size = pool_capa.tmo.max_uarea_size;
+
+ pool = odp_pool_create("bench_timer", &pool_param);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Timeout pool create failed\n");
+ return -1;
+ }
+
+ gbl_args->pool = pool;
+
+ tmo = odp_timeout_alloc(pool);
+
+ if (tmo == ODP_TIMEOUT_INVALID) {
+ ODPH_ERR("Timeout alloc failed\n");
+ return -1;
+ }
+
+ /* Sample tick/nsec conversion inputs for the conversion test cases */
+ gbl_args->timeout = tmo;
+ gbl_args->tick = odp_timer_current_tick(tp);
+ gbl_args->nsec = odp_timer_tick_to_ns(tp, gbl_args->tick);
+
+ /* Measure timer tick frequency for test information */
+ t1 = odp_time_global_strict_ns();
+ tick1 = odp_timer_current_tick(tp);
+
+ odp_time_wait_ns(200 * ODP_TIME_MSEC_IN_NS);
+
+ tick2 = odp_timer_current_tick(tp);
+ t2 = odp_time_global_strict_ns();
+ diff = t2 - t1;
+
+ if (diff)
+ gbl_args->tick_hz = (tick2 - tick1) / ((double)diff / ODP_TIME_SEC_IN_NS);
+
+ /* Scheduled atomic queue by default; fall back to a plain queue when
+ * the timer implementation does not support scheduled queues */
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ if (timer_capa.queue_type_sched == 0) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ gbl_args->plain_queue = 1;
+ }
+
+ queue = odp_queue_create("bench_timer", &queue_param);
+ if (queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Queue create failed\n");
+ return -1;
+ }
+
+ gbl_args->queue = queue;
+
+ /* Arbitrary non-NULL marker, read back via odp_timeout_user_ptr() */
+ timer = odp_timer_alloc(tp, queue, (void *)(uintptr_t)0xdeadbeef);
+ if (timer == ODP_TIMER_INVALID) {
+ ODPH_ERR("Timer alloc failed\n");
+ return -1;
+ }
+
+ gbl_args->timer = timer;
+
+ return 0;
+}
+
+/* Start the single timer and wait for its timeout event, either by
+ * polling the plain queue or through the scheduler. On success the
+ * global timeout/event handles are repopulated from the received event,
+ * so the test cases operate on an expired timeout with metadata filled
+ * in. Returns 0 on success, -1 on failure. */
+static int wait_timer(void)
+{
+ odp_timer_start_t start_param;
+ odp_timer_t timer = gbl_args->timer;
+ odp_timer_pool_t tp = gbl_args->timer_pool;
+ uint64_t wait_nsec = 2 * gbl_args->timer_nsec;
+ uint64_t sched_wait = odp_schedule_wait_time(wait_nsec);
+ odp_event_t ev;
+ uint64_t start;
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = odp_timer_ns_to_tick(tp, gbl_args->timer_nsec);
+ start_param.tmo_ev = odp_timeout_to_event(gbl_args->timeout);
+
+ if (odp_timer_start(timer, &start_param) != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer start failed\n");
+ return -1;
+ }
+
+ /* The timeout event is now owned by the timer; invalidate the globals
+ * until it is received back below */
+ gbl_args->timeout = ODP_TIMEOUT_INVALID;
+ gbl_args->event = ODP_EVENT_INVALID;
+
+ /* Wait for timeout */
+ if (gbl_args->plain_queue) {
+ /* Poll the plain queue with a 2x timer duration deadline */
+ start = odp_time_global_ns();
+ while (1) {
+ ev = odp_queue_deq(gbl_args->queue);
+
+ if (ev != ODP_EVENT_INVALID)
+ break;
+
+ if ((odp_time_global_ns() - start) > wait_nsec) {
+ ODPH_ERR("Timeout event missing\n");
+ return -1;
+ }
+ }
+
+ gbl_args->event = ev;
+ } else {
+ ev = odp_schedule(NULL, sched_wait);
+
+ if (ev == ODP_EVENT_INVALID) {
+ ODPH_ERR("Timeout event missing\n");
+ return -1;
+ }
+
+ gbl_args->event = ev;
+
+ /* Free schedule context; no further events are expected */
+ if (odp_schedule(NULL, ODP_SCHED_NO_WAIT) != ODP_EVENT_INVALID) {
+ ODPH_ERR("Extra timeout event\n");
+ return -1;
+ }
+ }
+
+ if (odp_event_type(gbl_args->event) != ODP_EVENT_TIMEOUT) {
+ ODPH_ERR("Bad event type\n");
+ return -1;
+ }
+
+ gbl_args->timeout = odp_timeout_from_event(gbl_args->event);
+
+ return 0;
+}
+
+/* Program entry point: initialize ODP, parse options, create timer
+ * resources, expire one timeout for the test cases to inspect, run the
+ * benchmark suite in a single worker thread, then tear everything down.
+ * Returns EXIT_SUCCESS or EXIT_FAILURE. */
+int main(int argc, char *argv[])
+{
+ odph_helper_options_t helper_options;
+ odph_thread_t worker_thread;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ int cpu, i;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask, default_mask;
+ odp_instance_t instance;
+ odp_init_t init_param;
+ int ret = 0;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("Global init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (setup_sig_handler()) {
+ ODPH_ERR("Signal handler setup failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_schedule_config(NULL);
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(gbl_args_t), ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared mem reserve failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ gbl_args = odp_shm_addr(shm);
+ if (gbl_args == NULL) {
+ ODPH_ERR("Shared mem alloc failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Mark all handles invalid so the exit path can free only what was
+ * actually created */
+ memset(gbl_args, 0, sizeof(gbl_args_t));
+ gbl_args->timer_pool = ODP_TIMER_POOL_INVALID;
+ gbl_args->timer = ODP_TIMER_INVALID;
+ gbl_args->queue = ODP_QUEUE_INVALID;
+ gbl_args->pool = ODP_POOL_INVALID;
+ gbl_args->timeout = ODP_TIMEOUT_INVALID;
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ gbl_args->a1[i] = i;
+ gbl_args->ev[i] = ODP_EVENT_INVALID;
+ gbl_args->tmo[i] = ODP_TIMEOUT_INVALID;
+ gbl_args->tim[i] = ODP_TIMER_INVALID;
+ }
+
+ /* Parse and store the application arguments.
+ * ret == 1 means help was printed; cleanup still runs and the process
+ * exits successfully (only ret < 0 maps to EXIT_FAILURE below). */
+ ret = parse_args(argc, argv);
+ if (ret)
+ goto exit;
+
+ /* Hand the test case table and options to the common suite runner */
+ bench_suite_init(&gbl_args->suite);
+ gbl_args->suite.bench = test_suite;
+ gbl_args->suite.num_bench = ODPH_ARRAY_SIZE(test_suite);
+ gbl_args->suite.measure_time = !!gbl_args->opt.time;
+ gbl_args->suite.indef_idx = gbl_args->opt.bench_idx;
+ gbl_args->suite.rounds = gbl_args->opt.rounds;
+ gbl_args->suite.repeat_count = REPEAT_COUNT;
+
+ /* Get default worker cpumask */
+ if (odp_cpumask_default_worker(&default_mask, 1) != 1) {
+ ODPH_ERR("Unable to allocate worker thread\n");
+ ret = -1;
+ goto exit;
+ }
+
+ (void)odp_cpumask_to_str(&default_mask, gbl_args->cpumask_str,
+ sizeof(gbl_args->cpumask_str));
+
+ /* Create timer and other resources */
+ ret = create_timer();
+ if (ret)
+ goto exit;
+
+ print_info();
+
+ /* Start one timer and wait for the timeout event. Timer expiration fills in
+ * timeout event metadata. */
+ ret = wait_timer();
+ if (ret)
+ goto exit;
+
+ memset(&worker_thread, 0, sizeof(odph_thread_t));
+
+ /* Create worker thread pinned to the first CPU of the worker mask */
+ cpu = odp_cpumask_first(&default_mask);
+
+ odp_cpumask_zero(&cpumask);
+ odp_cpumask_set(&cpumask, cpu);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = bench_run;
+ thr_param.arg = &gbl_args->suite;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+ odph_thread_join(&worker_thread, 1);
+
+ /* Suite result becomes the process exit status */
+ ret = gbl_args->suite.retval;
+
+exit:
+ /* Release only the resources that were successfully created */
+ if (gbl_args->timeout != ODP_TIMEOUT_INVALID)
+ odp_timeout_free(gbl_args->timeout);
+
+ if (gbl_args->pool != ODP_POOL_INVALID)
+ odp_pool_destroy(gbl_args->pool);
+
+ if (gbl_args->timer != ODP_TIMER_INVALID) {
+ if (odp_timer_free(gbl_args->timer)) {
+ ODPH_ERR("Timer free failed\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (gbl_args->timer_pool != ODP_TIMER_POOL_INVALID)
+ odp_timer_pool_destroy(gbl_args->timer_pool);
+
+ if (gbl_args->queue != ODP_QUEUE_INVALID) {
+ if (odp_queue_destroy(gbl_args->queue)) {
+ ODPH_ERR("Queue destroy failed\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shared mem free failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global term failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (ret < 0)
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c
new file mode 100644
index 000000000..39eff620d
--- /dev/null
+++ b/test/performance/odp_cpu_bench.c
@@ -0,0 +1,837 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_cpu_bench.c
+ *
+ * Application for CPU stress testing
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Queues are divided into groups and tests packets are passed only between
+ * queues which belong to the same group. */
+#define MAX_GROUPS 64
+#define QUEUES_PER_GROUP 4
+#define PKTS_PER_QUEUE 256
+
+#define MAX_EVENT_BURST 32
+#define CRC_INIT_VAL 123456789
+#define PASS_PACKETS 10000
+
+/* Default number of entries in the test lookup table */
+#define DEF_LOOKUP_TBL_SIZE (1024 * 1024)
+
+#define MAX_WORKERS \
+ (((ODP_THREAD_COUNT_MAX - 1) > (MAX_GROUPS * QUEUES_PER_GROUP)) ? \
+ (MAX_GROUPS * QUEUES_PER_GROUP) : \
+ (ODP_THREAD_COUNT_MAX - 1))
+
+ODP_STATIC_ASSERT(MAX_WORKERS <= MAX_GROUPS * QUEUES_PER_GROUP,
+ "Not enough queues for all workers");
+
+/* Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+ strrchr((file_name), '/') + 1 : (file_name))
+
+/* Test dummy lookup table entry */
+typedef struct {
+ uint64_t idx;
+ uint32_t val0;
+ uint32_t val1;
+} lookup_entry_t;
+
+/* Test packet */
+typedef struct {
+ uint32_t seq;
+ uint32_t crc;
+ uint16_t group;
+} test_hdr_t;
+
+/* Parsed application arguments */
+typedef struct {
+ uint64_t lookup_tbl_size; /* Lookup table size */
+ int accuracy; /* Number of seconds between stats prints */
+ unsigned int cpu_count; /* CPU count */
+ int time; /* Time in seconds to run */
+} appl_args_t;
+
+/* Statistics */
+typedef union ODP_ALIGNED_CACHE {
+ struct {
+ /* Number of processed packets */
+ uint64_t pkts;
+ /* Number of dropped packets */
+ uint64_t dropped_pkts;
+ /* Time spent processing packets */
+ uint64_t nsec;
+ /* Cycles spent processing packets */
+ uint64_t cycles;
+ } s;
+
+ uint8_t padding[ODP_CACHE_LINE_SIZE];
+} stats_t;
+
+/* Thread specific data */
+typedef struct thread_args_t {
+ stats_t stats;
+ uint16_t idx;
+} thread_args_t;
+
+/* Grouping of all global data */
+typedef struct {
+ /* Thread specific arguments */
+ thread_args_t thread[MAX_WORKERS];
+ /* Barriers to synchronize main and workers */
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ /* Application (parsed) arguments */
+ appl_args_t appl;
+ /* Test queues */
+ odp_queue_t queue[MAX_GROUPS][QUEUES_PER_GROUP];
+ /* Test lookup table */
+ lookup_entry_t *lookup_tbl;
+ /* Break workers loop if set to 1 */
+ odp_atomic_u32_t exit_threads;
+} args_t;
+
+/* Global pointer to args */
+static args_t *gbl_args;
+
+static const uint8_t test_udp_packet[] = {
+ 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x45, 0x00,
+ 0x02, 0x1C, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0xF7, 0x7C, 0xC0, 0xA8, 0x00, 0x01, 0xC0, 0xA8,
+ 0x00, 0x02, 0x04, 0xD2, 0x1A, 0x82, 0x02, 0x08,
+ 0x24, 0x1E, 0xC9, 0x56, 0xB4, 0xD6, 0x4B, 0x64,
+ 0xB3, 0x01, 0xA1, 0x97, 0x4D, 0xD1, 0xA4, 0x76,
+ 0xF5, 0x7B, 0x27, 0x22, 0x6C, 0xA9, 0xED, 0x29,
+ 0x6E, 0x02, 0x80, 0xF7, 0xC4, 0x2D, 0x2A, 0x96,
+ 0x2D, 0xF6, 0x02, 0x8E, 0x89, 0x9F, 0x8C, 0xF4,
+ 0x0D, 0xC5, 0xE5, 0x1F, 0xA1, 0x52, 0xC3, 0x4B,
+ 0x5C, 0x4C, 0xDF, 0x14, 0x05, 0x6A, 0xA8, 0xD7,
+ 0xAD, 0x4F, 0x22, 0xA6, 0xB8, 0xF9, 0x52, 0x5A,
+ 0xB8, 0xF9, 0xE2, 0x2C, 0x05, 0x2A, 0x6F, 0xF2,
+ 0xCA, 0xA1, 0xA7, 0xC3, 0x56, 0xE1, 0xDB, 0xC1,
+ 0xDB, 0x86, 0x26, 0x55, 0xAC, 0xBE, 0xE1, 0x3D,
+ 0x82, 0x86, 0xB9, 0xDE, 0x3E, 0xD3, 0x11, 0xAB,
+ 0x65, 0x6A, 0xED, 0x1B, 0x60, 0xBE, 0x69, 0x71,
+ 0xB2, 0xA8, 0x5B, 0xB1, 0x06, 0xE3, 0x48, 0x14,
+ 0xC9, 0x13, 0x73, 0xDA, 0xBE, 0xE4, 0x7A, 0x5F,
+ 0xC0, 0xE0, 0xCA, 0xF3, 0x7A, 0xCA, 0x3F, 0xC9,
+ 0x4A, 0xEE, 0x47, 0x76, 0x67, 0xF0, 0x0D, 0x3F,
+ 0x7F, 0x3D, 0x69, 0xEA, 0x39, 0x53, 0x7C, 0xE3,
+ 0xED, 0x78, 0x79, 0x47, 0x60, 0x95, 0xCB, 0xDC,
+ 0x26, 0x60, 0x46, 0xAC, 0x47, 0xDA, 0x4C, 0x4D,
+ 0x0F, 0xE1, 0x68, 0x43, 0xBC, 0xCD, 0x4E, 0xFE,
+ 0x2E, 0xD6, 0xC2, 0x6E, 0x63, 0xEA, 0xB3, 0x98,
+ 0xCA, 0x8F, 0x7F, 0x05, 0xDF, 0x72, 0x8F, 0x6E,
+ 0x3E, 0x6D, 0xC7, 0x94, 0x59, 0x9D, 0x15, 0x5B,
+ 0xB8, 0x02, 0x52, 0x4F, 0x68, 0x3A, 0xF1, 0xFF,
+ 0xA9, 0xA4, 0x30, 0x29, 0xE0, 0x1C, 0xA0, 0x1B,
+ 0x50, 0xAB, 0xFD, 0x06, 0x84, 0xD4, 0x33, 0x51,
+ 0x01, 0xB3, 0x5F, 0x49, 0x5F, 0x21, 0xA0, 0xA1,
+ 0xC9, 0x08, 0xB3, 0xDF, 0x72, 0x9B, 0x5B, 0x70,
+ 0x89, 0x96, 0x08, 0x25, 0x88, 0x1E, 0xED, 0x52,
+ 0xDC, 0x98, 0xA0, 0xB8, 0x83, 0x2A, 0xA0, 0x90,
+ 0x45, 0xC9, 0x77, 0xD2, 0x19, 0xD7, 0x6B, 0xAB,
+ 0x49, 0x67, 0x7C, 0xD1, 0xE0, 0x23, 0xA2, 0x36,
+ 0xB2, 0x91, 0x3B, 0x23, 0x3B, 0x03, 0x36, 0xAF,
+ 0xAD, 0x81, 0xFA, 0x6F, 0x68, 0xD5, 0xBE, 0x73,
+ 0x1D, 0x56, 0x8A, 0xE8, 0x1A, 0xB4, 0xA8, 0x7C,
+ 0xF3, 0x82, 0x10, 0xD0, 0xF2, 0x1D, 0x9C, 0xEA,
+ 0xAB, 0xE7, 0xEC, 0x53, 0x6D, 0x52, 0xBD, 0x29,
+ 0x86, 0x21, 0xCE, 0xAA, 0xF3, 0x68, 0xA6, 0xEC,
+ 0x7E, 0xCA, 0x6F, 0xEB, 0xE1, 0x81, 0x80, 0x7C,
+ 0xF3, 0xE5, 0x22, 0xA0, 0x91, 0x08, 0xB7, 0x35,
+ 0x15, 0x87, 0x0C, 0x77, 0x31, 0x9C, 0x2F, 0x73,
+ 0xCE, 0x29, 0x6F, 0xC6, 0xAC, 0x9F, 0x68, 0xB8,
+ 0x6A, 0xFC, 0xD3, 0xB5, 0x08, 0x98, 0xAE, 0xE4,
+ 0x20, 0x84, 0x24, 0x69, 0xA5, 0xF5, 0x4A, 0x9D,
+ 0x44, 0x26, 0x5A, 0xF9, 0x6B, 0x5E, 0x5D, 0xC8,
+ 0x6F, 0xD4, 0x62, 0x91, 0xE5, 0x8E, 0x80, 0x05,
+ 0xA1, 0x95, 0x09, 0xEA, 0xFE, 0x84, 0x6D, 0xC3,
+ 0x0D, 0xD4, 0x32, 0xA4, 0x38, 0xB2, 0xF7, 0x9D,
+ 0x58, 0xD3, 0x5D, 0x93, 0x5F, 0x67, 0x86, 0xE1,
+ 0xAF, 0xFF, 0xE9, 0xFE, 0xF4, 0x71, 0x63, 0xE3,
+ 0x3E, 0xE1, 0x7A, 0x80, 0x5A, 0x23, 0x4F, 0x5B,
+ 0x54, 0x21, 0x0E, 0xE2, 0xAF, 0x01, 0x2E, 0xA4,
+ 0xF5, 0x1F, 0x59, 0x96, 0x3E, 0x82, 0xF3, 0x44,
+ 0xDF, 0xA6, 0x7C, 0x64, 0x5D, 0xC7, 0x79, 0xA1,
+ 0x17, 0xE1, 0x06, 0x14, 0x3E, 0x1B, 0x46, 0xCA,
+ 0x71, 0xC8, 0x05, 0x62, 0xD0, 0x56, 0x23, 0x9B,
+ 0xBA, 0xFE, 0x6D, 0xA8, 0x03, 0x4C, 0x23, 0xD8,
+ 0x98, 0x8A, 0xE8, 0x9C, 0x93, 0x8E, 0xB7, 0x24,
+ 0x31, 0x2A, 0x81, 0x72, 0x8F, 0x13, 0xD4, 0x7E,
+ 0xEB, 0xB1, 0xEE, 0x33, 0xD9, 0xF4, 0x96, 0x5E,
+ 0x6C, 0x3D, 0x45, 0x9C, 0xE0, 0x71, 0xA3, 0xFA,
+ 0x17, 0x2B, 0xC3, 0x07, 0xD6, 0x86, 0xA2, 0x06,
+ 0xC5, 0x33, 0xF0, 0xEA, 0x25, 0x70, 0x68, 0x56,
+ 0xD5, 0xB0
+};
+
+/* SIGINT handler: ask all workers to stop via the shared exit flag */
+static void sig_handler(int signo ODP_UNUSED)
+{
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->exit_threads, 1);
+}
+
+/* Initialize a freshly allocated test packet: parse it, write the
+ * sequence number into the UDP payload, refresh the UDP checksum, and
+ * store sequence/group/CRC in the user-area test header. Aborts on
+ * parse/checksum failure (test setup error).
+ * NOTE(review): 'param' is only partially initialized; confirm that
+ * odp_packet_parse() ignores the remaining fields. */
+static inline void init_packet(odp_packet_t pkt, uint32_t seq, uint16_t group)
+{
+ uint32_t *payload;
+ test_hdr_t *hdr;
+ odp_packet_parse_param_t param;
+
+ param.proto = ODP_PROTO_ETH;
+ param.last_layer = ODP_PROTO_LAYER_ALL;
+ param.chksums.all_chksum = 0;
+ if (odp_packet_parse(pkt, 0, &param))
+ ODPH_ABORT("odp_packet_parse() failed\n");
+
+ /* Modify UDP payload and update checksum */
+ payload = odp_packet_offset(pkt, odp_packet_l4_offset(pkt) +
+ ODPH_UDPHDR_LEN, NULL, NULL);
+ *payload = seq;
+ if (odph_udp_chksum_set(pkt))
+ ODPH_ABORT("odph_udp_chksum_set() failed\n");
+
+ /* Test header is stored in user area */
+ hdr = odp_packet_user_area(pkt);
+ hdr->seq = seq;
+ hdr->group = group;
+ hdr->crc = odp_hash_crc32c(odp_packet_data(pkt), odp_packet_len(pkt),
+ CRC_INIT_VAL);
+}
+
+/* Process one event: verify the CRC stored in the user area, re-parse
+ * the packet with checksum verification enabled, mutate the UDP payload
+ * using a lookup table entry and the sequence number while patching the
+ * UDP checksum incrementally (one's-complement update, cf. RFC 1624),
+ * then recompute the stored CRC.
+ * Returns the next destination queue within the packet's group (seq is
+ * post-incremented to rotate across the group's queues), or
+ * ODP_QUEUE_INVALID if the event should be dropped. */
+static inline odp_queue_t work_on_event(odp_event_t event)
+{
+ odp_packet_t pkt;
+ odp_packet_parse_param_t param;
+ odph_udphdr_t *udp_hdr;
+ test_hdr_t *hdr;
+ lookup_entry_t *lookup_entry;
+ uint32_t *payload;
+ uint32_t crc;
+ uint32_t pkt_len;
+ uint8_t *data;
+ uint32_t new_val;
+ uint32_t old_val;
+
+ if (odp_event_type(event) != ODP_EVENT_PACKET)
+ return ODP_QUEUE_INVALID;
+
+ pkt = odp_packet_from_event(event);
+ hdr = odp_packet_user_area(pkt);
+ pkt_len = odp_packet_len(pkt);
+ data = odp_packet_data(pkt);
+
+ /* Integrity check: packet must match the CRC written by the sender */
+ crc = odp_hash_crc32c(data, pkt_len, CRC_INIT_VAL);
+ if (crc != hdr->crc)
+ ODPH_ERR("Error: Invalid packet crc\n");
+
+ param.proto = ODP_PROTO_ETH;
+ param.last_layer = ODP_PROTO_LAYER_ALL;
+ param.chksums.all_chksum = 1;
+ if (odp_packet_parse(pkt, 0, &param)) {
+ ODPH_ERR("Error: odp_packet_parse() failed\n");
+ return ODP_QUEUE_INVALID;
+ }
+
+ /* Modify packet data using lookup table value and sequence number, and
+ * update UDP checksum accordingly. */
+ lookup_entry = &gbl_args->lookup_tbl[(crc + hdr->seq) %
+ gbl_args->appl.lookup_tbl_size];
+ udp_hdr = odp_packet_l4_ptr(pkt, NULL);
+ payload = odp_packet_offset(pkt, odp_packet_l4_offset(pkt) +
+ ODPH_UDPHDR_LEN, NULL, NULL);
+ old_val = *payload;
+ *payload += lookup_entry->idx % 2 ? lookup_entry->val1 :
+ lookup_entry->val0;
+ new_val = *payload;
+ udp_hdr->chksum = ~(~udp_hdr->chksum + (-old_val) + new_val);
+
+ payload++;
+ old_val = *payload;
+ *payload += hdr->seq;
+ new_val = *payload;
+ udp_hdr->chksum = ~(~udp_hdr->chksum + (-old_val) + new_val);
+
+ /* Store the new CRC for the next hop's integrity check */
+ hdr->crc = odp_hash_crc32c(data, pkt_len, CRC_INIT_VAL);
+
+ return gbl_args->queue[hdr->group][hdr->seq++ % QUEUES_PER_GROUP];
+}
+
+/**
+ * Worker thread
+ *
+ * Schedules event bursts and forwards each packet to the next queue of
+ * its group until the main thread sets exit_threads. Records processed
+ * and dropped packet counts plus elapsed time and CPU cycles, then
+ * drains any events left in the queues after the termination barrier.
+ */
+static int run_thread(void *arg)
+{
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ odp_time_t t1, t2;
+ uint64_t c1, c2;
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ c1 = odp_cpu_cycles();
+ t1 = odp_time_local();
+
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ odp_event_t event_tbl[MAX_EVENT_BURST];
+ odp_queue_t dst_queue;
+ int num_events;
+ int i;
+
+ num_events = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
+ event_tbl, MAX_EVENT_BURST);
+ if (num_events <= 0)
+ continue;
+
+ for (i = 0; i < num_events; i++) {
+ odp_event_t event = event_tbl[i];
+
+ dst_queue = work_on_event(event);
+ if (odp_unlikely(dst_queue == ODP_QUEUE_INVALID)) {
+ stats->s.dropped_pkts++;
+ odp_event_free(event);
+ continue;
+ }
+
+ if (odp_unlikely(odp_queue_enq(dst_queue, event))) {
+ ODPH_ERR("Error: odp_queue_enq() failed\n");
+ stats->s.dropped_pkts++;
+ odp_event_free(event);
+ break;
+ }
+
+ stats->s.pkts++;
+ }
+ }
+
+ c2 = odp_cpu_cycles();
+ t2 = odp_time_local();
+
+ stats->s.cycles = c2 - c1;
+ stats->s.nsec = odp_time_diff_ns(t2, t1);
+
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Free remaining events in queues */
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL,
+ odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS));
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ return 0;
+}
+
+/*
+ * Print usage information
+ *
+ * progname: argv[0]; shown in examples with the path stripped by NO_PATH()
+ */
+static void usage(char *progname)
+{
+	printf("\n"
+	       "OpenDataPlane CPU benchmarking application.\n"
+	       "\n"
+	       "Usage: %s [options]\n"
+	       "\n"
+	       "  E.g. %s -c 4 -t 30\n"
+	       "Options:\n"
+	       "  -c, --count <number> CPU count, 0=all available, default=1\n"
+	       "  -t, --time <sec> Time in seconds to run\n"
+	       "                   (default is 10 second).\n"
+	       "  -a, --accuracy <sec> Time in seconds get print statistics\n"
+	       "                       (default is 1 second).\n"
+	       "  -l, --lookup_tbl <num> Number of entries in dummy lookup table\n"
+	       "                         (default is %d).\n"
+	       "  -h, --help Display help and exit.\n\n"
+	       "\n", NO_PATH(progname), NO_PATH(progname), DEF_LOOKUP_TBL_SIZE);
+}
+
+/**
+ * @internal Parse arguments
+ *
+ * @param argc      Argument count
+ * @param argv      Argument vector
+ * @param appl_args Parsed test arguments (output)
+ *
+ * Exits the process on invalid lookup table size or when help is requested.
+ */
+static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
+{
+	int opt;
+	int long_index;
+
+	/* Accept both --count (as documented by usage()) and the original
+	 * --cpu spelling for the 'c' option; previously only --cpu worked
+	 * while the help text advertised --count. */
+	static const struct option longopts[] = {
+		{"accuracy", required_argument, NULL, 'a'},
+		{"count", required_argument, NULL, 'c'},
+		{"cpu", required_argument, NULL, 'c'},
+		{"lookup_tbl", required_argument, NULL, 'l'},
+		{"time", required_argument, NULL, 't'},
+		{"help", no_argument, NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "+a:c:l:t:h";
+
+	/* Defaults */
+	appl_args->accuracy = 1; /* Get and print pps stats second */
+	appl_args->cpu_count = 1;
+	appl_args->lookup_tbl_size = DEF_LOOKUP_TBL_SIZE;
+	appl_args->time = 10; /* Loop forever if time to run is 0 */
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;	/* No more options */
+
+		switch (opt) {
+		case 'a':
+			appl_args->accuracy = atoi(optarg);
+			break;
+		case 'c':
+			appl_args->cpu_count = atoi(optarg);
+			break;
+		case 'l':
+			appl_args->lookup_tbl_size = atoi(optarg);
+			break;
+		case 't':
+			appl_args->time = atoi(optarg);
+			break;
+		case 'h':
+			usage(argv[0]);
+			exit(EXIT_SUCCESS);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	if (appl_args->lookup_tbl_size < 1) {
+		printf("At least one lookup table entry required.\n");
+		exit(EXIT_FAILURE);
+	}
+}
+
+/*
+ * Print statistics
+ *
+ * num_workers Number of worker threads
+ * thr_stats   Pointers to stats storage
+ * duration    Number of seconds to loop (0 = loop until interrupted)
+ * accuracy    Print interval in seconds (<= 0 disables periodic prints)
+ *
+ * Returns 0 when more than PASS_PACKETS packets were processed, -1 otherwise.
+ */
+static int print_stats(int num_workers, stats_t **thr_stats, int duration,
+		       int accuracy)
+{
+	uint64_t pkts;
+	uint64_t dropped;
+	uint64_t pkts_prev = 0;
+	uint64_t nsec = 0;
+	uint64_t cycles = 0;
+	int i;
+	int elapsed = 0;
+	int stats_enabled = 1;
+	int loop_forever = (duration == 0);
+
+	if (accuracy <= 0) {
+		stats_enabled = 0;
+		accuracy = 1;
+	}
+	/* Wait for all threads to be ready*/
+	odp_barrier_wait(&gbl_args->init_barrier);
+
+	do {
+		uint64_t pps;
+
+		sleep(accuracy);
+
+		pkts = 0;
+		dropped = 0;
+		for (i = 0; i < num_workers; i++) {
+			pkts += thr_stats[i]->s.pkts;
+			dropped += thr_stats[i]->s.dropped_pkts;
+		}
+
+		pps = (pkts - pkts_prev) / accuracy;
+
+		if (stats_enabled) {
+			printf("%.2f Mpps, ", pps / 1000000.0);
+
+			printf("%" PRIu64 " dropped\n", dropped);
+		}
+
+		pkts_prev = pkts;
+		elapsed += accuracy;
+	} while (!odp_atomic_load_u32(&gbl_args->exit_threads) &&
+		 (loop_forever || (elapsed < duration)));
+
+	/* Stop workers and wait until they have stored their final stats */
+	odp_atomic_store_u32(&gbl_args->exit_threads, 1);
+	odp_barrier_wait(&gbl_args->term_barrier);
+
+	pkts = 0;
+	dropped = 0;
+	for (i = 0; i < num_workers; i++) {
+		pkts += thr_stats[i]->s.pkts;
+		dropped += thr_stats[i]->s.dropped_pkts;
+		nsec += thr_stats[i]->s.nsec;
+		cycles += thr_stats[i]->s.cycles;
+	}
+
+	printf("\nRESULTS - per thread (Million packets per sec):\n");
+	printf("-----------------------------------------------\n");
+	printf("  avg        1         2         3         4         5         6         7         8         9        10\n");
+	printf("%6.2f ", pkts / (nsec / 1000.0));
+
+	for (i = 0; i < num_workers; i++) {
+		if (i != 0 && (i % 10) == 0)
+			printf("\n       ");
+
+		printf("%6.2f ", thr_stats[i]->s.pkts /
+		       (thr_stats[i]->s.nsec / 1000.0));
+	}
+	printf("\n\n");
+
+	nsec /= num_workers;
+	printf("RESULTS - total over %i threads:\n", num_workers);
+	printf("----------------------------------\n");
+	printf("  avg packets per sec: %.3f M\n", pkts / (nsec / 1000.0));
+	/* Guard the integer division: pkts may be zero if the run was
+	 * interrupted before any packet was processed (was a SIGFPE). */
+	printf("  avg cycles per packet: %" PRIu64 "\n",
+	       pkts ? cycles / pkts : 0);
+	printf("  dropped packets: %" PRIu64 "\n\n", dropped);
+
+	return pkts > PASS_PACKETS ? 0 : -1;
+}
+
+/* Zero the shared argument block and initialize its atomic exit flag */
+static void gbl_args_init(args_t *args)
+{
+	memset(args, 0, sizeof(args_t));
+	odp_atomic_init_u32(&args->exit_threads, 0);
+}
+
+/**
+ * Test main function
+ *
+ * Initializes ODP, creates the scheduled queues and the packet pool,
+ * preloads test packets and the CRC lookup table, starts the worker
+ * threads and prints periodic plus final statistics. Returns non-zero
+ * when fewer than PASS_PACKETS packets were processed.
+ */
+int main(int argc, char *argv[])
+{
+	stats_t *stats[MAX_WORKERS];
+	odph_helper_options_t helper_options;
+	odph_thread_t thread_tbl[MAX_WORKERS];
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param[MAX_WORKERS];
+	odp_cpumask_t cpumask;
+	odp_pool_capability_t pool_capa;
+	odp_pool_t pool;
+	odp_schedule_config_t schedule_config;
+	odp_shm_t shm;
+	odp_shm_t lookup_tbl_shm;
+	odp_pool_param_t params;
+	odp_instance_t instance;
+	odp_init_t init;
+	char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+	uint32_t num_pkts;
+	uint32_t num_groups;
+	uint32_t num_queues;
+	uint32_t pkts_per_group;
+	uint32_t pkt_len;
+	uint32_t init_val;
+	unsigned int num_workers;
+	unsigned int i, j;
+	int ret = 0;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_init_param_init(&init);
+
+	/* List features not to be used (may optimize performance) */
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Signal handler has to be registered before global init in case ODP
+	 * implementation creates internal threads/processes. */
+	signal(SIGINT, sig_handler);
+
+	if (odp_init_global(&instance, &init, NULL)) {
+		ODPH_ERR("Error: ODP global init failed\n");
+		return -1;
+	}
+
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("Error: ODP local init failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Shared memory for the global state (visible to all workers) */
+	shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE,
+			      0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: shared mem reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	gbl_args = odp_shm_addr(shm);
+	if (gbl_args == NULL) {
+		ODPH_ERR("Error: shared mem alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+	gbl_args_init(gbl_args);
+
+	/* Parse and store the application arguments */
+	parse_args(argc, argv, &gbl_args->appl);
+
+	lookup_tbl_shm = odp_shm_reserve("lookup_tbl_shm",
+					 sizeof(lookup_entry_t) *
+					 gbl_args->appl.lookup_tbl_size,
+					 ODP_CACHE_LINE_SIZE, 0);
+	if (lookup_tbl_shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: shared mem reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	gbl_args->lookup_tbl = odp_shm_addr(lookup_tbl_shm);
+	if (gbl_args->lookup_tbl == NULL) {
+		ODPH_ERR("Error: lookup table mem alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	printf("\n");
+	odp_sys_info_print();
+
+	/* Default to system CPU count unless user specified */
+	num_workers = MAX_WORKERS;
+	if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
+		num_workers = gbl_args->appl.cpu_count;
+
+	/* Get default worker cpumask */
+	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+	(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+
+	printf("num worker threads: %i\n", num_workers);
+	printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
+	printf("cpu mask: %s\n", cpumaskstr);
+
+	odp_schedule_config_init(&schedule_config);
+	odp_schedule_config(&schedule_config);
+
+	/* Make sure a single queue can store all the packets in a group */
+	pkts_per_group = QUEUES_PER_GROUP * PKTS_PER_QUEUE;
+	if (schedule_config.queue_size &&
+	    schedule_config.queue_size < pkts_per_group)
+		pkts_per_group = schedule_config.queue_size;
+
+	/* Divide queues evenly into groups */
+	if (schedule_config.num_queues < QUEUES_PER_GROUP) {
+		ODPH_ERR("Error: min %d queues required\n", QUEUES_PER_GROUP);
+		return -1;
+	}
+	num_queues = num_workers > schedule_config.num_queues ?
+			schedule_config.num_queues : num_workers;
+	num_groups = (num_queues + QUEUES_PER_GROUP - 1) / QUEUES_PER_GROUP;
+	if (num_groups * QUEUES_PER_GROUP > schedule_config.num_queues)
+		num_groups--;
+	num_queues = num_groups * QUEUES_PER_GROUP;
+
+	/* Create all scheduled (parallel) queues */
+	for (i = 0; i < num_groups; i++) {
+		for (j = 0; j < QUEUES_PER_GROUP; j++) {
+			odp_queue_t queue;
+			odp_queue_param_t param;
+
+			odp_queue_param_init(&param);
+			param.type = ODP_QUEUE_TYPE_SCHED;
+			param.sched.prio = odp_schedule_default_prio();
+			param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+			param.sched.group = ODP_SCHED_GROUP_ALL;
+			param.size = pkts_per_group;
+
+			queue = odp_queue_create(NULL, &param);
+			if (queue == ODP_QUEUE_INVALID) {
+				ODPH_ERR("Error: odp_queue_create() failed\n");
+				return -1;
+			}
+			gbl_args->queue[i][j] = queue;
+		}
+	}
+
+	/* Create packet pool */
+	if (odp_pool_capability(&pool_capa)) {
+		ODPH_ERR("Error: odp_pool_capability() failed\n");
+		exit(EXIT_FAILURE);
+	}
+	num_pkts = pkts_per_group * num_groups;
+	if (num_pkts > pool_capa.pkt.max_num)
+		num_pkts = pool_capa.pkt.max_num;
+
+	/* Whole test packet must fit in a single segment */
+	pkt_len = sizeof(test_udp_packet);
+	if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len)
+		pkt_len = pool_capa.pkt.max_len;
+
+	if (pool_capa.pkt.max_seg_len && pkt_len > pool_capa.pkt.max_seg_len)
+		pkt_len = pool_capa.pkt.max_seg_len;
+
+	if (pkt_len < sizeof(test_udp_packet)) {
+		ODPH_ERR("Error: min %dB single segment packets required\n",
+			 (int)sizeof(test_udp_packet));
+		exit(EXIT_FAILURE);
+	}
+
+	if (pool_capa.pkt.max_uarea_size &&
+	    pool_capa.pkt.max_uarea_size < sizeof(test_hdr_t)) {
+		ODPH_ERR("Error: min %dB of packet user area required\n",
+			 (int)sizeof(test_hdr_t));
+		exit(EXIT_FAILURE);
+	}
+
+	odp_pool_param_init(&params);
+	params.pkt.len = pkt_len;
+	params.pkt.max_len = pkt_len;
+	params.pkt.seg_len = pkt_len;
+	params.pkt.num = num_pkts;
+	params.pkt.max_num = num_pkts;
+	params.pkt.uarea_size = sizeof(test_hdr_t);
+	params.type = ODP_POOL_PACKET;
+	pool = odp_pool_create("pkt_pool", &params);
+	if (pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Error: packet pool create failed\n");
+		exit(EXIT_FAILURE);
+	}
+	odp_pool_print(pool);
+
+	printf("CPU bench args\n--------------\n");
+	printf("  workers: %u\n", num_workers);
+	printf("  queues: %" PRIu32 "\n", num_queues);
+	printf("  pkts: %" PRIu32 "\n", num_pkts);
+	printf("  pkt size: %" PRIu32 " B\n", pkt_len);
+	printf("  lookup entries: %" PRIu64 "\n\n",
+	       gbl_args->appl.lookup_tbl_size);
+
+	/* Spread test packets into queues */
+	for (i = 0; i < num_pkts; i++) {
+		odp_packet_t pkt = odp_packet_alloc(pool, pkt_len);
+		odp_event_t ev;
+		odp_queue_t queue;
+		uint16_t group = i % num_groups;
+
+		if (pkt == ODP_PACKET_INVALID) {
+			ODPH_ERR("Error: odp_packet_alloc() failed\n");
+			return -1;
+		}
+
+		odp_packet_copy_from_mem(pkt, 0, pkt_len, test_udp_packet);
+
+		init_packet(pkt, i, group);
+
+		queue = gbl_args->queue[group][i % QUEUES_PER_GROUP];
+
+		ev = odp_packet_to_event(pkt);
+		if (odp_queue_enq(queue, ev)) {
+			ODPH_ERR("Error: odp_queue_enq() failed\n");
+			return -1;
+		}
+	}
+
+	/* Workers plus this control thread synchronize on these barriers */
+	odp_barrier_init(&gbl_args->init_barrier, num_workers + 1);
+	odp_barrier_init(&gbl_args->term_barrier, num_workers + 1);
+
+	/* Initialize lookup table */
+	init_val = CRC_INIT_VAL;
+	for (i = 0; i < gbl_args->appl.lookup_tbl_size; i++) {
+		uint32_t *val0 = &gbl_args->lookup_tbl[i].val0;
+		uint32_t *val1 = &gbl_args->lookup_tbl[i].val1;
+
+		gbl_args->lookup_tbl[i].idx = i;
+
+		/* Chain CRCs so table values are pseudo-random yet
+		 * deterministic across runs */
+		*val0 = i;
+		*val0 = odp_hash_crc32c(val0, sizeof(uint32_t), init_val);
+		*val1 = odp_hash_crc32c(val0, sizeof(uint32_t), init_val);
+		init_val = *val1;
+	}
+
+	/* Create worker threads */
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = instance;
+	thr_common.cpumask = &cpumask;
+
+	for (i = 0; i < num_workers; i++) {
+		gbl_args->thread[i].idx = i;
+		stats[i] = &gbl_args->thread[i].stats;
+
+		odph_thread_param_init(&thr_param[i]);
+		thr_param[i].start = run_thread;
+		thr_param[i].arg = &gbl_args->thread[i];
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+	}
+
+	memset(thread_tbl, 0, sizeof(thread_tbl));
+	/* NOTE(review): odph_thread_create() return value is not checked */
+	odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
+	ret = print_stats(num_workers, stats, gbl_args->appl.time,
+			  gbl_args->appl.accuracy);
+
+	/* Master thread waits for other threads to exit */
+	odph_thread_join(thread_tbl, num_workers);
+
+	for (i = 0; i < num_groups; i++) {
+		for (j = 0; j < QUEUES_PER_GROUP; j++) {
+			if (odp_queue_destroy(gbl_args->queue[i][j])) {
+				ODPH_ERR("Error: queue destroy\n");
+				exit(EXIT_FAILURE);
+			}
+		}
+	}
+	gbl_args = NULL;
+	odp_mb_full();
+
+	if (odp_pool_destroy(pool)) {
+		ODPH_ERR("Error: pool destroy\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Error: shm free\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_shm_free(lookup_tbl_shm)) {
+		ODPH_ERR("Error: shm free\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		ODPH_ERR("Error: term local\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(instance)) {
+		ODPH_ERR("Error: term global\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return ret;
+}
diff --git a/test/performance/odp_cpu_bench_run.sh b/test/performance/odp_cpu_bench_run.sh
new file mode 100755
index 000000000..c33e0b38e
--- /dev/null
+++ b/test/performance/odp_cpu_bench_run.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# Copyright (c) 2022, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# TEST_DIR may be exported by the build system; default to this script's dir
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+# Use short run time in make check
+
+$TEST_DIR/odp_cpu_bench${EXEEXT} -t 1
+
+if [ $? -ne 0 ] ; then
+	echo Test FAILED
+	exit 1
+fi
+
+exit 0
diff --git a/test/performance/odp_crc.c b/test/performance/odp_crc.c
new file mode 100644
index 000000000..89e2e971f
--- /dev/null
+++ b/test/performance/odp_crc.c
@@ -0,0 +1,308 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_crc.c
+ *
+ * Performance test application for CRC hash APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define KB 1024ull
+#define MB (1024ull * 1024ull)
+
+/* Command line options */
+typedef struct {
+	uint32_t size;   /* buffer size in KB */
+	uint32_t rounds; /* test rounds, executed in batches of 8 */
+	uint32_t offset; /* data offset from the page-aligned buffer start */
+	uint32_t test;   /* API selection: 0=both, 1=crc32c, 2=crc32 */
+} options_t;
+
+/* Active options (filled by parse_options()) and their defaults */
+static options_t options;
+static const options_t options_def = {
+	.size = 16,
+	.rounds = 10000,
+	.offset = 0,
+	.test = 0,
+};
+
+/* Print command line help. All default values shown come from options_def. */
+static void print_usage(void)
+{
+	printf("\n"
+	       "CRC performance test\n"
+	       "\n"
+	       "Usage: odp_crc_perf [options]\n"
+	       "\n"
+	       "  -s, --size Size of buffer in KB (default %u)\n"
+	       "  -r, --rounds Number of test rounds (default %u)\n"
+	       "  Rounded down to nearest multiple of 8\n"
+	       "  -o, --offset Offset of data (default %u)\n"
+	       "  -t, --test Which API to test (default %u)\n"
+	       "  0: both\n"
+	       "  1: odp_hash_crc32c\n"
+	       "  2: odp_hash_crc32\n"
+	       "  -h, --help This help\n"
+	       "\n",
+	       options_def.size, options_def.rounds, options_def.offset,
+	       options_def.test); /* was options.test: the printed default
+				   * must not depend on already-parsed input */
+}
+
+/* Parse command line options into the global 'options'. Returns 0 on
+ * success, -1 on invalid input or when help was printed. */
+static int parse_options(int argc, char *argv[])
+{
+	int opt;
+	int long_index;
+	int ret = 0;
+
+	static const struct option longopts[] = {
+		{ "size", required_argument, NULL, 's' },
+		{ "rounds", required_argument, NULL, 'r' },
+		{ "offset", required_argument, NULL, 'o' },
+		{ "test", required_argument, NULL, 't' },
+		{ "help", no_argument, NULL, 'h' },
+		{ NULL, 0, NULL, 0 }
+	};
+
+	static const char *shortopts = "+s:r:o:t:h";
+
+	/* Start from defaults; command line values override below */
+	options = options_def;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 's':
+			/* NOTE(review): atol() does no error checking and
+			 * negative input wraps when stored into uint32_t —
+			 * confirm acceptable for a test tool */
+			options.size = atol(optarg);
+			break;
+		case 'r':
+			options.rounds = atol(optarg);
+			break;
+		case 'o':
+			options.offset = atol(optarg);
+			break;
+		case 't':
+			options.test = atol(optarg);
+			break;
+		case 'h':
+			/* fall through */
+		default:
+			print_usage();
+			ret = -1;
+			break;
+		}
+	}
+
+	/* Validate parsed values */
+	if (options.size < 1) {
+		ODPH_ERR("Invalid size: %" PRIu32 "\n", options.size);
+		return -1;
+	}
+
+	if (options.offset > 4 * KB) {
+		ODPH_ERR("Invalid offset: %" PRIu32 "\n", options.offset);
+		return -1;
+	}
+
+	if (options.test > 2) {
+		ODPH_ERR("Invalid API to test: %" PRIu32 "\n", options.test);
+		return -1;
+	}
+
+	return ret;
+}
+
+/* Print measured throughput: total MB processed over the executed rounds
+ * and the resulting MB/s, derived from the elapsed time in 'nsec'. */
+static void report(uint64_t nsec)
+{
+	uint64_t size = (uint64_t)options.size * KB;
+	/* Rounds run in batches of 8; report the count actually executed */
+	uint32_t rounds = options.rounds & ~7ul;
+	double mb, seconds;
+
+	/* PRIu32 matches the uint32_t arguments (the original "%d"
+	 * mismatched the argument signedness). */
+	printf("size: %" PRIu32 " KB rounds: %" PRIu32 " offset: %" PRIu32 " ",
+	       options.size, rounds, options.offset);
+	mb = (double)(size * (uint64_t)rounds) / (double)MB;
+	seconds = (double)nsec / (double)ODP_TIME_SEC_IN_NS;
+	printf("MB: %.3f seconds: %.3f ", mb, seconds);
+	printf("MB/s: %.3f", mb / seconds);
+	printf("\n\n");
+}
+
+/* Run options.rounds (rounded down to a multiple of 8) chained
+ * odp_hash_crc32c() calls over 'size' bytes at data + options.offset and
+ * return the elapsed wall-clock time in nanoseconds. Each result is folded
+ * into the next seed so calls cannot be computed independently. */
+static uint64_t measure_crc32c(uint8_t *data, uint32_t size)
+{
+	void *p = data + options.offset;
+	uint32_t crc = 1;
+	volatile uint32_t v;
+	odp_time_t start = odp_time_local();
+
+	/* 8x manual unroll to reduce loop overhead in the measurement */
+	for (uint32_t i = 0; i < options.rounds / 8; i++) {
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+		crc ^= odp_hash_crc32c(p, size, crc);
+	}
+
+	/* Make sure that crc is not optimized out. */
+	v = crc;
+
+	/* Quell "unused" warning. */
+	(void)v;
+
+	return odp_time_diff_ns(odp_time_local(), start);
+}
+
+/* Measure odp_hash_crc32c() throughput over the configured buffer. A
+ * warm-up pass runs first so caches are primed before the measured pass.
+ * NOTE(review): measure_crc32c() takes uint32_t, so 'size' is truncated
+ * for buffers >= 4 GB — confirm acceptable. */
+static void test_odp_hash_crc32c(uint8_t *data)
+{
+	uint64_t size = (uint64_t)options.size * KB;
+	uint64_t nsec;
+
+	/* Warm-up. */
+	measure_crc32c(data, size);
+
+	/* Actual measurement. */
+	nsec = measure_crc32c(data, size);
+
+	report(nsec);
+}
+
+/* Same as measure_crc32c() but for odp_hash_crc32(): run the configured
+ * number of chained CRC calls and return the elapsed nanoseconds. */
+static uint64_t measure_crc32(uint8_t *data, uint32_t size)
+{
+	void *p = data + options.offset;
+	uint32_t crc = 1;
+	volatile uint32_t v;
+	odp_time_t start = odp_time_local();
+
+	/* 8x manual unroll to reduce loop overhead in the measurement */
+	for (uint32_t i = 0; i < options.rounds / 8; i++) {
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+		crc ^= odp_hash_crc32(p, size, crc);
+	}
+
+	/* Make sure that crc is not optimized out. */
+	v = crc;
+
+	/* Quell "unused" warning. */
+	(void)v;
+
+	return odp_time_diff_ns(odp_time_local(), start);
+}
+
+/* Measure odp_hash_crc32() throughput over the configured buffer. A
+ * warm-up pass runs first; see test_odp_hash_crc32c() for the same
+ * uint64_t -> uint32_t size truncation caveat. */
+static void test_odp_hash_crc32(uint8_t *data)
+{
+	uint64_t size = (uint64_t)options.size * KB;
+	uint64_t nsec;
+
+	/* Warm-up. */
+	measure_crc32(data, size);
+
+	/* Actual measurement. */
+	nsec = measure_crc32(data, size);
+
+	report(nsec);
+}
+
+/* Program entry: initialize ODP, allocate a page-aligned random buffer
+ * and run the selected CRC measurement(s). */
+int main(int argc, char **argv)
+{
+	odp_instance_t instance;
+	odp_init_t init;
+
+	if (parse_options(argc, argv))
+		exit(EXIT_FAILURE);
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.schedule = 1;
+	init.not_used.feat.stash = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		ODPH_ERR("Global init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("Local init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_sys_info_print();
+
+	uint8_t *buf, *data;
+	/* NOTE(review): the product is computed in unsigned long long but
+	 * stored into uint32_t; truncates for sizes >= 4 GB — confirm. */
+	uint32_t size = options.size * KB;
+	uint64_t seed = 1;
+	const unsigned long page = 4 * KB;
+
+	/* One extra page for alignment, another one for offset. */
+	buf = (uint8_t *)malloc(size + page * 2);
+
+	if (!buf) {
+		ODPH_ERR("Memory allocation failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Align to start of page. */
+	data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+	/* Fill buffer with deterministic pseudo-random test data */
+	if (odp_random_test_data(data, size, &seed) != (int32_t)size) {
+		ODPH_ERR("odp_random_test_data() failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (options.test == 0 || options.test == 1) {
+		printf("odp_hash_crc32c\n"
+		       "---------------\n");
+		test_odp_hash_crc32c(data);
+	}
+
+	if (options.test == 0 || options.test == 2) {
+		printf("odp_hash_crc32\n"
+		       "--------------\n");
+		test_odp_hash_crc32(data);
+	}
+
+	free(buf);
+
+	if (odp_term_local()) {
+		ODPH_ERR("Local terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(instance)) {
+		ODPH_ERR("Global terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return 0;
+}
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
new file mode 100644
index 000000000..a644da5e1
--- /dev/null
+++ b/test/performance/odp_crypto.c
@@ -0,0 +1,1526 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_crypto.c
+ *
+ * Performance test application for crypto APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif /* _GNU_SOURCE */
+
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/** @def POOL_NUM_PKT
+ * Number of packets in the pool
+ */
+#define POOL_NUM_PKT 64
+
+#define AAD_LEN 8 /* typical AAD length used in IPsec when ESN is not in use */
+#define MAX_AUTH_DIGEST_LEN 32 /* maximum MAC length in bytes */
+
+/* Static test vectors: AAD, IV and keys of the lengths the algorithm
+ * table below requires */
+static uint8_t test_aad[AAD_LEN] = "01234567";
+static uint8_t test_iv[16] = "0123456789abcdef";
+
+static uint8_t test_key16[16] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+				  0x06, 0x07, 0x08, 0x09, 0x0a,
+				  0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+				  0x10,
+};
+
+static uint8_t test_key20[20] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+				  0x06, 0x07, 0x08, 0x09, 0x0a,
+				  0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+				  0x10, 0x11, 0x12, 0x13, 0x14,
+};
+
+static uint8_t test_key24[24] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+				  0x06, 0x07, 0x08, 0x09, 0x0a,
+				  0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+				  0x10, 0x11, 0x12, 0x13, 0x14,
+				  0x15, 0x16, 0x17, 0x18
+};
+
+static uint8_t test_key32[32] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+				  0x06, 0x07, 0x08, 0x09, 0x0a,
+				  0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+				  0x10, 0x11, 0x12, 0x13, 0x14,
+				  0x15, 0x16, 0x17, 0x18, 0x19,
+				  0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
+				  0x1f, 0x20,
+};
+
+/* NOTE(review): a few bytes below (0x4b, 0x55, 0x5a, 0x5f) break the
+ * otherwise ascending pattern — presumably intentional test data, but
+ * confirm against the original vectors. */
+static uint8_t test_key64[64] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+				  0x06, 0x07, 0x08, 0x09, 0x0a,
+				  0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+				  0x10, 0x11, 0x12, 0x13, 0x14,
+				  0x15, 0x16, 0x17, 0x18, 0x19,
+				  0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
+				  0x1f, 0x20, 0x21, 0x22, 0x23,
+				  0x24, 0x25, 0x26, 0x27, 0x28,
+				  0x29, 0x2a, 0x4b, 0x2c, 0x2d,
+				  0x2e, 0x2f, 0x30, 0x31, 0x32,
+				  0x33, 0x34, 0x55, 0x36, 0x37,
+				  0x38, 0x39, 0x5a, 0x3b, 0x3c,
+				  0x3d, 0x3e, 0x5f, 0x40,
+};
+
+/**
+ * Structure that holds template for session create call
+ * for different algorithms supported by test
+ */
+typedef struct {
+ const char *name; /**< Algorithm name */
+ odp_crypto_session_param_t session; /**< Prefilled crypto session params */
+ int cipher_in_bit_mode; /**< Cipher range in bits, probed at run time */
+ int auth_in_bit_mode; /**< Auth range in bits, probed at run time */
+} crypto_alg_config_t;
+
+/**
+ * Parsed command line crypto arguments. Describes test configuration.
+ */
+typedef struct {
+ /**
+ * If non-zero, prints content of packets. Enabled by -d or
+ * --debug option.
+ */
+ int debug_packets;
+
+ /**
+ * If non-zero, try to run crypto operation in place. Note some
+ * implementation may not support such mode. Enabled by -n or
+ * --inplace option.
+ */
+ int in_place;
+
+ /**
+ * If non-zero, output of previous operation taken as input for
+ * next encrypt operations. Enabled by -r or --reuse option.
+ */
+ int reuse_packet;
+
+ /**
+ * Maximum number of outstanding encryption requests. Note code
+ * poll for results over queue and if nothing is available it can
+ * submit more encryption requests up to maximum number specified by
+ * this option. Specified through -f or --flight option.
+ */
+ int in_flight;
+
+ /**
+ * Number of iteration to repeat crypto operation to get good
+ * average number. Specified through -i or --iterations option.
+ * Default is 10000.
+ */
+ int iteration_count;
+
+ /**
+ * Maximum sessions. Currently is not used.
+ */
+ int max_sessions;
+
+ /**
+ * Payload size to test. If 0 set of predefined payload sizes
+ * is tested. Specified through -p or --payload option.
+ */
+ int payload_length;
+
+ /**
+ * Pointer to selected algorithm to test. If NULL all available
+ * algorithms are tested. Name of algorithm is passed through
+ * -a or --algorithm option.
+ */
+ crypto_alg_config_t *alg_config;
+
+ /**
+ * Use scheduler to get completion events from crypto operation.
+ * Specified through -s argument.
+ * */
+ int schedule;
+
+ /*
+ * Poll completion queue for crypto completion events.
+ * Specified through -p argument.
+ */
+ int poll;
+} crypto_args_t;
+
+/*
+ * Helper structure that holds averages for test of one algorithm
+ * for given payload size.
+ */
+typedef struct {
+ /**
+ * Elapsed time for one crypto operation.
+ */
+ double elapsed;
+
+ /**
+ * CPU time spent pre one crypto operation by whole process
+ * i.e include current and all other threads in process.
+ * It is filled with 'getrusage(RUSAGE_SELF, ...)' call.
+ */
+ double rusage_self;
+
+ /**
+ * CPU time spent per one crypto operation by current thread
+ * only. It is filled with 'getrusage(RUSAGE_THREAD, ...)'
+ * call.
+ */
+ double rusage_thread;
+} crypto_run_result_t;
+
+/**
+ * Structure holds one snap to misc times of current process.
+ */
+typedef struct {
+ struct timeval tv; /**< Elapsed time */
+ struct rusage ru_self; /**< Rusage value for whole process */
+ struct rusage ru_thread; /**< Rusage value for current thread */
+} time_record_t;
+
+/* Arguments for one test run */
+typedef struct test_run_arg_t {
+ crypto_args_t crypto_args;
+ crypto_alg_config_t *crypto_alg_config;
+ odp_crypto_capability_t crypto_capa;
+
+} test_run_arg_t;
+
+static void parse_args(int argc, char *argv[], crypto_args_t *cargs);
+static void usage(char *progname);
+
+/**
+ * Set of predefined payloads.
+ */
+static unsigned int payloads[] = {
+ 16,
+ 64,
+ 256,
+ 1024,
+ 8192,
+ 16384
+};
+
+/** Number of payloads used in the test */
+static unsigned num_payloads;
+
+/**
+ * Set of known algorithms to test
+ */
+static crypto_alg_config_t algs_config[] = {
+ {
+ .name = "3des-cbc-null",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key = {
+ .data = test_key24,
+ .length = sizeof(test_key24)
+ },
+ .cipher_iv_len = 8,
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "3des-cbc-hmac-md5-96",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key = {
+ .data = test_key24,
+ .length = sizeof(test_key24)
+ },
+ .cipher_iv_len = 8,
+ .auth_alg = ODP_AUTH_ALG_MD5_HMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_digest_len = 12,
+ },
+ },
+ {
+ .name = "null-hmac-md5-96",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_MD5_HMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_digest_len = 12,
+ },
+ },
+ {
+ .name = "aes-cbc-null",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "aes-cbc-hmac-sha1-96",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ .auth_digest_len = 12,
+ },
+ },
+ {
+ .name = "null-hmac-sha1-96",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ .auth_digest_len = 12,
+ },
+ },
+ {
+ .name = "aes-ctr-null",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "aes-ctr-hmac-sha1-96",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ .auth_digest_len = 12,
+ },
+ },
+ {
+ .name = "null-hmac-sha256-128",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA256_HMAC,
+ .auth_key = {
+ .data = test_key32,
+ .length = sizeof(test_key32)
+ },
+ .auth_digest_len = 16,
+ },
+ },
+ {
+ .name = "null-hmac-sha512-256",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA512_HMAC,
+ .auth_key = {
+ .data = test_key64,
+ .length = sizeof(test_key64)
+ },
+ .auth_digest_len = 32,
+ },
+ },
+ {
+ .name = "null-aes-gmac",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_AES_GMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_iv_len = 12,
+ .auth_digest_len = 16,
+ },
+ },
+ {
+ .name = "aes-gcm",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_GCM,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 12,
+ .auth_alg = ODP_AUTH_ALG_AES_GCM,
+ .auth_digest_len = 16,
+ .auth_aad_len = AAD_LEN,
+ },
+ },
+ {
+ .name = "aes-ccm",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CCM,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 11,
+ .auth_alg = ODP_AUTH_ALG_AES_CCM,
+ .auth_digest_len = 16,
+ .auth_aad_len = AAD_LEN,
+ },
+ },
+ {
+ .name = "chacha20-poly1305",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_CHACHA20_POLY1305,
+ .cipher_key = {
+ .data = test_key32,
+ .length = sizeof(test_key32)
+ },
+ .cipher_iv_len = 12,
+ .auth_alg = ODP_AUTH_ALG_CHACHA20_POLY1305,
+ .auth_digest_len = 16,
+ .auth_aad_len = AAD_LEN,
+ },
+ },
+ {
+ .name = "zuc-eea3",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_ZUC_EEA3,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_NULL,
+ },
+ },
+ {
+ .name = "zuc-eia3",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_ZUC_EIA3,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_iv_len = 16,
+ .auth_digest_len = 4,
+ },
+ },
+ {
+ .name = "zuc-eea3-zuc-eia3",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_ZUC_EEA3,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_ZUC_EIA3,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_iv_len = 16,
+ .auth_digest_len = 4,
+ },
+ },
+ {
+ .name = "snow3g-uea2",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_SNOW3G_UEA2,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_NULL,
+ },
+ },
+ {
+ .name = "snow3g-uia2",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SNOW3G_UIA2,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_iv_len = 16,
+ .auth_digest_len = 4,
+ },
+ },
+ {
+ .name = "snow3g-uea2-snow3g-uia2",
+ .session = {
+ .cipher_alg = ODP_CIPHER_ALG_SNOW3G_UEA2,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_iv_len = 16,
+ .auth_alg = ODP_AUTH_ALG_SNOW3G_UIA2,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_iv_len = 16,
+ .auth_digest_len = 4,
+ },
+ },
+};
+
+/**
+ * Find corresponding config for given name. Returns NULL
+ * if config for given name is not found.
+ */
+/* Linear scan of the static algs_config[] table; 'name' must match exactly. */
+static crypto_alg_config_t *
+find_config_by_name(const char *name) {
+ unsigned int i;
+ crypto_alg_config_t *ret = NULL;
+
+ for (i = 0; i < ODPH_ARRAY_SIZE(algs_config); i++) {
+ if (strcmp(algs_config[i].name, name) == 0) {
+ ret = algs_config + i;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Helper function that prints list of algorithms that this
+ * test understands.
+ */
+/* Print every known algorithm name, one per line, indented by 'prefix'. */
+static void
+print_config_names(const char *prefix) {
+ unsigned int i;
+
+ for (i = 0; i < ODPH_ARRAY_SIZE(algs_config); i++)
+ printf("%s %s\n", prefix, algs_config[i].name);
+}
+
+/**
+ * Snap current time values and put them into 'rec'.
+ */
+/* Snapshot wall-clock and CPU usage (process-wide and current thread).
+ * NOTE(review): RUSAGE_THREAD is a Linux extension — confirm portability
+ * requirements if this test is meant to build on non-Linux platforms.
+ * Return values of gettimeofday()/getrusage() are deliberately ignored. */
+static void
+fill_time_record(time_record_t *rec)
+{
+ gettimeofday(&rec->tv, NULL);
+ getrusage(RUSAGE_SELF, &rec->ru_self);
+ getrusage(RUSAGE_THREAD, &rec->ru_thread);
+}
+
+/**
+ * Calculated CPU time difference for given two rusage structures.
+ * Note it adds user space and system time together.
+ */
+/* Convert (sec, usec) pairs to microseconds and return end - start.
+ * User and system time are summed into a single figure.
+ * NOTE(review): tv_sec * 1000000 is computed in the (signed) type of
+ * tv_sec before widening — could overflow on 32-bit time_t for very
+ * long runs; harmless for this benchmark's durations. */
+static unsigned long long
+get_rusage_diff(struct rusage *start, struct rusage *end)
+{
+ unsigned long long rusage_diff;
+ unsigned long long rusage_start;
+ unsigned long long rusage_end;
+
+ rusage_start = (start->ru_utime.tv_sec * 1000000) +
+ (start->ru_utime.tv_usec);
+ rusage_start += (start->ru_stime.tv_sec * 1000000) +
+ (start->ru_stime.tv_usec);
+
+ rusage_end = (end->ru_utime.tv_sec * 1000000) +
+ (end->ru_utime.tv_usec);
+ rusage_end += (end->ru_stime.tv_sec * 1000000) +
+ (end->ru_stime.tv_usec);
+
+ rusage_diff = rusage_end - rusage_start;
+
+ return rusage_diff;
+}
+
+/**
+ * Get diff for RUSAGE_SELF (whole process) between two time snap
+ * records.
+ */
+/* Process-wide CPU time (user+sys, usec) consumed between two snapshots. */
+static unsigned long long
+get_rusage_self_diff(time_record_t *start, time_record_t *end)
+{
+ return get_rusage_diff(&start->ru_self, &end->ru_self);
+}
+
+/**
+ * Get diff for RUSAGE_THREAD (current thread only) between two
+ * time snap records.
+ */
+/* Calling thread's CPU time (user+sys, usec) consumed between two snapshots. */
+static unsigned long long
+get_rusage_thread_diff(time_record_t *start, time_record_t *end)
+{
+ return get_rusage_diff(&start->ru_thread, &end->ru_thread);
+}
+
+/**
+ * Get diff of elapsed time between two time snap records
+ */
+/* Wall-clock microseconds elapsed between two snapshots (gettimeofday based).
+ * Same signed-multiply caveat as get_rusage_diff() applies to tv_sec. */
+static unsigned long long
+get_elapsed_usec(time_record_t *start, time_record_t *end)
+{
+ unsigned long long s;
+ unsigned long long e;
+
+ s = (start->tv.tv_sec * 1000000) +
+ (start->tv.tv_usec);
+ e = (end->tv.tv_sec * 1000000) +
+ (end->tv.tv_usec);
+
+ return e - s;
+}
+
+/* Column layouts shared by the header and data rows of the report. */
+#define REPORT_HEADER "%30.30s %15s %15s %15s %15s %15s %15s\n"
+#define REPORT_LINE "%30.30s %15d %15d %15.3f %15.3f %15.3f %15d\n"
+
+/**
+ * Print header line for our report.
+ */
+static void
+print_result_header(void)
+{
+ printf(REPORT_HEADER,
+ "algorithm", "avg over #", "payload (bytes)", "elapsed (us)",
+ "rusg self (us)", "rusg thrd (us)", "throughput (Kb)");
+}
+
+/**
+ * Print one line of our report.
+ */
+/* Print one report row for a (config, payload size) measurement.
+ * Throughput = payload bytes per second / 1024, derived from the average
+ * per-iteration elapsed time.
+ * NOTE(review): 'throughput' and 'payload_length' are unsigned but the
+ * matching REPORT_LINE conversions are %d — should be %u (UB per C11
+ * fprintf if value exceeds INT_MAX). Also no guard against
+ * result->elapsed == 0 before dividing. */
+static void
+print_result(crypto_args_t *cargs,
+ unsigned int payload_length,
+ crypto_alg_config_t *config,
+ crypto_run_result_t *result)
+{
+ unsigned int throughput;
+
+ throughput = (1000000.0 / result->elapsed) * payload_length / 1024;
+ printf(REPORT_LINE,
+ config->name, cargs->iteration_count, payload_length,
+ result->elapsed, result->rusage_self, result->rusage_thread,
+ throughput);
+}
+
+/**
+ * Print piece of memory with given size.
+ */
+/* Hex+ASCII dump of 'len' bytes, 16 per row, classic hexdump layout:
+ * offset column, hex bytes, then printable ASCII (non-printables as '.').
+ * Rows are assembled in the fixed 81-byte 'line' buffer before printing.
+ * NOTE(review): pointer advances (p += 8 after the offset, p += 3 for the
+ * gap) exceed the characters the format strings appear to write in this
+ * view — the literals look whitespace-collapsed by extraction; verify the
+ * originals pad to exactly 8/3 characters, otherwise stale bytes and the
+ * sprintf NUL end up inside the row. */
+static void
+print_mem(const char *msg,
+ const unsigned char *ptr,
+ unsigned int len)
+{
+ unsigned i, j;
+ char c;
+ char line[81];
+ char *p;
+
+ if (msg)
+ printf("\n%s (bytes size = %d)", msg, len);
+
+ for (i = 0; i < len; i += 16) {
+ p = line;
+ sprintf(p, "\n%04x ", i); p += 8;
+
+ for (j = 0; j < 16; j++) {
+ if (i + j == len)
+ break;
+
+ sprintf(p, " %02x", (ptr)[i + j]); p += 3;
+ }
+
+ /* Pad short final row so the ASCII column stays aligned. */
+ for (; j < 16; j++) {
+ sprintf(p, " "); p += 3;
+ }
+
+ sprintf(p, " "); p += 3;
+
+ for (j = 0; j < 16; j++) {
+ if (i + j == len)
+ break;
+ c = (ptr)[i + j];
+ *p++ = (' ' <= c && c <= '~') ? c : '.';
+ }
+
+ *p = '\0';
+ printf("%s", line);
+ }
+ printf("\n");
+}
+
+/**
+ * Create ODP crypto session for given config.
+ */
+/* Create an encode-direction crypto session from 'config'.
+ * Completion queue/mode depend on cargs: async (scheduled or plain queue
+ * "crypto-out") when -s/-p was given, otherwise synchronous.
+ * Returns 0 on success, 1 when the implementation reports the algorithm
+ * combination/order/params as unsupported (caller treats this as "skip"),
+ * and -1 on hard errors (missing pool/queue, other create failures). */
+static int
+create_session_from_config(odp_crypto_session_t *session,
+ crypto_alg_config_t *config,
+ crypto_args_t *cargs)
+{
+ odp_crypto_session_param_t params;
+ odp_crypto_ses_create_err_t ses_create_rc;
+ odp_pool_t pkt_pool;
+ odp_queue_t out_queue;
+
+ /* Start from the per-algorithm template, then fill in the common
+ * fields; memcpy overwrites the init defaults wholesale. */
+ odp_crypto_session_param_init(&params);
+ memcpy(&params, &config->session, sizeof(odp_crypto_session_param_t));
+ params.op = ODP_CRYPTO_OP_ENCODE;
+ params.auth_cipher_text = true;
+
+ /* Lookup the packet pool */
+ pkt_pool = odp_pool_lookup("packet_pool");
+ if (pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("packet_pool pool not found\n");
+ return -1;
+ }
+ params.output_pool = pkt_pool;
+
+ if (cargs->schedule || cargs->poll) {
+ out_queue = odp_queue_lookup("crypto-out");
+ if (out_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("crypto-out queue not found\n");
+ return -1;
+ }
+ params.compl_queue = out_queue;
+ params.op_mode = ODP_CRYPTO_ASYNC;
+ } else {
+ params.compl_queue = ODP_QUEUE_INVALID;
+ params.op_mode = ODP_CRYPTO_SYNC;
+ }
+ if (odp_crypto_session_create(&params, session,
+ &ses_create_rc)) {
+ switch (ses_create_rc) {
+ case ODP_CRYPTO_SES_ERR_ALG_COMBO:
+ printf(" requested algorithm combination not supported\n");
+ return 1;
+ case ODP_CRYPTO_SES_ERR_ALG_ORDER:
+ printf(" requested algorithm order not supported\n");
+ return 1;
+ case ODP_CRYPTO_SES_ERR_PARAMS:
+ printf(" requested session parameters not supported\n");
+ return 1;
+ default:
+ break;
+ }
+ ODPH_ERR("crypto session create failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Allocate a packet of 'payload_length' bytes and fill it with 0x01.
+ * Returns ODP_PACKET_INVALID on allocation failure (error already logged).
+ * NOTE(review): odp_packet_data() covers only the first segment; callers
+ * size the pool so packets fit in one segment (see max_seg_len in main). */
+static odp_packet_t
+make_packet(odp_pool_t pkt_pool, unsigned int payload_length)
+{
+ odp_packet_t pkt;
+
+ pkt = odp_packet_alloc(pkt_pool, payload_length);
+ if (pkt == ODP_PACKET_INVALID) {
+ ODPH_ERR("failed to allocate buffer\n");
+ return pkt;
+ }
+
+ void *mem = odp_packet_data(pkt);
+
+ memset(mem, 1, payload_length);
+
+ return pkt;
+}
+
+/**
+ * Run measurement iterations for given config and payload size.
+ * Result of run returned in 'result' out parameter.
+ */
+/* Run cargs->iteration_count crypto operations for one (config, payload)
+ * pair and fill 'result' with per-iteration averages.
+ *
+ * Sync mode: each iteration submits with odp_crypto_op() and consumes the
+ * result immediately. Async mode (-s/-p): submissions are pipelined up to
+ * cargs->in_flight outstanding operations; completions are drained from
+ * the scheduler or the plain "crypto-out" queue inside the same loop.
+ * With -r (reuse) the encrypted output becomes the next input packet
+ * (only valid with in_flight == 1, enforced in parse_args).
+ * Returns 0 on success, negative on failure. */
+static int
+run_measure_one(crypto_args_t *cargs,
+ crypto_alg_config_t *config,
+ odp_crypto_session_t *session,
+ unsigned int payload_length,
+ crypto_run_result_t *result)
+{
+ odp_crypto_packet_op_param_t params;
+
+ odp_pool_t pkt_pool;
+ odp_queue_t out_queue;
+ odp_packet_t pkt = ODP_PACKET_INVALID;
+ int rc = 0;
+ /* Room for the digest appended at hash_result_offset. */
+ uint32_t packet_len = payload_length + MAX_AUTH_DIGEST_LEN;
+
+ pkt_pool = odp_pool_lookup("packet_pool");
+ if (pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("pkt_pool not found\n");
+ return -1;
+ }
+
+ out_queue = odp_queue_lookup("crypto-out");
+ if (cargs->schedule || cargs->poll) {
+ if (out_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("crypto-out queue not found\n");
+ return -1;
+ }
+ }
+
+ if (cargs->reuse_packet) {
+ pkt = make_packet(pkt_pool, packet_len);
+ if (ODP_PACKET_INVALID == pkt)
+ return -1;
+ }
+
+ time_record_t start, end;
+ int packets_sent = 0;
+ int packets_received = 0;
+
+ /* Initialize parameters block */
+ memset(&params, 0, sizeof(params));
+ params.session = *session;
+ params.cipher_iv_ptr = test_iv;
+ params.auth_iv_ptr = test_iv;
+ params.aad_ptr = test_aad;
+
+ /* Ranges are given in bits when capability discovery marked the
+ * algorithm as bit-mode (see check_cipher_params/check_auth_params). */
+ params.cipher_range.offset = 0;
+ params.cipher_range.length = config->cipher_in_bit_mode ? payload_length * 8
+ : payload_length;
+ params.auth_range.offset = 0;
+ params.auth_range.length = config->auth_in_bit_mode ? payload_length * 8
+ : payload_length;
+ params.hash_result_offset = payload_length;
+
+ fill_time_record(&start);
+
+ while ((packets_sent < cargs->iteration_count) ||
+ (packets_received < cargs->iteration_count)) {
+ void *mem;
+
+ /* Submit while iterations remain and the in-flight window
+ * is not full. */
+ if ((packets_sent < cargs->iteration_count) &&
+ (packets_sent - packets_received <
+ cargs->in_flight)) {
+ odp_packet_t out_pkt;
+
+ if (!cargs->reuse_packet) {
+ pkt = make_packet(pkt_pool, packet_len);
+ if (ODP_PACKET_INVALID == pkt)
+ return -1;
+ }
+
+ /* ODP_PACKET_INVALID asks the implementation to
+ * allocate the output packet from output_pool. */
+ out_pkt = cargs->in_place ? pkt : ODP_PACKET_INVALID;
+
+ if (cargs->debug_packets) {
+ mem = odp_packet_data(pkt);
+ print_mem("Packet before encryption:",
+ mem, payload_length);
+ }
+
+ if (cargs->schedule || cargs->poll) {
+ rc = odp_crypto_op_enq(&pkt, &out_pkt,
+ &params, 1);
+ if (rc <= 0) {
+ ODPH_ERR("failed odp_crypto_packet_op_enq: rc = %d\n", rc);
+ if (!cargs->reuse_packet)
+ odp_packet_free(pkt);
+ break;
+ }
+ packets_sent += rc;
+ } else {
+ rc = odp_crypto_op(&pkt, &out_pkt,
+ &params, 1);
+ if (rc <= 0) {
+ ODPH_ERR("failed odp_crypto_packet_op: rc = %d\n", rc);
+ if (!cargs->reuse_packet)
+ odp_packet_free(pkt);
+ break;
+ }
+ packets_sent += rc;
+ packets_received++;
+ if (odp_unlikely(odp_crypto_result(NULL, out_pkt) != 0)) {
+ ODPH_ERR("Crypto operation failed\n");
+ odp_packet_free(out_pkt);
+ return -1;
+ }
+ if (cargs->debug_packets) {
+ mem = odp_packet_data(out_pkt);
+ print_mem("Immediately encrypted "
+ "packet",
+ mem,
+ payload_length +
+ config->session.
+ auth_digest_len);
+ }
+ if (cargs->reuse_packet)
+ pkt = out_pkt;
+ else
+ odp_packet_free(out_pkt);
+ }
+ }
+
+ /* Drain all currently available completions (async modes). */
+ if (cargs->schedule || cargs->poll) {
+ odp_event_t ev;
+ odp_packet_t out_pkt;
+
+ if (cargs->schedule)
+ ev = odp_schedule(NULL,
+ ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(out_queue);
+
+ while (ev != ODP_EVENT_INVALID) {
+ out_pkt = odp_crypto_packet_from_event(ev);
+ if (odp_unlikely(odp_crypto_result(NULL, out_pkt) != 0)) {
+ ODPH_ERR("Crypto operation failed\n");
+ odp_packet_free(out_pkt);
+ return -1;
+ }
+ if (cargs->debug_packets) {
+ mem = odp_packet_data(out_pkt);
+ print_mem("Received encrypted packet",
+ mem,
+ payload_length +
+ config->
+ session.auth_digest_len);
+ }
+ if (cargs->reuse_packet)
+ pkt = out_pkt;
+ else
+ odp_packet_free(out_pkt);
+ packets_received++;
+ if (cargs->schedule)
+ ev = odp_schedule(NULL,
+ ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(out_queue);
+ };
+ }
+ }
+
+ fill_time_record(&end);
+
+ {
+ double count;
+
+ count = get_elapsed_usec(&start, &end);
+ result->elapsed = count /
+ cargs->iteration_count;
+
+ count = get_rusage_self_diff(&start, &end);
+ result->rusage_self = count /
+ cargs->iteration_count;
+
+ count = get_rusage_thread_diff(&start, &end);
+ result->rusage_thread = count /
+ cargs->iteration_count;
+ }
+
+ if (cargs->reuse_packet)
+ odp_packet_free(pkt);
+
+ return rc < 0 ? rc : 0;
+}
+
+/* Map a cipher algorithm enum to its capability bit; returns 0 when the
+ * implementation advertises support, -1 otherwise (including unknown algs). */
+static int check_cipher_alg(const odp_crypto_capability_t *capa,
+ odp_cipher_alg_t alg)
+{
+ switch (alg) {
+ case ODP_CIPHER_ALG_NULL:
+ if (capa->ciphers.bit.null)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_DES:
+ if (capa->ciphers.bit.des)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_3DES_CBC:
+ if (capa->ciphers.bit.trides_cbc)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ if (capa->ciphers.bit.aes_cbc)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_AES_CTR:
+ if (capa->ciphers.bit.aes_ctr)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_AES_GCM:
+ if (capa->ciphers.bit.aes_gcm)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_AES_CCM:
+ if (capa->ciphers.bit.aes_ccm)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ if (capa->ciphers.bit.chacha20_poly1305)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ if (capa->ciphers.bit.zuc_eea3)
+ return 0;
+ break;
+ case ODP_CIPHER_ALG_SNOW3G_UEA2:
+ if (capa->ciphers.bit.snow3g_uea2)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/* Map an auth algorithm enum to its capability bit; returns 0 when the
+ * implementation advertises support, -1 otherwise (including unknown algs). */
+static int check_auth_alg(const odp_crypto_capability_t *capa,
+ odp_auth_alg_t alg)
+{
+ switch (alg) {
+ case ODP_AUTH_ALG_NULL:
+ if (capa->auths.bit.null)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ if (capa->auths.bit.md5_hmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ if (capa->auths.bit.sha1_hmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ if (capa->auths.bit.sha256_hmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ if (capa->auths.bit.sha384_hmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ if (capa->auths.bit.sha512_hmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_AES_GCM:
+ if (capa->auths.bit.aes_gcm)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_AES_GMAC:
+ if (capa->auths.bit.aes_gmac)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_AES_CCM:
+ if (capa->auths.bit.aes_ccm)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ if (capa->auths.bit.chacha20_poly1305)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ if (capa->auths.bit.zuc_eia3)
+ return 0;
+ break;
+ case ODP_AUTH_ALG_SNOW3G_UIA2:
+ if (capa->auths.bit.snow3g_uia2)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/* Check that the cipher algorithm, key length and IV length in 'param' are
+ * supported; on success store the capability's bit_mode flag in *bit_mode
+ * and return 0, otherwise return 1 (treated as "skip" by the caller). */
+static int check_cipher_params(const odp_crypto_capability_t *crypto_capa,
+ const odp_crypto_session_param_t *param,
+ int *bit_mode)
+{
+ int num, rc;
+
+ if (check_cipher_alg(crypto_capa, param->cipher_alg))
+ return 1;
+
+ num = odp_crypto_cipher_capability(param->cipher_alg, NULL, 0);
+ if (num <= 0)
+ return 1;
+
+ /* VLA sized by the implementation-reported capability count. */
+ odp_crypto_cipher_capability_t cipher_capa[num];
+
+ rc = odp_crypto_cipher_capability(param->cipher_alg, cipher_capa, num);
+ if (rc < num)
+ num = rc;
+
+ for (int n = 0; n < num; n++) {
+ odp_crypto_cipher_capability_t *capa = &cipher_capa[n];
+
+ if (capa->key_len != param->cipher_key.length ||
+ capa->iv_len != param->cipher_iv_len)
+ continue;
+
+ *bit_mode = capa->bit_mode;
+ return 0;
+ }
+ return 1;
+}
+
+/* Return nonzero when 'len' is a valid AAD length for 'capa':
+ * within [min, max] and reachable from min in steps of 'inc'
+ * (inc == 0 means only the minimum length is allowed). */
+static int aad_len_ok(const odp_crypto_auth_capability_t *capa, uint32_t len)
+{
+ if (len < capa->aad_len.min || len > capa->aad_len.max)
+ return 0;
+
+ if (len == capa->aad_len.min)
+ return 1;
+ if (capa->aad_len.inc == 0)
+ return 0;
+
+ return ((len - capa->aad_len.min) % capa->aad_len.inc) == 0;
+}
+
+/* Check that the auth algorithm, digest/key/IV lengths and AAD length in
+ * 'param' are supported; on success store the capability's bit_mode flag
+ * in *bit_mode and return 0, otherwise return 1 ("skip"). */
+static int check_auth_params(const odp_crypto_capability_t *crypto_capa,
+ const odp_crypto_session_param_t *param,
+ int *bit_mode)
+{
+ int num, rc;
+
+ /* The digest must fit in the tail room reserved by run_measure_one(). */
+ if (param->auth_digest_len > MAX_AUTH_DIGEST_LEN) {
+ ODPH_ERR("MAX_AUTH_DIGEST_LEN too low\n");
+ return 1;
+ }
+
+ if (check_auth_alg(crypto_capa, param->auth_alg))
+ return 1;
+
+ num = odp_crypto_auth_capability(param->auth_alg, NULL, 0);
+ if (num <= 0)
+ return 1;
+
+ /* VLA sized by the implementation-reported capability count. */
+ odp_crypto_auth_capability_t auth_capa[num];
+
+ rc = odp_crypto_auth_capability(param->auth_alg, auth_capa, num);
+ if (rc < num)
+ num = rc;
+
+ for (int n = 0; n < num; n++) {
+ odp_crypto_auth_capability_t *capa = &auth_capa[n];
+
+ if (capa->digest_len != param->auth_digest_len ||
+ capa->key_len != param->auth_key.length ||
+ capa->iv_len != param->auth_iv_len)
+ continue;
+
+ if (!aad_len_ok(capa, param->auth_aad_len))
+ continue;
+
+ *bit_mode = capa->bit_mode;
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Process one algorithm. Note if payload size is specified it is
+ * only one run. Or iterate over set of predefined payloads.
+ */
+/* Measure one algorithm config: validate capabilities, create the session,
+ * then run either the single -l payload size or the whole payloads[] set.
+ * Unsupported configs are reported as skipped and return 0 so the caller
+ * can continue with other algorithms; hard errors return nonzero. */
+static int run_measure_one_config(test_run_arg_t *arg)
+{
+ crypto_run_result_t result;
+ odp_crypto_session_t session;
+ crypto_args_t *cargs = &arg->crypto_args;
+ crypto_alg_config_t *config = arg->crypto_alg_config;
+ odp_crypto_capability_t crypto_capa = arg->crypto_capa;
+ int rc = 0;
+
+ printf("\n");
+
+ if (check_cipher_params(&crypto_capa, &config->session,
+ &config->cipher_in_bit_mode)) {
+ printf(" Cipher algorithm not supported\n");
+ rc = 1;
+ }
+
+ if (check_auth_params(&crypto_capa, &config->session,
+ &config->auth_in_bit_mode)) {
+ printf(" Auth algorithm not supported\n");
+ rc = 1;
+ }
+
+#if ODP_VERSION_API >= ODP_VERSION_API_NUM(1, 42, 0)
+ /* Bit mode ciphers can now be used in byte mode. */
+ config->cipher_in_bit_mode = 0;
+ config->auth_in_bit_mode = 0;
+#endif
+
+ if (rc == 0)
+ rc = create_session_from_config(&session, config, cargs);
+ if (rc) {
+ /* rc > 0 means "unsupported, skip"; rc < 0 is a real error. */
+ printf(" => %s skipped\n", config->name);
+ return rc > 0 ? 0 : -1;
+ }
+
+ if (cargs->payload_length) {
+ rc = run_measure_one(cargs, config, &session,
+ cargs->payload_length, &result);
+ if (!rc) {
+ print_result_header();
+ print_result(cargs, cargs->payload_length,
+ config, &result);
+ }
+ } else {
+ unsigned i;
+
+ print_result_header();
+ for (i = 0; i < num_payloads; i++) {
+ rc = run_measure_one(cargs, config, &session,
+ payloads[i], &result);
+ if (rc)
+ break;
+ print_result(cargs, payloads[i],
+ config, &result);
+ }
+ }
+
+ odp_crypto_session_destroy(session);
+
+ return rc;
+}
+
+/* Worker thread entry point (odph_thread start function).
+ * NOTE(review): the return value of run_measure_one_config() is discarded,
+ * so a failing measurement does not affect the thread's exit status. */
+static int run_thr_func(void *arg)
+{
+ run_measure_one_config((test_run_arg_t *)arg);
+ return 0;
+}
+
+/* Program entry: init ODP, query crypto/pool capabilities, create the
+ * "packet_pool" pool and (for async modes) the "crypto-out" completion
+ * queue, then run either the single -a algorithm (in a worker thread for
+ * scheduled mode) or every entry of algs_config[]. */
+int main(int argc, char *argv[])
+{
+ crypto_args_t cargs;
+ odp_pool_t pool;
+ odp_queue_param_t qparam;
+ odp_pool_param_t params;
+ odp_queue_t out_queue = ODP_QUEUE_INVALID;
+ test_run_arg_t test_run_arg;
+ odp_cpumask_t cpumask;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ int num_workers = 1;
+ odph_helper_options_t helper_options;
+ odph_thread_t thread_tbl[num_workers];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_instance_t instance;
+ odp_init_t init_param;
+ odp_pool_capability_t pool_capa;
+ odp_crypto_capability_t crypto_capa;
+ uint32_t max_seg_len;
+ uint32_t i;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ memset(&cargs, 0, sizeof(cargs));
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &cargs);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ odp_init_local(instance, ODP_THREAD_WORKER);
+
+ odp_sys_info_print();
+ memset(&crypto_capa, 0, sizeof(crypto_capa));
+
+ if (odp_crypto_capability(&crypto_capa)) {
+ ODPH_ERR("Crypto capability request failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (cargs.schedule && crypto_capa.queue_type_sched == 0) {
+ ODPH_ERR("scheduled type completion queue not supported.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (cargs.poll && crypto_capa.queue_type_plain == 0) {
+ ODPH_ERR("plain type completion queue not supported.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capability request failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ max_seg_len = pool_capa.pkt.max_seg_len;
+
+ /* Keep only payload sizes that (with digest room) fit in a single
+ * segment; payloads[] is assumed sorted in ascending order. */
+ for (i = 0; i < ODPH_ARRAY_SIZE(payloads); i++) {
+ if (payloads[i] + MAX_AUTH_DIGEST_LEN > max_seg_len)
+ break;
+ }
+
+ num_payloads = i;
+
+ /* Create packet pool */
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = max_seg_len;
+ params.pkt.len = max_seg_len;
+ params.pkt.num = POOL_NUM_PKT;
+ params.type = ODP_POOL_PACKET;
+ pool = odp_pool_create("packet_pool", &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ odp_pool_print(pool);
+
+ /* Completion queue: scheduled for -s, plain for -p, none for sync. */
+ odp_queue_param_init(&qparam);
+ if (cargs.schedule) {
+ odp_schedule_config(NULL);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = odp_schedule_default_prio();
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ out_queue = odp_queue_create("crypto-out", &qparam);
+ } else if (cargs.poll) {
+ qparam.type = ODP_QUEUE_TYPE_PLAIN;
+ out_queue = odp_queue_create("crypto-out", &qparam);
+ }
+ if (cargs.schedule || cargs.poll) {
+ if (out_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("crypto-out queue create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (cargs.schedule) {
+ printf("Run in async scheduled mode\n");
+ /* NOTE(review): num_workers is reassigned here but thread_tbl
+ * was sized with the initial value 1 — confirm only one worker
+ * is ever created. */
+ num_workers = odp_cpumask_default_worker(&cpumask,
+ num_workers);
+ (void)odp_cpumask_to_str(&cpumask, cpumaskstr,
+ sizeof(cpumaskstr));
+ printf("num worker threads: %i\n",
+ num_workers);
+ printf("first CPU: %i\n",
+ odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n",
+ cpumaskstr);
+ } else if (cargs.poll) {
+ printf("Run in async poll mode\n");
+ } else {
+ printf("Run in sync mode\n");
+ }
+
+ test_run_arg.crypto_args = cargs;
+ test_run_arg.crypto_alg_config = cargs.alg_config;
+ test_run_arg.crypto_capa = crypto_capa;
+
+ if (cargs.alg_config) {
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ if (cargs.schedule) {
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thr_func;
+ thr_param.arg = &test_run_arg;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
+
+ odph_thread_join(thread_tbl, num_workers);
+ } else {
+ run_measure_one_config(&test_run_arg);
+ }
+ } else {
+ /* No -a option: iterate over every known configuration. */
+ for (i = 0; i < ODPH_ARRAY_SIZE(algs_config); i++) {
+ test_run_arg.crypto_alg_config = algs_config + i;
+ run_measure_one_config(&test_run_arg);
+ }
+ }
+
+ if (cargs.schedule || cargs.poll)
+ odp_queue_destroy(out_queue);
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Error: pool destroy\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: term local\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: term global\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
+/* Parse command-line options into 'cargs'. Exits on -h or invalid input.
+ * NOTE(review): shortopts contains "c:" but there is no 'c' case or long
+ * option — likely a leftover. 'poll' and 'max_sessions' are not reset
+ * here; they rely on the caller memset()ing cargs before this call. */
+static void parse_args(int argc, char *argv[], crypto_args_t *cargs)
+{
+ int opt;
+ int long_index;
+ static const struct option longopts[] = {
+ {"algorithm", optional_argument, NULL, 'a'},
+ {"debug", no_argument, NULL, 'd'},
+ {"flight", optional_argument, NULL, 'f'},
+ {"help", no_argument, NULL, 'h'},
+ {"iterations", optional_argument, NULL, 'i'},
+ {"inplace", no_argument, NULL, 'n'},
+ {"payload", optional_argument, NULL, 'l'},
+ {"sessions", optional_argument, NULL, 'm'},
+ {"reuse", no_argument, NULL, 'r'},
+ {"poll", no_argument, NULL, 'p'},
+ {"schedule", no_argument, NULL, 's'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+a:c:df:hi:m:nl:spr";
+
+ /* Defaults; overridden by options below. */
+ cargs->in_place = 0;
+ cargs->in_flight = 1;
+ cargs->debug_packets = 0;
+ cargs->iteration_count = 10000;
+ cargs->payload_length = 0;
+ cargs->alg_config = NULL;
+ cargs->reuse_packet = 0;
+ cargs->schedule = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'a':
+ cargs->alg_config = find_config_by_name(optarg);
+ if (!cargs->alg_config) {
+ printf("cannot test crypto '%s' configuration\n",
+ optarg);
+ usage(argv[0]);
+ exit(-1);
+ }
+ break;
+ case 'd':
+ cargs->debug_packets = 1;
+ break;
+ case 'i':
+ cargs->iteration_count = atoi(optarg);
+ break;
+ case 'f':
+ cargs->in_flight = atoi(optarg);
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ case 'm':
+ cargs->max_sessions = atoi(optarg);
+ break;
+ case 'n':
+ cargs->in_place = 1;
+ break;
+ case 'l':
+ cargs->payload_length = atoi(optarg);
+ break;
+ case 'r':
+ cargs->reuse_packet = 1;
+ break;
+ case 's':
+ cargs->schedule = 1;
+ break;
+ case 'p':
+ cargs->poll = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ optind = 1; /* reset 'extern optind' from the getopt lib */
+
+ /* Mutually exclusive option combinations. */
+ if ((cargs->in_flight > 1) && cargs->reuse_packet) {
+ printf("-f (in flight > 1) and -r (reuse packet) options are not compatible\n");
+ usage(argv[0]);
+ exit(-1);
+ }
+ if (cargs->schedule && cargs->poll) {
+ printf("-s (schedule) and -p (poll) options are not compatible\n");
+ usage(argv[0]);
+ exit(-1);
+ }
+}
+
+/**
+ * Print usage information
+ */
+/* Print usage text to stdout, including the supported algorithm names
+ * (via print_config_names()). */
+static void usage(char *progname)
+{
+ printf("\n"
+ "Usage: %s OPTIONS\n"
+ " E.g. %s -i 100000\n"
+ "\n"
+ "OpenDataPlane crypto speed measure.\n"
+ "Optional OPTIONS\n"
+ " -a, --algorithm <name> Specify algorithm name (default all)\n"
+ " Supported values are:\n",
+ progname, progname);
+
+ print_config_names(" ");
+ printf(" -d, --debug Enable dump of processed packets.\n"
+ " -f, --flight <number> Max number of packet processed in parallel (default 1)\n"
+ " -i, --iterations <number> Number of iterations.\n"
+ " -n, --inplace Encrypt on place.\n"
+ " -l, --payload Payload length.\n"
+ " -r, --reuse Output encrypted packet is passed as input\n"
+ " to next encrypt iteration.\n"
+ " -s, --schedule Use scheduler for completion events.\n"
+ " -p, --poll Poll completion queue for completion events.\n"
+ " -h, --help Display help and exit.\n"
+ "\n");
+}
diff --git a/test/performance/odp_crypto_run.sh b/test/performance/odp_crypto_run.sh
new file mode 100755
index 000000000..f50311ae0
--- /dev/null
+++ b/test/performance/odp_crypto_run.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# Copyright (c) 2022, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# Wrapper for running odp_crypto under "make check".
+# TEST_DIR may be exported by the build system; default to this script's dir.
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+# Run with a small number of iterations in make check
+
+$TEST_DIR/odp_crypto${EXEEXT} -i 100
+
+# Propagate failure so the test harness marks the case as FAILED.
+if [ $? -ne 0 ] ; then
+ echo Test FAILED
+ exit 1
+fi
+
+exit 0
diff --git a/test/performance/odp_dma_perf.c b/test/performance/odp_dma_perf.c
new file mode 100644
index 000000000..2f4ca490d
--- /dev/null
+++ b/test/performance/odp_dma_perf.c
@@ -0,0 +1,1951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2024 Nokia
+ */
+
+/**
+ * @example odp_dma_perf.c
+ *
+ * This tester application can be used to profile the performance of an ODP DMA implementation.
+ * Tester workflow is simple and consists of issuing as many back-to-back DMA transfers as the
+ * implementation allows and then recording key performance statistics (such as function overhead,
+ * latencies etc.).
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define EXIT_NOT_SUP 2
+#define PROG_NAME "odp_dma_perf"
+
+/* Transfer engine selection (-t option). */
+enum {
+ SYNC_DMA = 0U,
+ ASYNC_DMA,
+ SW_COPY
+};
+
+/* Segment data layout (-S option): packet vs. plain memory, and whether
+ * transfer descriptors are pre-built (dense) or built per run (sparse). */
+enum {
+ DENSE_PACKET = 0U,
+ SPARSE_PACKET,
+ DENSE_MEMORY,
+ SPARSE_MEMORY
+};
+
+/* Async completion mode (-m option). */
+enum {
+ POLL = 0U,
+ EVENT
+};
+
+/* DMA session policy (-p option): one shared session or one per worker. */
+enum {
+ SINGLE = 0U,
+ MANY
+};
+
+/* Defaults for the corresponding command-line options. */
+#define DEF_TRS_TYPE SYNC_DMA
+#define DEF_SEG_CNT 1U
+#define DEF_LEN 1024U
+#define DEF_SEG_TYPE DENSE_PACKET
+#define DEF_MODE POLL
+#define DEF_INFLIGHT 1U
+#define DEF_TIME 10U
+#define DEF_WORKERS 1U
+#define DEF_POLICY SINGLE
+
+/* Hard limits and unit multipliers. */
+#define MAX_SEGS 1024U
+#define MAX_WORKERS 24
+#define MAX_MEMORY (256U * 1024U * 1024U)
+
+#define GIGAS 1000000000
+#define MEGAS 1000000
+#define KILOS 1000
+
+/* Fill byte for source buffers (used for verification). */
+#define DATA 0xAA
+
+/* Outcome of command-line parsing. */
+typedef enum {
+ PRS_OK,
+ PRS_NOK,
+ PRS_TERM,
+ PRS_NOT_SUP
+} parse_result_t;
+
+/* Per-worker counters: error/success tallies, wall-clock transfer times
+ * (tm, nanos/odp_time based) and cycle counts (cc) with min/max tracking,
+ * plus the sample counts needed to compute averages. */
+typedef struct {
+ uint64_t completed;
+ uint64_t start_errs;
+ uint64_t poll_errs;
+ uint64_t scheduler_timeouts;
+ uint64_t transfer_errs;
+ uint64_t data_errs;
+ uint64_t tot_tm;
+ uint64_t trs_tm;
+ uint64_t max_trs_tm;
+ uint64_t min_trs_tm;
+ uint64_t start_cc;
+ uint64_t max_start_cc;
+ uint64_t min_start_cc;
+ uint64_t wait_cc;
+ uint64_t max_wait_cc;
+ uint64_t min_wait_cc;
+ uint64_t trs_cc;
+ uint64_t max_trs_cc;
+ uint64_t min_trs_cc;
+ uint64_t start_cnt;
+ uint64_t wait_cnt;
+ uint64_t trs_poll_cnt;
+ uint64_t trs_cnt;
+} stats_t;
+
+/* State of one in-flight transfer slot; 'lock' serializes access when a
+ * slot may be touched from several threads. */
+typedef struct {
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_compl_param_t compl_param;
+ odp_ticketlock_t lock;
+ odp_time_t trs_start_tm;
+ uint64_t trs_start_cc;
+ uint64_t trs_poll_cnt;
+ odp_bool_t is_running;
+} trs_info_t;
+
+typedef struct sd_s sd_t;
+typedef void (*ver_fn_t)(trs_info_t *info, stats_t *stats);
+
+/* Per-session descriptor: DMA handles and transfer slots ('dma'), the
+ * backing packets or SHM memory ('seg'), and optional per-transfer
+ * prepare/verify hooks set up according to the segment type. */
+typedef struct ODP_ALIGNED_CACHE sd_s {
+ struct {
+ trs_info_t infos[MAX_SEGS];
+ odp_dma_seg_t src_seg[MAX_SEGS];
+ odp_dma_seg_t dst_seg[MAX_SEGS];
+ odp_dma_t handle;
+ odp_pool_t pool;
+ odp_queue_t compl_q;
+ uint32_t num_in_segs;
+ uint32_t num_out_segs;
+ uint32_t src_seg_len;
+ uint32_t dst_seg_len;
+ uint32_t num_inflight;
+ uint8_t trs_type;
+ uint8_t compl_mode;
+ } dma;
+
+ struct {
+ odp_packet_t src_pkt[MAX_SEGS];
+ odp_packet_t dst_pkt[MAX_SEGS];
+ odp_pool_t src_pool;
+ odp_pool_t dst_pool;
+ odp_shm_t src_shm;
+ odp_shm_t dst_shm;
+ void *src;
+ void *dst;
+ void *src_high;
+ void *dst_high;
+ void *cur_src;
+ void *cur_dst;
+ uint64_t shm_size;
+ uint8_t seg_type;
+ } seg;
+
+ odp_schedule_group_t grp;
+ /* Prepare single transfer. */
+ void (*prep_trs_fn)(sd_t *sd, trs_info_t *info);
+ /* Verify single transfer. */
+ ver_fn_t ver_fn;
+} sd_t;
+
+typedef struct prog_config_s prog_config_t;
+
+/* Per-worker thread context: its stats, the shared program config, and
+ * the session descriptor this worker drives. */
+typedef struct ODP_ALIGNED_CACHE {
+ stats_t stats;
+ prog_config_t *prog_config;
+ sd_t *sd;
+} thread_config_t;
+
+/* Function table selected by transfer type / completion mode; each test
+ * variant plugs its own implementations into these slots. */
+typedef struct {
+ /* Configure DMA session specific resources. */
+ odp_bool_t (*session_cfg_fn)(sd_t *sd);
+ /* Setup transfer elements (memory/packet segments). */
+ odp_bool_t (*setup_fn)(sd_t *sd);
+ /* Configure DMA transfers (segment addresses etc.). */
+ void (*trs_fn)(sd_t *sd);
+ /* Configure transfer completion resources (transfer IDs, events etc.). */
+ odp_bool_t (*compl_fn)(sd_t *sd);
+ /* Initiate required initial transfers. */
+ odp_bool_t (*bootstrap_fn)(sd_t *sd);
+ /* Wait and handle finished transfer. */
+ void (*wait_fn)(sd_t *sd, stats_t *stats);
+ /* Handle all unfinished transfers after main test has been stopped. */
+ void (*drain_fn)(sd_t *sd);
+ /* Free any resources that might have been allocated during setup phase. */
+ void (*free_fn)(const sd_t *sd);
+} test_api_t;
+
+/* Top-level program state: workers, per-worker sessions, the selected
+ * test_api_t, run control flag, and all parsed option values. */
+typedef struct prog_config_s {
+ odph_thread_t threads[MAX_WORKERS];
+ thread_config_t thread_config[MAX_WORKERS];
+ sd_t sds[MAX_WORKERS];
+ test_api_t api;
+ odp_atomic_u32_t is_running;
+ odp_instance_t odp_instance;
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ odp_dma_compl_mode_t compl_mode_mask;
+ odp_pool_t src_pool;
+ odp_pool_t dst_pool;
+ uint64_t shm_size;
+ uint32_t num_in_segs;
+ uint32_t num_out_segs;
+ uint32_t src_seg_len;
+ uint32_t dst_seg_len;
+ uint32_t num_inflight;
+ double time_sec;
+ uint32_t num_sessions;
+ uint32_t src_cache_size;
+ uint32_t dst_cache_size;
+ int num_workers;
+ odp_bool_t is_verify;
+ uint8_t trs_type;
+ uint8_t seg_type;
+ uint8_t compl_mode;
+ uint8_t policy;
+} prog_config_t;
+
+/* Global config pointer, needed by the signal handler below. */
+static prog_config_t *prog_conf;
+
+/* Maps the POLL/EVENT enum to the corresponding ODP completion mode. */
+static const int mode_map[] = { ODP_DMA_COMPL_POLL, ODP_DMA_COMPL_EVENT };
+
+/* Signal handler: clear the run flag so workers exit their main loops. */
+static void terminate(int signal ODP_UNUSED)
+{
+ odp_atomic_store_u32(&prog_conf->is_running, 0U);
+}
+
+/* Zero the whole program config, apply option defaults, and put every
+ * handle in all session descriptors into its explicit INVALID state so
+ * teardown can safely run on partially initialized sessions. min_* stats
+ * start at UINT64_MAX so the first sample always becomes the minimum. */
+static void init_config(prog_config_t *config)
+{
+ sd_t *sd;
+ trs_info_t *info;
+ stats_t *stats;
+
+ memset(config, 0, sizeof(*config));
+ config->compl_mode_mask |= ODP_DMA_COMPL_SYNC;
+ config->src_pool = ODP_POOL_INVALID;
+ config->dst_pool = ODP_POOL_INVALID;
+ config->num_in_segs = DEF_SEG_CNT;
+ config->num_out_segs = DEF_SEG_CNT;
+ config->src_seg_len = DEF_LEN;
+ config->num_inflight = DEF_INFLIGHT;
+ config->time_sec = DEF_TIME;
+ config->num_workers = DEF_WORKERS;
+ config->trs_type = DEF_TRS_TYPE;
+ config->seg_type = DEF_SEG_TYPE;
+ config->compl_mode = DEF_MODE;
+ config->policy = DEF_POLICY;
+
+ for (uint32_t i = 0U; i < MAX_WORKERS; ++i) {
+ sd = &config->sds[i];
+ stats = &config->thread_config[i].stats;
+ memset(sd, 0, sizeof(*sd));
+
+ for (uint32_t j = 0U; j < MAX_SEGS; ++j) {
+ info = &sd->dma.infos[j];
+ info->compl_param.transfer_id = ODP_DMA_TRANSFER_ID_INVALID;
+ info->compl_param.event = ODP_EVENT_INVALID;
+ info->compl_param.queue = ODP_QUEUE_INVALID;
+ odp_ticketlock_init(&info->lock);
+ sd->seg.src_pkt[j] = ODP_PACKET_INVALID;
+ sd->seg.dst_pkt[j] = ODP_PACKET_INVALID;
+ }
+
+ sd->dma.handle = ODP_DMA_INVALID;
+ sd->dma.pool = ODP_POOL_INVALID;
+ sd->dma.compl_q = ODP_QUEUE_INVALID;
+ sd->seg.src_shm = ODP_SHM_INVALID;
+ sd->seg.dst_shm = ODP_SHM_INVALID;
+ sd->grp = ODP_SCHED_GROUP_INVALID;
+ stats->min_trs_tm = UINT64_MAX;
+ stats->min_start_cc = UINT64_MAX;
+ stats->min_wait_cc = UINT64_MAX;
+ stats->min_trs_cc = UINT64_MAX;
+ }
+}
+
+/* Print command line help; the %u placeholders are filled with the
+ * compile-time default values listed at the end of the format string. */
+static void print_usage(void)
+{
+ printf("\n"
+ "DMA performance test. Load DMA subsystem from several workers.\n"
+ "\n"
+ "Usage: " PROG_NAME " [OPTIONS]\n"
+ "\n"
+ " E.g. " PROG_NAME "\n"
+ " " PROG_NAME " -s 10240\n"
+ " " PROG_NAME " -t 0 -i 1 -o 1 -s 51200 -S 2 -f 64 -T 10\n"
+ " " PROG_NAME " -t 1 -i 10 -o 10 -s 4096 -S 0 -m 1 -f 10 -c 4 -p 1\n"
+ " " PROG_NAME " -t 2 -i 10 -o 1 -s 1024 -S 3 -f 10 -c 4 -p 1\n"
+ "\n"
+ "Optional OPTIONS:\n"
+ "\n"
+ " -t, --trs_type Transfer type for test data. %u by default.\n"
+ " Types:\n"
+ " 0: synchronous DMA\n"
+ " 1: asynchronous DMA\n"
+ " 2: SW memory copy\n"
+ " -i, --num_in_seg Number of input segments to transfer. 0 means the maximum\n"
+ " count supported by the implementation. %u by default.\n"
+ " -o, --num_out_seg Number of output segments to transfer to. 0 means the\n"
+ " maximum count supported by the implementation. %u by\n"
+ " default.\n"
+ " -s, --in_seg_len Input segment length in bytes. 0 length means the maximum\n"
+ " segment length supported by the implementation. The actual\n"
+ " maximum might be limited by what type of data is\n"
+ " transferred (packet/memory). %u by default.\n"
+ " -S, --in_seg_type Input segment data type. Dense types can load the DMA\n"
+ " subsystem more heavily as transfer resources are\n"
+ " pre-configured. Sparse types might on the other hand\n"
+ " reflect application usage more precisely as transfer\n"
+ " resources are configured in runtime. %u by default.\n"
+ " Types:\n"
+ " 0: dense packet\n"
+ " 1: sparse packet\n"
+ " 2: dense memory\n"
+ " 3: sparse memory\n"
+ " -m, --compl_mode Completion mode for transfers. %u by default.\n"
+ " Modes:\n"
+ " 0: poll\n"
+ " 1: event\n"
+ " -f, --max_in_flight Maximum transfers in-flight per session. 0 means the\n"
+ " maximum supported by the tester/implementation. %u by\n"
+ " default.\n"
+ " -T, --time_sec Time in seconds to run. 0 means infinite. %u by default.\n"
+ " -c, --worker_count Amount of workers. %u by default.\n"
+ " -p, --policy DMA session policy. %u by default.\n"
+ " Policies:\n"
+ " 0: One session shared by workers\n"
+ " 1: One session per worker\n"
+ " -v, --verify Verify transfers. Checks correctness of destination data\n"
+ " after successful transfers.\n"
+ " -h, --help This help.\n"
+ "\n", DEF_TRS_TYPE, DEF_SEG_CNT, DEF_SEG_CNT, DEF_LEN, DEF_SEG_TYPE, DEF_MODE,
+ DEF_INFLIGHT, DEF_TIME, DEF_WORKERS, DEF_POLICY);
+}
+
+/* Validate parsed options against ODP capabilities and derive dependent
+ * configuration (session count, destination segment length, cache sizes,
+ * SHM size). Returns PRS_OK on success, PRS_NOK for invalid input and
+ * PRS_NOT_SUP when a capability limit is exceeded. */
+static parse_result_t check_options(prog_config_t *config)
+{
+ int max_workers;
+ odp_dma_capability_t dma_capa;
+ uint32_t num_sessions, max_seg_len, max_trs, max_in, max_out, max_segs;
+ odp_schedule_capability_t sched_capa;
+ odp_pool_capability_t pool_capa;
+ odp_shm_capability_t shm_capa;
+ uint64_t shm_size = 0U;
+
+ if (config->trs_type != SYNC_DMA && config->trs_type != ASYNC_DMA &&
+ config->trs_type != SW_COPY) {
+ ODPH_ERR("Invalid transfer type: %u\n", config->trs_type);
+ return PRS_NOK;
+ }
+
+ if (config->seg_type != DENSE_PACKET && config->seg_type != SPARSE_PACKET &&
+ config->seg_type != DENSE_MEMORY && config->seg_type != SPARSE_MEMORY) {
+ ODPH_ERR("Invalid segment type: %u\n", config->seg_type);
+ return PRS_NOK;
+ }
+
+ /* One thread is kept out of the worker budget, presumably for the control
+ * thread — confirm against main(). */
+ max_workers = ODPH_MIN(odp_thread_count_max() - 1, MAX_WORKERS);
+
+ if (config->num_workers <= 0 || config->num_workers > max_workers) {
+ ODPH_ERR("Invalid thread count: %d (min: 1, max: %d)\n", config->num_workers,
+ max_workers);
+ return PRS_NOK;
+ }
+
+ if (config->policy != SINGLE && config->policy != MANY) {
+ ODPH_ERR("Invalid DMA session policy: %u\n", config->policy);
+ return PRS_NOK;
+ }
+
+ if (odp_dma_capability(&dma_capa) < 0) {
+ ODPH_ERR("Error querying DMA capabilities\n");
+ return PRS_NOK;
+ }
+
+ /* SINGLE: all workers share one session; MANY: one session per worker. */
+ num_sessions = config->policy == SINGLE ? 1 : config->num_workers;
+
+ if (num_sessions > dma_capa.max_sessions) {
+ ODPH_ERR("Not enough DMA sessions supported: %u (max: %u)\n", num_sessions,
+ dma_capa.max_sessions);
+ return PRS_NOT_SUP;
+ }
+
+ config->num_sessions = num_sessions;
+
+ if (config->num_in_segs == 0U)
+ config->num_in_segs = dma_capa.max_src_segs;
+
+ if (config->num_out_segs == 0U)
+ config->num_out_segs = dma_capa.max_dst_segs;
+
+ if (config->num_in_segs > dma_capa.max_src_segs ||
+ config->num_out_segs > dma_capa.max_dst_segs ||
+ config->num_in_segs + config->num_out_segs > dma_capa.max_segs) {
+ ODPH_ERR("Unsupported segment count configuration, in: %u, out: %u (max in: %u, "
+ "max out: %u, max tot: %u)\n", config->num_in_segs, config->num_out_segs,
+ dma_capa.max_src_segs, dma_capa.max_dst_segs, dma_capa.max_segs);
+ return PRS_NOT_SUP;
+ }
+
+ if (config->src_seg_len == 0U)
+ config->src_seg_len = dma_capa.max_seg_len;
+
+ /* Per-destination-segment share of the total source data, plus the division
+ * remainder, so the destination side always covers the full source length. */
+ config->dst_seg_len = config->src_seg_len * config->num_in_segs /
+ config->num_out_segs + config->src_seg_len *
+ config->num_in_segs % config->num_out_segs;
+
+ max_seg_len = ODPH_MAX(config->src_seg_len, config->dst_seg_len);
+
+ if (max_seg_len > dma_capa.max_seg_len) {
+ ODPH_ERR("Unsupported total DMA segment length: %u (max: %u)\n", max_seg_len,
+ dma_capa.max_seg_len);
+ return PRS_NOT_SUP;
+ }
+
+ if (config->trs_type == ASYNC_DMA) {
+ if (config->compl_mode != POLL && config->compl_mode != EVENT) {
+ ODPH_ERR("Invalid completion mode: %u\n", config->compl_mode);
+ return PRS_NOK;
+ }
+
+ if (config->compl_mode == POLL && (dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL)
+ == 0U) {
+ ODPH_ERR("Unsupported DMA completion mode, poll\n");
+ return PRS_NOT_SUP;
+ }
+
+ if (config->compl_mode == EVENT) {
+ /* Event completion needs one completion pool per session. */
+ if (config->num_sessions > dma_capa.pool.max_pools) {
+ ODPH_ERR("Unsupported amount of completion pools: %u (max: %u)\n",
+ config->num_sessions, dma_capa.pool.max_pools);
+ return PRS_NOT_SUP;
+ }
+
+ if ((dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) == 0U) {
+ ODPH_ERR("Unsupported DMA completion mode, event\n");
+ return PRS_NOT_SUP;
+ }
+
+ if (dma_capa.queue_type_sched == 0) {
+ ODPH_ERR("Unsupported DMA queueing type, scheduled\n");
+ return PRS_NOT_SUP;
+ }
+
+ if (config->num_inflight > dma_capa.pool.max_num) {
+ ODPH_ERR("Unsupported amount of completion events: %u (max: %u)\n",
+ config->num_inflight, dma_capa.pool.max_num);
+ return PRS_NOT_SUP;
+ }
+
+ if (odp_schedule_capability(&sched_capa) < 0) {
+ ODPH_ERR("Error querying scheduler capabilities\n");
+ return PRS_NOK;
+ }
+
+ /* NOTE(review): three scheduler groups are assumed reserved
+ * (e.g. the predefined all/worker/control groups) — confirm. */
+ if (config->num_sessions > sched_capa.max_groups - 3U) {
+ ODPH_ERR("Unsupported amount of scheduler groups: %u (max: %u)\n",
+ config->num_sessions, sched_capa.max_groups - 3U);
+ return PRS_NOT_SUP;
+ }
+ }
+
+ config->compl_mode_mask |= mode_map[config->compl_mode];
+ }
+
+ /* Transfer descriptors share the MAX_SEGS-sized tables, so in-flight count
+ * is bounded by both the implementation and the tester. */
+ max_trs = ODPH_MIN(dma_capa.max_transfers, MAX_SEGS);
+
+ if (config->num_inflight == 0U)
+ config->num_inflight = max_trs;
+
+ if (config->num_inflight > max_trs) {
+ ODPH_ERR("Unsupported amount of in-flight DMA transfers: %u (max: %u)\n",
+ config->num_inflight, max_trs);
+ return PRS_NOT_SUP;
+ }
+
+ max_in = config->num_in_segs * config->num_inflight;
+ max_out = config->num_out_segs * config->num_inflight;
+ max_segs = ODPH_MAX(max_in, max_out);
+
+ if (max_segs > MAX_SEGS) {
+ ODPH_ERR("Unsupported input/output * inflight segment combination: %u (max: %u)\n",
+ max_segs, MAX_SEGS);
+ return PRS_NOT_SUP;
+ }
+
+ if (config->seg_type == DENSE_PACKET || config->seg_type == SPARSE_PACKET) {
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("Error querying pool capabilities\n");
+ return PRS_NOK;
+ }
+
+ if (pool_capa.pkt.max_pools < 2U) {
+ ODPH_ERR("Unsupported amount of packet pools: 2 (max: %u)\n",
+ pool_capa.pkt.max_pools);
+ return PRS_NOT_SUP;
+ }
+
+ if (pool_capa.pkt.max_len != 0U && max_seg_len > pool_capa.pkt.max_len) {
+ ODPH_ERR("Unsupported packet size: %u (max: %u)\n", max_seg_len,
+ pool_capa.pkt.max_len);
+ return PRS_NOT_SUP;
+ }
+
+ if (pool_capa.pkt.max_num != 0U &&
+ max_segs * num_sessions > pool_capa.pkt.max_num) {
+ ODPH_ERR("Unsupported amount of packet pool elements: %u (max: %u)\n",
+ max_segs * num_sessions, pool_capa.pkt.max_num);
+ return PRS_NOT_SUP;
+ }
+
+ /* Clamp the desired per-thread cache size into the supported range. */
+ config->src_cache_size = ODPH_MIN(ODPH_MAX(max_in, pool_capa.pkt.min_cache_size),
+ pool_capa.pkt.max_cache_size);
+ config->dst_cache_size = ODPH_MIN(ODPH_MAX(max_out, pool_capa.pkt.min_cache_size),
+ pool_capa.pkt.max_cache_size);
+ } else {
+ /* If SHM implementation capabilities are very puny, program will have already
+ * failed when reserving memory for global program configuration. */
+ if (odp_shm_capability(&shm_capa) < 0) {
+ ODPH_ERR("Error querying SHM capabilities\n");
+ return PRS_NOK;
+ }
+
+ /* One block for program configuration, one for source memory and one for
+ * destination memory. */
+ if (shm_capa.max_blocks < 3U) {
+ ODPH_ERR("Unsupported amount of SHM blocks: 3 (max: %u)\n",
+ shm_capa.max_blocks);
+ return PRS_NOT_SUP;
+ }
+
+ shm_size = (uint64_t)config->dst_seg_len * config->num_out_segs *
+ config->num_inflight;
+
+ if (shm_capa.max_size != 0U && shm_size > shm_capa.max_size) {
+ ODPH_ERR("Unsupported total SHM block size: %" PRIu64 ""
+ " (max: %" PRIu64 ")\n", shm_size, shm_capa.max_size);
+ return PRS_NOT_SUP;
+ }
+
+ /* Sparse memory runs get a larger area (up to MAX_MEMORY), presumably so
+ * runtime-picked segment addresses can spread out — confirm with the
+ * sparse prep function. */
+ if (config->seg_type == SPARSE_MEMORY && shm_size < MAX_MEMORY)
+ shm_size = shm_capa.max_size != 0U ?
+ ODPH_MIN(shm_capa.max_size, MAX_MEMORY) : MAX_MEMORY;
+
+ config->shm_size = shm_size;
+ }
+
+ return PRS_OK;
+}
+
+/* Parse the command line into 'config'. Returns the check_options() verdict,
+ * PRS_TERM for -h/--help, or PRS_NOK for unknown options. Numeric arguments
+ * are converted with atoi()/atof() without error reporting; range validation
+ * is done afterwards by check_options(). */
+static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
+{
+ int opt, long_index;
+ static const struct option longopts[] = {
+ { "trs_type", required_argument, NULL, 't' },
+ { "num_in_seg", required_argument, NULL, 'i' },
+ { "num_out_seg", required_argument, NULL, 'o' },
+ { "in_seg_len", required_argument, NULL, 's' },
+ { "in_seg_type", required_argument, NULL, 'S' },
+ { "compl_mode", required_argument, NULL, 'm' },
+ { "max_in_flight", required_argument, NULL, 'f'},
+ { "time_sec", required_argument, NULL, 'T' },
+ { "worker_count", required_argument, NULL, 'c' },
+ { "policy", required_argument, NULL, 'p' },
+ { "verify", no_argument, NULL, 'v' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+ static const char *shortopts = "t:i:o:s:S:m:f:T:c:p:vh";
+
+ init_config(config);
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 't':
+ config->trs_type = atoi(optarg);
+ break;
+ case 'i':
+ config->num_in_segs = atoi(optarg);
+ break;
+ case 'o':
+ config->num_out_segs = atoi(optarg);
+ break;
+ case 's':
+ config->src_seg_len = atoi(optarg);
+ break;
+ case 'S':
+ config->seg_type = atoi(optarg);
+ break;
+ case 'm':
+ config->compl_mode = atoi(optarg);
+ break;
+ case 'f':
+ config->num_inflight = atoi(optarg);
+ break;
+ case 'T':
+ config->time_sec = atof(optarg);
+ break;
+ case 'c':
+ config->num_workers = atoi(optarg);
+ break;
+ case 'p':
+ config->policy = atoi(optarg);
+ break;
+ case 'v':
+ config->is_verify = true;
+ break;
+ case 'h':
+ print_usage();
+ return PRS_TERM;
+ case '?':
+ default:
+ print_usage();
+ return PRS_NOK;
+ }
+ }
+
+ return check_options(config);
+}
+
+/* Install SIGINT/SIGTERM/SIGHUP handlers (graceful stop via terminate()) and
+ * then parse and validate the command line. */
+static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
+ ODPH_ERR("Error installing signal handler\n");
+ return PRS_NOK;
+ }
+
+ return parse_options(argc, argv, config);
+}
+
+/* Lazily create the source packet pool shared by all sessions; subsequent
+ * calls return the cached handle. Returns ODP_POOL_INVALID on failure. */
+static odp_pool_t get_src_packet_pool(void)
+{
+ odp_pool_param_t param;
+ /* Each worker needs all of its in-flight source segments, or at least a
+ * full pool cache's worth of packets. */
+ uint32_t num_pkts_per_worker = ODPH_MAX(prog_conf->num_inflight * prog_conf->num_in_segs,
+ prog_conf->src_cache_size);
+
+ if (prog_conf->src_pool != ODP_POOL_INVALID)
+ return prog_conf->src_pool;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = num_pkts_per_worker * prog_conf->num_workers;
+ param.pkt.len = prog_conf->src_seg_len;
+ param.pkt.seg_len = prog_conf->src_seg_len;
+ param.pkt.cache_size = prog_conf->src_cache_size;
+ prog_conf->src_pool = odp_pool_create(PROG_NAME "_src_pkts", &param);
+
+ return prog_conf->src_pool;
+}
+
+/* Lazily create the destination packet pool shared by all sessions; subsequent
+ * calls return the cached handle. Returns ODP_POOL_INVALID on failure. */
+static odp_pool_t get_dst_packet_pool(void)
+{
+ odp_pool_param_t param;
+ /* Each worker needs all of its in-flight destination segments, or at least
+ * a full pool cache's worth of packets. */
+ uint32_t num_pkts_per_worker = ODPH_MAX(prog_conf->num_inflight * prog_conf->num_out_segs,
+ prog_conf->dst_cache_size);
+
+ if (prog_conf->dst_pool != ODP_POOL_INVALID)
+ return prog_conf->dst_pool;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = num_pkts_per_worker * prog_conf->num_workers;
+ param.pkt.len = prog_conf->dst_seg_len;
+ param.pkt.seg_len = prog_conf->dst_seg_len;
+ param.pkt.cache_size = prog_conf->dst_cache_size;
+ prog_conf->dst_pool = odp_pool_create(PROG_NAME "_dst_pkts", &param);
+
+ return prog_conf->dst_pool;
+}
+
+/* Attach the shared source and destination packet pools to the session
+ * descriptor. Returns false (with an error log) if either pool could not
+ * be created. */
+static odp_bool_t configure_packets(sd_t *sd)
+{
+ odp_pool_t pool;
+
+ pool = get_src_packet_pool();
+ sd->seg.src_pool = pool;
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating source packet pool\n");
+ return false;
+ }
+
+ pool = get_dst_packet_pool();
+ sd->seg.dst_pool = pool;
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating destination packet pool\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Pre-allocate every source and destination segment packet for a dense-packet
+ * session. On failure, packets allocated so far remain in the tables (entries
+ * were initialized to ODP_PACKET_INVALID) and are reclaimed by free_packets(). */
+static odp_bool_t allocate_packets(sd_t *sd)
+{
+ for (uint32_t i = 0U; i < sd->dma.num_inflight * sd->dma.num_in_segs; ++i) {
+ sd->seg.src_pkt[i] = odp_packet_alloc(sd->seg.src_pool, sd->dma.src_seg_len);
+
+ if (sd->seg.src_pkt[i] == ODP_PACKET_INVALID) {
+ ODPH_ERR("Error allocating source segment packets\n");
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight * sd->dma.num_out_segs; ++i) {
+ sd->seg.dst_pkt[i] = odp_packet_alloc(sd->seg.dst_pool, sd->dma.dst_seg_len);
+
+ if (sd->seg.dst_pkt[i] == ODP_PACKET_INVALID) {
+ ODPH_ERR("Error allocating destination segment packets\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Segment-setup hook for packet-based tests: always attach the pools, and for
+ * the dense variant also pre-allocate all segment packets. */
+static odp_bool_t setup_packet_segments(sd_t *sd)
+{
+ if (!configure_packets(sd))
+ return false;
+
+ if (sd->seg.seg_type != DENSE_PACKET)
+ return true;
+
+ return allocate_packets(sd);
+}
+
+/* Fill a buffer with the constant test byte pattern (DATA). */
+static inline void fill_data(uint8_t *data, uint32_t len)
+{
+ memset(data, DATA, len);
+}
+
+/* Build the DMA transfer parameters for packet segments. 'k' and 'z' walk the
+ * flat per-session source/destination packet tables, consuming num_in_segs and
+ * num_out_segs entries per transfer descriptor. For sparse-packet sessions the
+ * table entries are still ODP_PACKET_INVALID here and data fill is skipped. */
+static void configure_packet_transfer(sd_t *sd)
+{
+ odp_dma_seg_t *start_src_seg, *start_dst_seg, *seg;
+ uint32_t k = 0U, z = 0U, len;
+ odp_packet_t pkt;
+ odp_dma_transfer_param_t *param;
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
+ start_src_seg = &sd->dma.src_seg[k];
+ start_dst_seg = &sd->dma.dst_seg[z];
+
+ for (uint32_t j = 0U; j < sd->dma.num_in_segs; ++j, ++k) {
+ pkt = sd->seg.src_pkt[k];
+ seg = &start_src_seg[j];
+ seg->packet = pkt;
+ seg->offset = 0U;
+ seg->len = sd->dma.src_seg_len;
+
+ if (seg->packet != ODP_PACKET_INVALID)
+ fill_data(odp_packet_data(seg->packet), seg->len);
+ }
+
+ /* Spread the total source length over the destination segments; the
+ * last one may carry less than dst_seg_len. */
+ len = sd->dma.num_in_segs * sd->dma.src_seg_len;
+
+ for (uint32_t j = 0U; j < sd->dma.num_out_segs; ++j, ++z) {
+ pkt = sd->seg.dst_pkt[z];
+ seg = &start_dst_seg[j];
+ seg->packet = pkt;
+ seg->offset = 0U;
+ seg->len = ODPH_MIN(len, sd->dma.dst_seg_len);
+ len -= sd->dma.dst_seg_len;
+ }
+
+ param = &sd->dma.infos[i].trs_param;
+ odp_dma_transfer_param_init(param);
+ param->src_format = ODP_DMA_FORMAT_PACKET;
+ param->dst_format = ODP_DMA_FORMAT_PACKET;
+ param->num_src = sd->dma.num_in_segs;
+ param->num_dst = sd->dma.num_out_segs;
+ param->src_seg = start_src_seg;
+ param->dst_seg = start_dst_seg;
+ }
+}
+
+/* Release every valid source and destination segment packet of the session. */
+static void free_packets(const sd_t *sd)
+{
+ const uint32_t num_src = sd->dma.num_inflight * sd->dma.num_in_segs;
+ const uint32_t num_dst = sd->dma.num_inflight * sd->dma.num_out_segs;
+ odp_packet_t pkt;
+ uint32_t i;
+
+ for (i = 0U; i < num_src; ++i) {
+ pkt = sd->seg.src_pkt[i];
+
+ if (pkt != ODP_PACKET_INVALID)
+ odp_packet_free(pkt);
+ }
+
+ for (i = 0U; i < num_dst; ++i) {
+ pkt = sd->seg.dst_pkt[i];
+
+ if (pkt != ODP_PACKET_INVALID)
+ odp_packet_free(pkt);
+ }
+}
+
+/* Reserve the source and destination SHM blocks and set up segment pointers.
+ * src_high/dst_high mark the highest valid start address for one segment
+ * within each block; cur_src/cur_dst are roaming cursors — presumably advanced
+ * by the sparse-memory prep function (not visible here), confirm. */
+static odp_bool_t allocate_memory(sd_t *sd)
+{
+ sd->seg.src_shm = odp_shm_reserve(PROG_NAME "_src_shm", sd->seg.shm_size,
+ ODP_CACHE_LINE_SIZE, 0U);
+ sd->seg.dst_shm = odp_shm_reserve(PROG_NAME "_dst_shm", sd->seg.shm_size,
+ ODP_CACHE_LINE_SIZE, 0U);
+
+ if (sd->seg.src_shm == ODP_SHM_INVALID || sd->seg.dst_shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Error allocating SHM block\n");
+ return false;
+ }
+
+ sd->seg.src = odp_shm_addr(sd->seg.src_shm);
+ sd->seg.dst = odp_shm_addr(sd->seg.dst_shm);
+
+ if (sd->seg.src == NULL || sd->seg.dst == NULL) {
+ ODPH_ERR("Error resolving SHM block address\n");
+ return false;
+ }
+
+ sd->seg.src_high = (uint8_t *)sd->seg.src + sd->seg.shm_size - sd->dma.src_seg_len;
+ sd->seg.dst_high = (uint8_t *)sd->seg.dst + sd->seg.shm_size - sd->dma.dst_seg_len;
+ sd->seg.cur_src = sd->seg.src;
+ sd->seg.cur_dst = sd->seg.dst;
+
+ return true;
+}
+
+/* Segment-setup hook for memory (SHM) based tests, see api.setup_fn. */
+static odp_bool_t setup_memory_segments(sd_t *sd)
+{
+ return allocate_memory(sd);
+}
+
+/* Build the DMA transfer parameters for plain-address (SHM) segments. Dense
+ * sessions get fixed addresses laid out back to back and pre-filled with test
+ * data; sparse sessions leave addr NULL — presumably filled per transfer by
+ * the prep function (assignment not visible here), confirm. */
+static void configure_address_transfer(sd_t *sd)
+{
+ odp_dma_seg_t *start_src_seg, *start_dst_seg, *seg;
+ uint32_t k = 0U, z = 0U, len;
+ odp_dma_transfer_param_t *param;
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
+ start_src_seg = &sd->dma.src_seg[k];
+ start_dst_seg = &sd->dma.dst_seg[z];
+
+ for (uint32_t j = 0U; j < sd->dma.num_in_segs; ++j, ++k) {
+ seg = &start_src_seg[j];
+ seg->addr = sd->seg.seg_type == SPARSE_MEMORY ?
+ NULL : (uint8_t *)sd->seg.src + k * sd->dma.src_seg_len;
+ seg->len = sd->dma.src_seg_len;
+
+ if (seg->addr != NULL)
+ fill_data(seg->addr, seg->len);
+ }
+
+ /* Spread the total source length over the destination segments; the
+ * last one may carry less than dst_seg_len. */
+ len = sd->dma.num_in_segs * sd->dma.src_seg_len;
+
+ for (uint32_t j = 0U; j < sd->dma.num_out_segs; ++j, ++z) {
+ seg = &start_dst_seg[j];
+ seg->addr = sd->seg.seg_type == SPARSE_MEMORY ?
+ NULL : (uint8_t *)sd->seg.dst + z * sd->dma.dst_seg_len;
+ seg->len = ODPH_MIN(len, sd->dma.dst_seg_len);
+ len -= sd->dma.dst_seg_len;
+ }
+
+ param = &sd->dma.infos[i].trs_param;
+ odp_dma_transfer_param_init(param);
+ param->src_format = ODP_DMA_FORMAT_ADDR;
+ param->dst_format = ODP_DMA_FORMAT_ADDR;
+ param->num_src = sd->dma.num_in_segs;
+ param->num_dst = sd->dma.num_out_segs;
+ param->src_seg = start_src_seg;
+ param->dst_seg = start_dst_seg;
+ }
+}
+
+/* Release the session's source and destination SHM blocks, if reserved. */
+static void free_memory(const sd_t *sd)
+{
+ const odp_shm_t blocks[] = { sd->seg.src_shm, sd->seg.dst_shm };
+
+ for (uint32_t i = 0U; i < 2U; ++i) {
+ if (blocks[i] != ODP_SHM_INVALID)
+ (void)odp_shm_free(blocks[i]);
+ }
+}
+
+/* Execute one synchronous DMA transfer and fold its wall-clock and CPU-cycle
+ * cost into 'stats'. A non-positive return from odp_dma_transfer() counts as a
+ * start error. For a synchronous transfer, starting and completing are the
+ * same operation, so the start-cost stats simply mirror the transfer-cycle
+ * stats. 'ver_fn', when set, verifies destination data of successful runs. */
+static void run_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats, ver_fn_t ver_fn)
+{
+ odp_time_t start_tm, end_tm;
+ uint64_t start_cc, end_cc, trs_tm, trs_cc;
+ odp_dma_result_t res;
+ int ret;
+
+ start_tm = odp_time_local_strict();
+ start_cc = odp_cpu_cycles();
+ ret = odp_dma_transfer(handle, &info->trs_param, &res);
+ end_cc = odp_cpu_cycles();
+ end_tm = odp_time_local_strict();
+
+ if (odp_unlikely(ret <= 0)) {
+ ++stats->start_errs;
+ } else {
+ trs_tm = odp_time_diff_ns(end_tm, start_tm);
+ stats->max_trs_tm = ODPH_MAX(trs_tm, stats->max_trs_tm);
+ stats->min_trs_tm = ODPH_MIN(trs_tm, stats->min_trs_tm);
+ stats->trs_tm += trs_tm;
+ trs_cc = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_trs_cc = ODPH_MAX(trs_cc, stats->max_trs_cc);
+ stats->min_trs_cc = ODPH_MIN(trs_cc, stats->min_trs_cc);
+ stats->trs_cc += trs_cc;
+ ++stats->trs_cnt;
+ stats->max_start_cc = stats->max_trs_cc;
+ stats->min_start_cc = stats->min_trs_cc;
+ stats->start_cc += trs_cc;
+ ++stats->start_cnt;
+
+ if (odp_unlikely(!res.success)) {
+ ++stats->transfer_errs;
+ } else {
+ ++stats->completed;
+
+ if (ver_fn != NULL)
+ ver_fn(info, stats);
+ }
+ }
+}
+
+/* Issue one round of synchronous transfers without locking; used when a
+ * session is owned by a single worker. */
+static void run_transfers_mt_unsafe(sd_t *sd, stats_t *stats)
+{
+ odp_dma_t handle = sd->dma.handle;
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_transfer(handle, info, stats, sd->ver_fn);
+ }
+}
+
+/* Issue one round of synchronous transfers, guarding each descriptor with its
+ * ticketlock so several workers can share one session; busy descriptors are
+ * skipped. */
+static void run_transfers_mt_safe(sd_t *sd, stats_t *stats)
+{
+ odp_dma_t handle = sd->dma.handle;
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (!odp_ticketlock_trylock(&info->lock))
+ continue;
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_transfer(handle, info, stats, sd->ver_fn);
+ odp_ticketlock_unlock(&info->lock);
+ }
+}
+
+/* Set up poll-mode completion parameters: allocate one transfer ID per
+ * in-flight descriptor. Returns false if the session runs out of IDs. */
+static odp_bool_t configure_poll_compl(sd_t *sd)
+{
+ odp_dma_compl_param_t *param;
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
+ param = &sd->dma.infos[i].compl_param;
+
+ odp_dma_compl_param_init(param);
+ param->compl_mode = mode_map[sd->dma.compl_mode];
+ param->transfer_id = odp_dma_transfer_id_alloc(sd->dma.handle);
+
+ if (param->transfer_id == ODP_DMA_TRANSFER_ID_INVALID) {
+ ODPH_ERR("Error allocating transfer ID\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Advance one poll-mode transfer descriptor. Two states: if a transfer is
+ * running, poll it with odp_dma_transfer_done() (charging the poll cost to the
+ * wait stats) and, on completion, record total transfer time/cycles and mark
+ * the descriptor idle. If idle, prepare and start a new transfer, charging the
+ * start cost to the start stats. */
+static void poll_transfer(sd_t *sd, trs_info_t *info, stats_t *stats)
+{
+ uint64_t start_cc, end_cc, trs_tm, trs_cc, wait_cc, start_cc_diff;
+ odp_time_t start_tm;
+ odp_dma_t handle = sd->dma.handle;
+ odp_dma_result_t res;
+ int ret;
+
+ if (info->is_running) {
+ start_cc = odp_cpu_cycles();
+ ret = odp_dma_transfer_done(handle, info->compl_param.transfer_id, &res);
+ end_cc = odp_cpu_cycles();
+
+ if (odp_unlikely(ret < 0)) {
+ ++stats->poll_errs;
+ return;
+ }
+
+ ++info->trs_poll_cnt;
+ wait_cc = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_wait_cc = ODPH_MAX(wait_cc, stats->max_wait_cc);
+ stats->min_wait_cc = ODPH_MIN(wait_cc, stats->min_wait_cc);
+ stats->wait_cc += wait_cc;
+ ++stats->wait_cnt;
+
+ /* Not done yet, poll again on a later round. */
+ if (ret == 0)
+ return;
+
+ /* Transfer completed: charge the full start-to-done duration. */
+ trs_tm = odp_time_diff_ns(odp_time_global_strict(), info->trs_start_tm);
+ stats->max_trs_tm = ODPH_MAX(trs_tm, stats->max_trs_tm);
+ stats->min_trs_tm = ODPH_MIN(trs_tm, stats->min_trs_tm);
+ stats->trs_tm += trs_tm;
+ trs_cc = odp_cpu_cycles_diff(odp_cpu_cycles(), info->trs_start_cc);
+ stats->max_trs_cc = ODPH_MAX(trs_cc, stats->max_trs_cc);
+ stats->min_trs_cc = ODPH_MIN(trs_cc, stats->min_trs_cc);
+ stats->trs_cc += trs_cc;
+ stats->trs_poll_cnt += info->trs_poll_cnt;
+ ++stats->trs_cnt;
+
+ if (odp_unlikely(!res.success)) {
+ ++stats->transfer_errs;
+ } else {
+ ++stats->completed;
+
+ if (sd->ver_fn != NULL)
+ sd->ver_fn(info, stats);
+ }
+
+ info->is_running = false;
+ } else {
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ start_tm = odp_time_global_strict();
+ start_cc = odp_cpu_cycles();
+ ret = odp_dma_transfer_start(handle, &info->trs_param, &info->compl_param);
+ end_cc = odp_cpu_cycles();
+
+ if (odp_unlikely(ret <= 0)) {
+ ++stats->start_errs;
+ } else {
+ info->trs_start_tm = start_tm;
+ info->trs_start_cc = start_cc;
+ info->trs_poll_cnt = 0U;
+ start_cc_diff = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_start_cc = ODPH_MAX(start_cc_diff, stats->max_start_cc);
+ stats->min_start_cc = ODPH_MIN(start_cc_diff, stats->min_start_cc);
+ stats->start_cc += start_cc_diff;
+ ++stats->start_cnt;
+ info->is_running = true;
+ }
+ }
+}
+
+/* Poll/advance every in-flight descriptor without locking; used when a
+ * session is owned by a single worker. */
+static void poll_transfers_mt_unsafe(sd_t *sd, stats_t *stats)
+{
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info)
+ poll_transfer(sd, info, stats);
+}
+
+/* Poll/advance every in-flight descriptor, guarding each one with its
+ * ticketlock so several workers can share one session; busy descriptors are
+ * skipped. */
+static void poll_transfers_mt_safe(sd_t *sd, stats_t *stats)
+{
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (!odp_ticketlock_trylock(&info->lock))
+ continue;
+
+ poll_transfer(sd, info, stats);
+ odp_ticketlock_unlock(&info->lock);
+ }
+}
+
+/* Teardown helper for poll mode: busy-wait until every still-running transfer
+ * has completed (or reports an error). */
+static void drain_poll_transfers(sd_t *sd)
+{
+ odp_dma_t handle = sd->dma.handle;
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (!info->is_running)
+ continue;
+
+ while (odp_dma_transfer_done(handle, info->compl_param.transfer_id, NULL) == 0)
+ ;
+ }
+}
+
+/* Create per-session resources for event completion: a scheduler group
+ * (created with an empty thread mask — workers presumably join it later, not
+ * visible here), a DMA completion event pool sized for all in-flight
+ * transfers, and a scheduled parallel completion queue in that group. */
+static odp_bool_t configure_event_compl_session(sd_t *sd)
+{
+ odp_thrmask_t zero;
+ odp_dma_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+
+ odp_thrmask_zero(&zero);
+ sd->grp = odp_schedule_group_create(PROG_NAME "_scd_grp", &zero);
+
+ if (sd->grp == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Error creating scheduler group for DMA session\n");
+ return false;
+ }
+
+ odp_dma_pool_param_init(&pool_param);
+ pool_param.num = sd->dma.num_inflight;
+ sd->dma.pool = odp_dma_pool_create(PROG_NAME "_dma_evs", &pool_param);
+
+ if (sd->dma.pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating DMA event completion pool\n");
+ return false;
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.group = sd->grp;
+ sd->dma.compl_q = odp_queue_create(PROG_NAME, &queue_param);
+
+ if (sd->dma.compl_q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating DMA completion queue\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Set up event-mode completion parameters: allocate one completion event per
+ * in-flight descriptor, target the session's completion queue, and stash the
+ * descriptor pointer in user_ptr so wait_compl_event() can find it. */
+static odp_bool_t configure_event_compl(sd_t *sd)
+{
+ odp_dma_compl_param_t *param;
+ odp_dma_compl_t c_ev;
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
+ param = &sd->dma.infos[i].compl_param;
+
+ odp_dma_compl_param_init(param);
+ param->compl_mode = mode_map[sd->dma.compl_mode];
+ c_ev = odp_dma_compl_alloc(sd->dma.pool);
+
+ if (c_ev == ODP_DMA_COMPL_INVALID) {
+ ODPH_ERR("Error allocating completion event\n");
+ return false;
+ }
+
+ param->event = odp_dma_compl_to_event(c_ev);
+ param->queue = sd->dma.compl_q;
+ param->user_ptr = &sd->dma.infos[i];
+ }
+
+ return true;
+}
+
+/* Bootstrap for event mode: start all in-flight transfers once; afterwards
+ * wait_compl_event() restarts each descriptor as its completion arrives. */
+static odp_bool_t start_initial_transfers(sd_t *sd)
+{
+ odp_time_t start_tm;
+ uint64_t start_cc;
+ trs_info_t *info;
+ int ret;
+
+ for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
+ info = &sd->dma.infos[i];
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ start_tm = odp_time_global_strict();
+ start_cc = odp_cpu_cycles();
+ ret = odp_dma_transfer_start(sd->dma.handle, &info->trs_param, &info->compl_param);
+
+ if (ret <= 0) {
+ ODPH_ERR("Error starting DMA transfer\n");
+ return false;
+ }
+
+ info->trs_start_tm = start_tm;
+ info->trs_start_cc = start_cc;
+ }
+
+ return true;
+}
+
+/* Event-mode wait function: schedule one completion event (up to a one-second
+ * wait, counted as a scheduler timeout on expiry), record the finished
+ * transfer's timing via the descriptor stashed in the result's user_ptr, then
+ * immediately restart a transfer on that same descriptor. */
+static void wait_compl_event(sd_t *sd, stats_t *stats)
+{
+ uint64_t start_cc, end_cc, wait_cc, trs_tm, trs_cc, start_cc_diff;
+ odp_time_t start_tm;
+ odp_event_t ev;
+ odp_dma_result_t res;
+ trs_info_t *info;
+ int ret;
+
+ start_cc = odp_cpu_cycles();
+ ev = odp_schedule(NULL, odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
+ end_cc = odp_cpu_cycles();
+
+ if (odp_unlikely(ev == ODP_EVENT_INVALID)) {
+ ++stats->scheduler_timeouts;
+ return;
+ }
+
+ odp_dma_compl_result(odp_dma_compl_from_event(ev), &res);
+ info = res.user_ptr;
+ trs_tm = odp_time_diff_ns(odp_time_global_strict(), info->trs_start_tm);
+ stats->max_trs_tm = ODPH_MAX(trs_tm, stats->max_trs_tm);
+ stats->min_trs_tm = ODPH_MIN(trs_tm, stats->min_trs_tm);
+ stats->trs_tm += trs_tm;
+ trs_cc = odp_cpu_cycles_diff(odp_cpu_cycles(), info->trs_start_cc);
+ stats->max_trs_cc = ODPH_MAX(trs_cc, stats->max_trs_cc);
+ stats->min_trs_cc = ODPH_MIN(trs_cc, stats->min_trs_cc);
+ stats->trs_cc += trs_cc;
+ ++stats->trs_cnt;
+ wait_cc = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_wait_cc = ODPH_MAX(wait_cc, stats->max_wait_cc);
+ stats->min_wait_cc = ODPH_MIN(wait_cc, stats->min_wait_cc);
+ stats->wait_cc += wait_cc;
+ ++stats->wait_cnt;
+
+ if (odp_unlikely(!res.success)) {
+ ++stats->transfer_errs;
+ } else {
+ ++stats->completed;
+
+ if (sd->ver_fn != NULL)
+ sd->ver_fn(info, stats);
+ }
+
+ /* Keep the pipeline full: reuse the descriptor (and its completion event)
+ * for the next transfer right away. */
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ start_tm = odp_time_global_strict();
+ start_cc = odp_cpu_cycles();
+ ret = odp_dma_transfer_start(sd->dma.handle, &info->trs_param, &info->compl_param);
+ end_cc = odp_cpu_cycles();
+
+ if (odp_unlikely(ret <= 0)) {
+ ++stats->start_errs;
+ } else {
+ info->trs_start_tm = start_tm;
+ info->trs_start_cc = start_cc;
+ start_cc_diff = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_start_cc = ODPH_MAX(start_cc_diff, stats->max_start_cc);
+ stats->min_start_cc = ODPH_MIN(start_cc_diff, stats->min_start_cc);
+ stats->start_cc += start_cc_diff;
+ ++stats->start_cnt;
+ }
+}
+
+/* Teardown helper for event mode: keep scheduling until no completion event
+ * arrives within a 100 ms window. */
+static void drain_compl_events(ODP_UNUSED sd_t *sd)
+{
+ const uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+
+ while (odp_schedule(NULL, wait) != ODP_EVENT_INVALID)
+ ;
+}
+
+/* SW-copy counterpart of run_transfer(): copy all source segments into the
+ * destination segments with memcpy() and record the cost in 'stats'. As with
+ * the sync DMA path, the start-cost stats mirror the transfer-cycle stats.
+ * NOTE(review): when a source and a destination segment are exhausted on the
+ * same iteration (src_rem == dst_rem == 0), the next iteration performs a
+ * zero-length copy before the source index advances — appears benign since
+ * tot_len is untouched by it, but worth confirming. */
+static void run_memcpy(trs_info_t *info, stats_t *stats, ver_fn_t ver_fn)
+{
+ odp_time_t start_tm;
+ uint64_t start_cc, end_cc, trs_tm, trs_cc;
+ const odp_dma_transfer_param_t *param = &info->trs_param;
+ uint32_t tot_len, src_len, dst_len, min_len, len, i = 0U, j = 0U, src_off = 0U,
+ dst_off = 0U, src_rem, dst_rem;
+ const odp_bool_t is_addr = param->src_format == ODP_DMA_FORMAT_ADDR;
+ uint8_t *src_data, *dst_data;
+
+ /* Test data is configured so that total source and total destination sizes always match,
+ * all source and all destination segments have the same size and in case of packets,
+ * there's always just a single segment. */
+ tot_len = param->num_src * param->src_seg->len;
+ src_len = param->src_seg->len;
+ dst_len = param->dst_seg->len;
+ min_len = ODPH_MIN(src_len, dst_len);
+ len = min_len;
+ start_tm = odp_time_local_strict();
+ start_cc = odp_cpu_cycles();
+
+ while (tot_len > 0U) {
+ if (is_addr) {
+ src_data = param->src_seg[i].addr;
+ dst_data = param->dst_seg[j].addr;
+ } else {
+ src_data = odp_packet_data(param->src_seg[i].packet);
+ dst_data = odp_packet_data(param->dst_seg[j].packet);
+ }
+
+ memcpy(dst_data + dst_off, src_data + src_off, len);
+ dst_off += len;
+ src_off += len;
+ src_rem = src_len - src_off;
+ dst_rem = dst_len - dst_off;
+ tot_len -= len;
+ /* Next chunk: bounded by whichever segment still has room. */
+ len = ODPH_MIN(ODPH_MAX(src_rem, dst_rem), min_len);
+
+ /* Destination still has room, so the source segment ran out (and vice
+ * versa): advance the exhausted side and rewind its offset. */
+ if (dst_rem > 0U) {
+ ++i;
+ src_off = 0U;
+ } else {
+ ++j;
+ dst_off = 0U;
+ }
+ }
+
+ end_cc = odp_cpu_cycles();
+ trs_tm = odp_time_diff_ns(odp_time_local_strict(), start_tm);
+ stats->max_trs_tm = ODPH_MAX(trs_tm, stats->max_trs_tm);
+ stats->min_trs_tm = ODPH_MIN(trs_tm, stats->min_trs_tm);
+ stats->trs_tm += trs_tm;
+ trs_cc = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_trs_cc = ODPH_MAX(trs_cc, stats->max_trs_cc);
+ stats->min_trs_cc = ODPH_MIN(trs_cc, stats->min_trs_cc);
+ stats->trs_cc += trs_cc;
+ ++stats->trs_cnt;
+ stats->max_start_cc = stats->max_trs_cc;
+ stats->min_start_cc = stats->min_trs_cc;
+ stats->start_cc += trs_cc;
+ ++stats->start_cnt;
+ ++stats->completed;
+
+ if (ver_fn != NULL)
+ ver_fn(info, stats);
+}
+
+/* Run one round of SW memory copies without locking; used when a session is
+ * owned by a single worker. */
+static void run_memcpy_mt_unsafe(sd_t *sd, stats_t *stats)
+{
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_memcpy(info, stats, sd->ver_fn);
+ }
+}
+
+/* Run one round of SW memory copies, guarding each descriptor with its
+ * ticketlock so several workers can share one session; busy descriptors are
+ * skipped. */
+static void run_memcpy_mt_safe(sd_t *sd, stats_t *stats)
+{
+ trs_info_t *info = sd->dma.infos;
+ trs_info_t *const end = info + sd->dma.num_inflight;
+
+ for (; info < end; ++info) {
+ if (!odp_ticketlock_trylock(&info->lock))
+ continue;
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_memcpy(info, stats, sd->ver_fn);
+ odp_ticketlock_unlock(&info->lock);
+ }
+}
+
+/* Select the function table for the configured test: segment handling by
+ * segment type (packet vs. SHM memory) and the run/completion hooks by
+ * transfer type (sync DMA, async DMA poll/event, SW copy). The lock-free
+ * "mt_unsafe" variants are used whenever a session has a single owner, i.e.
+ * one worker total or one session per worker (policy MANY). */
+static void setup_api(prog_config_t *config)
+{
+ if (config->seg_type == DENSE_PACKET || config->seg_type == SPARSE_PACKET) {
+ config->api.setup_fn = setup_packet_segments;
+ config->api.trs_fn = configure_packet_transfer;
+ config->api.free_fn = free_packets;
+ } else {
+ config->api.setup_fn = setup_memory_segments;
+ config->api.trs_fn = configure_address_transfer;
+ config->api.free_fn = free_memory;
+ }
+
+ if (config->trs_type == SYNC_DMA) {
+ config->api.session_cfg_fn = NULL;
+ config->api.compl_fn = NULL;
+ config->api.bootstrap_fn = NULL;
+ config->api.wait_fn = config->num_workers == 1 || config->policy == MANY ?
+ run_transfers_mt_unsafe : run_transfers_mt_safe;
+ config->api.drain_fn = NULL;
+ } else if (config->trs_type == ASYNC_DMA) {
+ if (config->compl_mode == POLL) {
+ config->api.session_cfg_fn = NULL;
+ config->api.compl_fn = configure_poll_compl;
+ config->api.bootstrap_fn = NULL;
+ config->api.wait_fn = config->num_workers == 1 || config->policy == MANY ?
+ poll_transfers_mt_unsafe : poll_transfers_mt_safe;
+ config->api.drain_fn = drain_poll_transfers;
+ } else {
+ config->api.session_cfg_fn = configure_event_compl_session;
+ config->api.compl_fn = configure_event_compl;
+ config->api.bootstrap_fn = start_initial_transfers;
+ config->api.wait_fn = wait_compl_event;
+ config->api.drain_fn = drain_compl_events;
+ }
+ } else {
+ config->api.session_cfg_fn = NULL;
+ config->api.compl_fn = NULL;
+ config->api.bootstrap_fn = NULL;
+ config->api.wait_fn = config->num_workers == 1 || config->policy == MANY ?
+ run_memcpy_mt_unsafe : run_memcpy_mt_safe;
+ config->api.drain_fn = NULL;
+ }
+}
+
+/* Re-allocate the source and destination packets of one transfer descriptor
+ * ("sparse packet" mode) and fill the new source packets with test data.
+ * Aborts the program if a pool runs dry, as the pools are dimensioned to
+ * always have enough packets. */
+static void prepare_packet_transfer(sd_t *sd, trs_info_t *info)
+{
+	odp_dma_transfer_param_t *trs = &info->trs_param;
+
+	for (uint32_t k = 0U; k < trs->num_src; ++k) {
+		odp_dma_seg_t *src = &trs->src_seg[k];
+
+		if (odp_likely(src->packet != ODP_PACKET_INVALID))
+			odp_packet_free(src->packet);
+
+		src->packet = odp_packet_alloc(sd->seg.src_pool, src->len);
+
+		if (odp_unlikely(src->packet == ODP_PACKET_INVALID))
+			/* There should always be enough packets. */
+			ODPH_ABORT("Failed to allocate packet, aborting\n");
+
+		fill_data(odp_packet_data(src->packet), src->len);
+	}
+
+	for (uint32_t k = 0U; k < trs->num_dst; ++k) {
+		odp_dma_seg_t *dst = &trs->dst_seg[k];
+
+		if (odp_likely(dst->packet != ODP_PACKET_INVALID))
+			odp_packet_free(dst->packet);
+
+		/* Destination packets are left unfilled; the transfer itself
+		 * writes them. */
+		dst->packet = odp_packet_alloc(sd->seg.dst_pool, dst->len);
+
+		if (odp_unlikely(dst->packet == ODP_PACKET_INVALID))
+			/* There should always be enough packets. */
+			ODPH_ABORT("Failed to allocate packet, aborting\n");
+	}
+}
+
+/* Pick fresh source and destination addresses for one transfer descriptor
+ * ("sparse memory" mode), wrapping back to the start of the area once the
+ * high watermark has been passed, and refill the source segments with test
+ * data. */
+static void prepare_address_transfer(sd_t *sd, trs_info_t *info)
+{
+	odp_dma_transfer_param_t *param = &info->trs_param;
+	uint8_t *addr = sd->seg.cur_src;
+	odp_dma_seg_t *seg;
+
+	for (uint32_t i = 0U; i < param->num_src; ++i) {
+		seg = &param->src_seg[i];
+
+		/* Wrap to the beginning of the source area when past the end. */
+		if (odp_unlikely(addr > (uint8_t *)sd->seg.src_high))
+			addr = sd->seg.src;
+
+		seg->addr = addr;
+		addr += sd->dma.src_seg_len;
+		fill_data(seg->addr, seg->len);
+	}
+
+	/* A cache line is skipped between consecutive transfers' segment
+	 * runs — presumably to keep transfers on separate cache lines;
+	 * confirm intent against the "sparse" mode description. */
+	sd->seg.cur_src = addr + ODP_CACHE_LINE_SIZE;
+	addr = sd->seg.cur_dst;
+
+	for (uint32_t i = 0U; i < param->num_dst; ++i) {
+		/* Same wraparound scheme for the destination area. */
+		if (odp_unlikely(addr > (uint8_t *)sd->seg.dst_high))
+			addr = sd->seg.dst;
+
+		param->dst_seg[i].addr = addr;
+		addr += sd->dma.dst_seg_len;
+	}
+
+	sd->seg.cur_dst = addr + ODP_CACHE_LINE_SIZE;
+}
+
+/* Verify that every byte of every destination segment of a completed
+ * transfer equals the expected fill byte. Counts at most one data error per
+ * transfer: the scan stops at the first mismatch. */
+static void verify_transfer(trs_info_t *info, stats_t *stats)
+{
+	odp_dma_transfer_param_t *trs = &info->trs_param;
+	const odp_bool_t use_addr = trs->dst_format == ODP_DMA_FORMAT_ADDR;
+
+	for (uint32_t i = 0U; i < trs->num_dst; ++i) {
+		odp_dma_seg_t *dst = &trs->dst_seg[i];
+		const uint8_t *bytes = use_addr ? dst->addr : odp_packet_data(dst->packet);
+
+		for (uint32_t off = 0U; off < dst->len; ++off) {
+			if (odp_unlikely(bytes[off] != DATA)) {
+				++stats->data_errs;
+				return;
+			}
+		}
+	}
+}
+
+/* Create one DMA session per configured session descriptor and resolve the
+ * per-session transfer preparation and verification callbacks. Returns
+ * false on the first failure; resources created so far are released later
+ * by teardown_test(). */
+static odp_bool_t setup_session_descriptors(prog_config_t *config)
+{
+	sd_t *sd;
+	const odp_dma_param_t dma_params = {
+		.direction = ODP_DMA_MAIN_TO_MAIN,
+		.type = ODP_DMA_TYPE_COPY,
+		.compl_mode_mask = config->compl_mode_mask,
+		/* Serial mode is enough when only one thread ever touches a
+		 * session (single worker or per-worker sessions). */
+		.mt_mode = config->num_workers == 1 || config->policy == MANY ?
+				ODP_DMA_MT_SERIAL : ODP_DMA_MT_SAFE,
+		.order = ODP_DMA_ORDER_NONE };
+
+	for (uint32_t i = 0U; i < config->num_sessions; ++i) {
+		char name[ODP_DMA_NAME_LEN];
+
+		sd = &config->sds[i];
+		sd->dma.num_in_segs = config->num_in_segs;
+		sd->dma.num_out_segs = config->num_out_segs;
+		sd->dma.src_seg_len = config->src_seg_len;
+		sd->dma.dst_seg_len = config->dst_seg_len;
+		sd->dma.num_inflight = config->num_inflight;
+		sd->dma.trs_type = config->trs_type;
+		sd->dma.compl_mode = config->compl_mode;
+		snprintf(name, sizeof(name), PROG_NAME "_dma_%u", i);
+		sd->dma.handle = odp_dma_create(name, &dma_params);
+
+		if (sd->dma.handle == ODP_DMA_INVALID) {
+			ODPH_ERR("Error creating DMA session\n");
+			return false;
+		}
+
+		/* Event completion mode also needs a completion queue/pool. */
+		if (config->api.session_cfg_fn != NULL && !config->api.session_cfg_fn(sd))
+			return false;
+
+		sd->seg.shm_size = config->shm_size;
+		sd->seg.seg_type = config->seg_type;
+		/* Only "sparse" modes re-prepare segments for every transfer. */
+		sd->prep_trs_fn = config->seg_type == SPARSE_PACKET ? prepare_packet_transfer :
+					config->seg_type == SPARSE_MEMORY ?
+						prepare_address_transfer : NULL;
+		sd->ver_fn = config->is_verify ? verify_transfer : NULL;
+	}
+
+	return true;
+}
+
+/* For each session: build the data segments, fill in the transfer
+ * parameters and configure completion resources (when the chosen mode needs
+ * them). Returns false on the first failure. */
+static odp_bool_t setup_data(prog_config_t *config)
+{
+	for (uint32_t i = 0U; i < config->num_sessions; ++i) {
+		sd_t *sd = &config->sds[i];
+
+		if (!config->api.setup_fn(sd))
+			return false;
+
+		config->api.trs_fn(sd);
+
+		if (config->api.compl_fn != NULL && !config->api.compl_fn(sd))
+			return false;
+	}
+
+	return true;
+}
+
+/* Worker thread entry point: optionally joins the session's scheduler
+ * group, then runs the configured wait/transfer function until the global
+ * run flag is cleared, recording the total run time. Remaining inflight
+ * operations are drained before exit. Always returns 0. */
+static int transfer(void *args)
+{
+	thread_config_t *thr_config = args;
+	prog_config_t *prog_config = thr_config->prog_config;
+	sd_t *sd = thr_config->sd;
+	stats_t *stats = &thr_config->stats;
+	/* Use the thread's own configuration pointer instead of the global
+	 * 'prog_conf' for consistency with the rest of this function (both
+	 * reference the same shared memory object). */
+	test_api_t *api = &prog_config->api;
+	odp_thrmask_t mask;
+	odp_time_t start_tm;
+
+	/* Wait until the control thread has finished setup. */
+	odp_barrier_wait(&prog_config->init_barrier);
+
+	if (sd->grp != ODP_SCHED_GROUP_INVALID) {
+		odp_thrmask_zero(&mask);
+		odp_thrmask_set(&mask, odp_thread_id());
+
+		if (odp_schedule_group_join(sd->grp, &mask) < 0) {
+			ODPH_ERR("Error joining scheduler group\n");
+			goto out;
+		}
+	}
+
+	start_tm = odp_time_local_strict();
+
+	while (odp_atomic_load_u32(&prog_config->is_running))
+		api->wait_fn(sd, stats);
+
+	thr_config->stats.tot_tm = odp_time_diff_ns(odp_time_local_strict(), start_tm);
+
+	if (api->drain_fn != NULL)
+		api->drain_fn(sd);
+
+out:
+	/* Synchronize worker exit before teardown begins. */
+	odp_barrier_wait(&prog_config->term_barrier);
+
+	return 0;
+}
+
+/* Initialize the start/stop barriers, create the worker threads and run
+ * any per-session bootstrap (e.g. starting the initial event-completion
+ * transfers). Returns false on failure. */
+static odp_bool_t setup_workers(prog_config_t *config)
+{
+	odp_cpumask_t cpumask;
+	int num_workers;
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_params[config->num_workers], *thr_param;
+	thread_config_t *thr_config;
+	sd_t *sd;
+
+	/* Barrier init count for control and worker. */
+	odp_barrier_init(&config->init_barrier, config->num_workers + 1);
+	/* Termination barrier is for workers only; the control thread joins
+	 * them via stop_test() instead. */
+	odp_barrier_init(&config->term_barrier, config->num_workers);
+	/* May return fewer CPUs than requested; mismatch is caught below. */
+	num_workers = odp_cpumask_default_worker(&cpumask, config->num_workers);
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = config->odp_instance;
+	thr_common.cpumask = &cpumask;
+
+	for (int i = 0; i < config->num_workers; ++i) {
+		thr_param = &thr_params[i];
+		thr_config = &config->thread_config[i];
+		/* "Single" policy: all workers share session 0. */
+		sd = config->policy == SINGLE ? &config->sds[0U] : &config->sds[i];
+
+		odph_thread_param_init(thr_param);
+		thr_param->start = transfer;
+		thr_param->thr_type = ODP_THREAD_WORKER;
+		thr_config->prog_config = config;
+		thr_config->sd = sd;
+		thr_param->arg = thr_config;
+	}
+
+	num_workers = odph_thread_create(config->threads, &thr_common, thr_params, num_workers);
+
+	if (num_workers != config->num_workers) {
+		ODPH_ERR("Error configuring worker threads\n");
+		return false;
+	}
+
+	for (uint32_t i = 0U; i < config->num_sessions; ++i) {
+		if (config->api.bootstrap_fn != NULL && !config->api.bootstrap_fn(&config->sds[i]))
+			return false;
+	}
+
+	/* Release the workers waiting in transfer(). */
+	odp_barrier_wait(&config->init_barrier);
+
+	return true;
+}
+
+/* Resolve the API callback set, then build the DMA sessions, the test data
+ * and the worker threads, in that order. Returns false if any step fails. */
+static odp_bool_t setup_test(prog_config_t *config)
+{
+	setup_api(config);
+
+	if (!setup_session_descriptors(config))
+		return false;
+
+	if (!setup_data(config))
+		return false;
+
+	return setup_workers(config);
+}
+
+/* Wait for all worker threads to exit. */
+static void stop_test(prog_config_t *config)
+{
+	(void)odph_thread_join(config->threads, config->num_workers);
+}
+
+/* Free a session's per-transfer completion resources (transfer IDs and
+ * completion events), then release its data segments via 'free_fn'.
+ *
+ * NOTE(review): iterates all MAX_SEGS descriptor slots instead of
+ * 'num_inflight' — assumes unused slots hold invalid IDs/events, i.e. the
+ * 'infos' array is invalid-initialized; confirm against the setup code. */
+static void teardown_data(const sd_t *sd, void (*free_fn)(const sd_t *sd))
+{
+	const odp_dma_compl_param_t *compl_param;
+
+	for (uint32_t i = 0U; i < MAX_SEGS; ++i) {
+		compl_param = &sd->dma.infos[i].compl_param;
+
+		if (compl_param->transfer_id != ODP_DMA_TRANSFER_ID_INVALID)
+			odp_dma_transfer_id_free(sd->dma.handle, compl_param->transfer_id);
+
+		if (compl_param->event != ODP_EVENT_INVALID)
+			odp_event_free(compl_param->event);
+	}
+
+	free_fn(sd);
+}
+
+/* Release everything allocated by setup_test(): per-session transfer data,
+ * completion queues/pools, scheduler groups and DMA sessions, followed by
+ * the global source/destination pools. Safe to call on a partially
+ * initialized configuration — invalid handles are skipped. */
+static void teardown_test(prog_config_t *config)
+{
+	sd_t *sd;
+
+	for (uint32_t i = 0U; i < config->num_sessions; ++i) {
+		sd = &config->sds[i];
+		teardown_data(sd, config->api.free_fn);
+
+		if (sd->dma.compl_q != ODP_QUEUE_INVALID)
+			(void)odp_queue_destroy(sd->dma.compl_q);
+
+		if (sd->dma.pool != ODP_POOL_INVALID)
+			(void)odp_pool_destroy(sd->dma.pool);
+
+		if (sd->grp != ODP_SCHED_GROUP_INVALID)
+			(void)odp_schedule_group_destroy(sd->grp);
+
+		/* DMA session last: its transfer IDs were freed above. */
+		if (sd->dma.handle != ODP_DMA_INVALID)
+			(void)odp_dma_destroy(sd->dma.handle);
+	}
+
+	if (config->src_pool != ODP_POOL_INVALID)
+		(void)odp_pool_destroy(config->src_pool);
+
+	if (config->dst_pool != ODP_POOL_INVALID)
+		(void)odp_pool_destroy(config->dst_pool);
+}
+
+/* Print a value scaled to the largest applicable SI prefix (k/M/G),
+ * followed by the given unit string. Values at or below one kilo-unit are
+ * printed unscaled. */
+static void print_humanised(uint64_t value, const char *type)
+{
+	const char *prefix = "";
+	uint64_t div = 1U;
+
+	if (value > GIGAS) {
+		prefix = "G";
+		div = GIGAS;
+	} else if (value > MEGAS) {
+		prefix = "M";
+		div = MEGAS;
+	} else if (value > KILOS) {
+		prefix = "k";
+		div = KILOS;
+	}
+
+	if (div > 1U)
+		printf("%.2f %s%s\n", (double)value / div, prefix, type);
+	else
+		printf("%" PRIu64 " %s\n", value, type);
+}
+
+/* Print the end-of-run report: the test configuration, per-worker counters
+ * and timing/cycle breakdowns, and aggregated totals across all workers. */
+static void print_stats(const prog_config_t *config)
+{
+	const stats_t *stats;
+	/* Bytes moved per transfer, used to derive B/s figures. */
+	uint64_t data_cnt = config->num_in_segs * config->src_seg_len, tot_completed = 0U,
+	tot_tm = 0U, tot_trs_tm = 0U, tot_trs_cc = 0U, tot_trs_cnt = 0U, tot_min_tm = UINT64_MAX,
+	tot_max_tm = 0U, tot_min_cc = UINT64_MAX, tot_max_cc = 0U, avg_start_cc, avg_wait_cc;
+	double avg_tot_tm;
+
+	printf("\n======================\n\n"
+	       "DMA performance test done\n\n"
+	       "    mode:                 %s\n"
+	       "    input segment count:  %u\n"
+	       "    output segment count: %u\n"
+	       "    segment length:       %u\n"
+	       "    segment type:         %s\n"
+	       "    inflight count:       %u\n"
+	       "    session policy:       %s\n\n",
+	       config->trs_type == SYNC_DMA ? "DMA synchronous" :
+		config->trs_type == ASYNC_DMA && config->compl_mode == POLL ?
+			"DMA asynchronous-poll" :
+			config->trs_type == ASYNC_DMA && config->compl_mode == EVENT ?
+				"DMA asynchronous-event" : "SW", config->num_in_segs,
+	       config->num_out_segs, config->src_seg_len,
+	       config->seg_type == DENSE_PACKET ? "dense packet" :
+		config->seg_type == SPARSE_PACKET ? "sparse packet" :
+			config->seg_type == DENSE_MEMORY ? "dense memory" : "sparse memory",
+	       config->num_inflight, config->policy == SINGLE ? "shared" : "per-worker");
+
+	/* Per-worker section; totals are accumulated along the way. */
+	for (int i = 0; i < config->num_workers; ++i) {
+		stats = &config->thread_config[i].stats;
+		tot_completed += stats->completed;
+		tot_tm += stats->tot_tm;
+		tot_trs_tm += stats->trs_tm;
+		tot_trs_cc += stats->trs_cc;
+		tot_trs_cnt += stats->trs_cnt;
+		tot_min_tm = ODPH_MIN(tot_min_tm, stats->min_trs_tm);
+		tot_max_tm = ODPH_MAX(tot_max_tm, stats->max_trs_tm);
+		tot_min_cc = ODPH_MIN(tot_min_cc, stats->min_trs_cc);
+		tot_max_cc = ODPH_MAX(tot_max_cc, stats->max_trs_cc);
+
+		printf("    worker %d:\n", i);
+		printf("        successful transfers: %" PRIu64 "\n"
+		       "        start errors:         %" PRIu64 "\n",
+		       stats->completed, stats->start_errs);
+
+		if (config->trs_type == ASYNC_DMA) {
+			if (config->compl_mode == POLL)
+				printf("        poll errors:          %" PRIu64 "\n",
+				       stats->poll_errs);
+			else
+				printf("        scheduler timeouts:   %" PRIu64 "\n",
+				       stats->scheduler_timeouts);
+		}
+
+		printf("        transfer errors:      %" PRIu64 "\n", stats->transfer_errs);
+
+		if (config->is_verify)
+			printf("        data errors:          %" PRIu64 "\n", stats->data_errs);
+
+		printf("        run time:             %" PRIu64 " ns\n", stats->tot_tm);
+
+		/* Per-session figures only make sense with per-worker
+		 * sessions. */
+		if (config->policy == MANY) {
+			printf("        session:\n"
+			       "            average time per transfer:   %" PRIu64 " "
+			       "(min: %" PRIu64 ", max: %" PRIu64 ") ns\n"
+			       "            average cycles per transfer: %" PRIu64 " "
+			       "(min: %" PRIu64 ", max: %" PRIu64 ")\n"
+			       "            ops:                         ",
+			       stats->trs_cnt > 0U ? stats->trs_tm / stats->trs_cnt : 0U,
+			       stats->trs_cnt > 0U ? stats->min_trs_tm : 0U,
+			       stats->trs_cnt > 0U ? stats->max_trs_tm : 0U,
+			       stats->trs_cnt > 0U ? stats->trs_cc / stats->trs_cnt : 0U,
+			       stats->trs_cnt > 0U ? stats->min_trs_cc : 0U,
+			       stats->trs_cnt > 0U ? stats->max_trs_cc : 0U);
+			print_humanised(stats->completed /
+					((double)stats->tot_tm / ODP_TIME_SEC_IN_NS),
+					"OPS");
+			printf("            speed:                       ");
+			print_humanised(stats->completed * data_cnt /
+					((double)stats->tot_tm / ODP_TIME_SEC_IN_NS), "B/s");
+		}
+
+		avg_start_cc = stats->start_cnt > 0U ? stats->start_cc / stats->start_cnt : 0U;
+		printf("        average cycles breakdown:\n");
+
+		/* The "start" counters mean a different API call depending on
+		 * the transfer type. */
+		if (config->trs_type == SYNC_DMA) {
+			printf("            odp_dma_transfer(): %" PRIu64 " "
+			       "(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
+			       avg_start_cc > 0U ? stats->min_start_cc : 0U,
+			       avg_start_cc > 0U ? stats->max_start_cc : 0U);
+		} else if (config->trs_type == SW_COPY) {
+			printf("            memcpy(): %" PRIu64 " "
+			       "(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
+			       avg_start_cc > 0U ? stats->min_start_cc : 0U,
+			       avg_start_cc > 0U ? stats->max_start_cc : 0U);
+		} else {
+			printf("            odp_dma_transfer_start(): %" PRIu64 " "
+			       "(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
+			       avg_start_cc > 0U ? stats->min_start_cc : 0U,
+			       avg_start_cc > 0U ? stats->max_start_cc : 0U);
+
+			avg_wait_cc = stats->wait_cnt > 0U ? stats->wait_cc / stats->wait_cnt : 0U;
+
+			if (config->compl_mode == POLL) {
+				printf("            odp_dma_transfer_done(): %" PRIu64 ""
+				       " (min: %" PRIu64 ", max: %" PRIu64 ", x%" PRIu64 ""
+				       " per transfer)\n", avg_wait_cc,
+				       avg_wait_cc > 0U ? stats->min_wait_cc : 0U,
+				       avg_wait_cc > 0U ? stats->max_wait_cc : 0U,
+				       stats->trs_cnt > 0U ?
+						stats->trs_poll_cnt / stats->trs_cnt : 0U);
+			} else {
+				printf("            odp_schedule(): %" PRIu64 " "
+				       " (min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_wait_cc,
+				       avg_wait_cc > 0U ? stats->min_wait_cc : 0U,
+				       avg_wait_cc > 0U ? stats->max_wait_cc : 0U);
+			}
+		}
+
+		printf("\n");
+	}
+
+	/* Aggregate section: averages across all workers. */
+	avg_tot_tm = (double)tot_tm / config->num_workers / ODP_TIME_SEC_IN_NS;
+	printf("    total:\n"
+	       "        average time per transfer:   %" PRIu64 " (min: %" PRIu64
+	       ", max: %" PRIu64 ") ns\n"
+	       "        average cycles per transfer: %" PRIu64 " (min: %" PRIu64
+	       ", max: %" PRIu64 ")\n"
+	       "        ops:                         ",
+	       tot_trs_cnt > 0U ? tot_trs_tm / tot_trs_cnt : 0U,
+	       tot_trs_cnt > 0U ? tot_min_tm : 0U,
+	       tot_trs_cnt > 0U ? tot_max_tm : 0U,
+	       tot_trs_cnt > 0U ? tot_trs_cc / tot_trs_cnt : 0U,
+	       tot_trs_cnt > 0U ? tot_min_cc : 0U,
+	       tot_trs_cnt > 0U ? tot_max_cc : 0U);
+	print_humanised(avg_tot_tm > 0U ? tot_completed / avg_tot_tm : 0U, "OPS");
+	printf("        speed:                       ");
+	print_humanised(avg_tot_tm > 0U ? tot_completed * data_cnt / avg_tot_tm : 0U, "B/s");
+	printf("\n");
+	printf("======================\n");
+}
+
+/* Program entry point: initialize ODP, reserve shared memory for the
+ * program configuration, parse options, run the test for the configured
+ * time, print statistics and tear everything down. Returns EXIT_SUCCESS,
+ * EXIT_FAILURE or EXIT_NOT_SUP. */
+int main(int argc, char **argv)
+{
+	odph_helper_options_t odph_opts;
+	odp_init_t init_param;
+	odp_instance_t odp_instance;
+	odp_shm_t shm_cfg = ODP_SHM_INVALID;
+	parse_result_t parse_res;
+	int ret = EXIT_SUCCESS;
+
+	/* Strip helper options (e.g. memory model) before own parsing. */
+	argc = odph_parse_options(argc, argv);
+
+	if (odph_options(&odph_opts)) {
+		ODPH_ERR("Error while reading ODP helper options, exiting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_init_param_init(&init_param);
+	init_param.mem_model = odph_opts.mem_model;
+
+	if (odp_init_global(&odp_instance, &init_param, NULL)) {
+		ODPH_ERR("ODP global init failed, exiting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_init_local(odp_instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("ODP local init failed, exiting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Configuration lives in shared memory so worker threads/processes
+	 * can access it. */
+	shm_cfg = odp_shm_reserve(PROG_NAME "_cfg", sizeof(prog_config_t), ODP_CACHE_LINE_SIZE,
+				  0U);
+
+	if (shm_cfg == ODP_SHM_INVALID) {
+		ODPH_ERR("Error reserving shared memory\n");
+		ret = EXIT_FAILURE;
+		goto out;
+	}
+
+	prog_conf = odp_shm_addr(shm_cfg);
+
+	if (prog_conf == NULL) {
+		ODPH_ERR("Error resolving shared memory address\n");
+		ret = EXIT_FAILURE;
+		goto out;
+	}
+
+	parse_res = setup_program(argc, argv, prog_conf);
+
+	if (parse_res == PRS_NOK) {
+		ret = EXIT_FAILURE;
+		goto out;
+	}
+
+	/* Help printed or similar: a clean, early exit. */
+	if (parse_res == PRS_TERM) {
+		ret = EXIT_SUCCESS;
+		goto out;
+	}
+
+	/* Configuration valid but not supported by the implementation. */
+	if (parse_res == PRS_NOT_SUP) {
+		ret = EXIT_NOT_SUP;
+		goto out;
+	}
+
+	if (odp_schedule_config(NULL) < 0) {
+		ODPH_ERR("Error configuring scheduler\n");
+		ret = EXIT_FAILURE;
+		goto out;
+	}
+
+	prog_conf->odp_instance = odp_instance;
+	odp_atomic_init_u32(&prog_conf->is_running, 1U);
+
+	if (!setup_test(prog_conf)) {
+		ret = EXIT_FAILURE;
+		goto out_test;
+	}
+
+	/* Sleep for the test duration, then signal workers to stop.
+	 * NOTE(review): nothing in this chunk clears 'is_running' when
+	 * time_sec <= 0.001 — presumably setup_program() guarantees a
+	 * positive run time; confirm. */
+	if (prog_conf->time_sec > 0.001) {
+		struct timespec ts;
+
+		ts.tv_sec = prog_conf->time_sec;
+		ts.tv_nsec = (prog_conf->time_sec - ts.tv_sec) * ODP_TIME_SEC_IN_NS;
+		nanosleep(&ts, NULL);
+		odp_atomic_store_u32(&prog_conf->is_running, 0U);
+	}
+
+	stop_test(prog_conf);
+	print_stats(prog_conf);
+
+out_test:
+	/* Release all resources that have been allocated during 'setup_test()'. */
+	teardown_test(prog_conf);
+
+out:
+	if (shm_cfg != ODP_SHM_INVALID)
+		(void)odp_shm_free(shm_cfg);
+
+	if (odp_term_local()) {
+		ODPH_ERR("ODP local terminate failed, exiting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(odp_instance)) {
+		ODPH_ERR("ODP global terminate failed, exiting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return ret;
+}
diff --git a/test/performance/odp_dma_perf_run.sh b/test/performance/odp_dma_perf_run.sh
new file mode 100755
index 000000000..31948e40a
--- /dev/null
+++ b/test/performance/odp_dma_perf_run.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# Copyright (c) 2022-2023, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+BIN_NAME=odp_dma_perf
+SEGC=0
+SEGS=1024
+INFL=1
+TIME=0.1
+TESTS_RUN=0
+
+check_result()
+{
+ if [ $1 -eq 0 ]; then
+ TESTS_RUN=`expr $TESTS_RUN + 1`
+ elif [ $1 -eq 1 ]; then
+ echo "Test FAILED, exiting"
+ exit 1
+ else
+ echo "Test SKIPPED"
+ fi
+}
+
+echo "odp_dma_perf: synchronous DMA transfer 1"
+echo "===================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 0 -i $SEGC -o $SEGC -s $SEGS -S 0 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: synchronous DMA transfer 2"
+echo "===================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 0 -i $SEGC -o $SEGC -s $SEGS -S 1 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: asynchronous DMA transfer 1"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 2 -m 0 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: asynchronous DMA transfer 2"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 3 -m 1 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: SW transfer 1"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 2 -i $SEGC -o $SEGC -s $SEGS -S 0 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: SW transfer 2"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 2 -i $SEGC -o $SEGC -s $SEGS -S 2 -f $INFL -T $TIME -v
+
+check_result $?
+
+if [ $TESTS_RUN -eq 0 ]; then
+ exit 77
+fi
+
+exit 0
diff --git a/test/performance/odp_dmafwd.c b/test/performance/odp_dmafwd.c
new file mode 100644
index 000000000..694973ce0
--- /dev/null
+++ b/test/performance/odp_dmafwd.c
@@ -0,0 +1,1475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+/**
+ * @example odp_dmafwd.c
+ *
+ * This tester application can be used to profile the performance of an ODP DMA implementation.
+ * Tester workflow consists of packet reception, copy and forwarding steps. Packets are first
+ * received from configured interfaces after which packets are copied, either with plain SW memory
+ * copy or with DMA offload copy. Finally, copied packets are echoed back to the sender(s).
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <time.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Exit status for "configuration not supported", distinct from
+ * EXIT_FAILURE. */
+#define EXIT_NOT_SUP 2
+#define PROG_NAME "odp_dmafwd"
+/* Separator for the '-i' interface list. */
+#define DELIMITER ","
+
+/* Copy type selected with the '-t' option. */
+enum {
+	SW_COPY = 0U,
+	DMA_COPY_EV,
+	DMA_COPY_POLL
+};
+
+/* Static defaults; capability-derived defaults are in dynamic_defs_t. */
+#define DEF_CPY_TYPE SW_COPY
+#define DEF_CNT 32768U
+#define DEF_LEN 1024U
+#define DEF_WORKERS 1U
+#define DEF_TIME 0U
+
+#define MAX_IFS 2U
+#define MAX_OUT_QS 32U
+#define MAX_BURST 32U
+/* One thread is reserved for the control thread. */
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+
+/* Divide, guarding against a zero divisor. */
+#define DIV_IF(a, b) ((b) > 0U ? ((a) / (b)) : 0U)
+
+ODP_STATIC_ASSERT(MAX_IFS < UINT8_MAX, "Too large maximum interface count");
+ODP_STATIC_ASSERT(MAX_OUT_QS < UINT8_MAX, "Too large maximum output queue count");
+
+/* Default values resolved from implementation capabilities at startup,
+ * shown in the usage text and used when an option is not given. */
+typedef struct {
+	uint32_t burst_size;
+	uint32_t num_pkts;
+	uint32_t pkt_len;
+	uint32_t cache_size;
+} dynamic_defs_t;
+
+/* Command line parsing outcome. */
+typedef enum {
+	PRS_OK,
+	PRS_NOK,
+	PRS_TERM,
+	PRS_NOT_SUP
+} parse_result_t;
+
+typedef struct prog_config_s prog_config_t;
+
+/* Per-worker counters; aggregated and printed after the run. */
+typedef struct {
+	uint64_t copy_errs;
+	uint64_t trs;
+	uint64_t start_errs;
+	uint64_t trs_errs;
+	uint64_t buf_alloc_errs;
+	uint64_t compl_alloc_errs;
+	uint64_t pkt_alloc_errs;
+	uint64_t trs_poll_errs;
+	uint64_t trs_polled;
+	uint64_t fwd_pkts;
+	uint64_t discards;
+	uint64_t sched_cc;
+	uint64_t tot_cc;
+	uint64_t sched_rounds;
+} stats_t;
+
+/* Per-worker state: DMA session, completion resources and counters.
+ * Cache-line aligned to avoid false sharing between workers. */
+typedef struct ODP_ALIGNED_CACHE {
+	prog_config_t *prog_config;
+	odp_dma_t dma_handle;
+	odp_pool_t compl_pool;
+	odp_pool_t copy_pool;
+	odp_pool_t trs_pool;
+	odp_queue_t compl_q;
+	odp_stash_t inflight_stash;
+	stats_t stats;
+	int thr_idx;
+} pktio_s_thread_config_t_placeholder; /* see note below */
+
+typedef struct pktio_s {
+	odp_pktout_queue_t out_qs[MAX_OUT_QS];
+	char *name;
+	odp_pktio_t handle;
+	uint8_t num_out_qs;
+} pktio_t;
+
+/* One copy operation: source packets, their copies and the destination
+ * interface for forwarding. */
+typedef struct {
+	odp_packet_t src_pkts[MAX_BURST];
+	odp_packet_t dst_pkts[MAX_BURST];
+	pktio_t *pktio;
+	int num;
+} transfer_t;
+
+/* Function for initializing transfer structures */
+typedef transfer_t *(*init_fn_t)(odp_dma_transfer_param_t *trs_param,
+				 odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
+				 odp_dma_seg_t *dst_segs, pktio_t *pktio, thread_config_t *config);
+/* Function for starting transfers */
+typedef odp_bool_t (*start_fn_t)(odp_dma_transfer_param_t *trs_param,
+				 odp_dma_compl_param_t *compl_param, thread_config_t *config);
+/* Function for setting up packets for copy */
+typedef void (*pkt_fn_t)(odp_packet_t pkts[], int num, pktio_t *pktio, init_fn_t init_fn,
+			 start_fn_t start_fn, thread_config_t *config);
+/* Function for draining and tearing down inflight operations */
+typedef void (*drain_fn_t)(thread_config_t *config);
+
+typedef struct prog_config_s {
+	/* Presumably maps odp_pktio_index() to a pktios[] slot — confirm
+	 * against the (not shown) pktio setup code. */
+	uint8_t pktio_idx_map[ODP_PKTIO_MAX_INDEX + 1];
+	odph_thread_t thread_tbl[MAX_WORKERS];
+	thread_config_t thread_config[MAX_WORKERS];
+	pktio_t pktios[MAX_IFS];
+	dynamic_defs_t dyn_defs;
+	odp_instance_t odp_instance;
+	odp_barrier_t init_barrier;
+	odp_barrier_t term_barrier;
+	odp_atomic_u32_t is_running;
+	odp_pool_t pktio_pool;
+	odp_pool_t copy_pool;
+	odp_pool_t trs_pool;
+
+	/* Copy-type specific operations, selected at setup. */
+	struct {
+		init_fn_t init_fn;
+		start_fn_t start_fn;
+		pkt_fn_t pkt_fn;
+		drain_fn_t drain_fn;
+	};
+
+	uint64_t inflight_obj_size;
+	uint32_t burst_size;
+	uint32_t num_pkts;
+	uint32_t pkt_len;
+	uint32_t cache_size;
+	uint32_t num_inflight;
+	uint32_t trs_cache_size;
+	uint32_t compl_cache_size;
+	uint32_t stash_cache_size;
+	double time_sec;
+	odp_stash_type_t stash_type;
+	int num_thrs;
+	uint8_t num_ifs;
+	uint8_t copy_type;
+} prog_config_t;
+
+/* Packet accumulation vector: sized 2x MAX_BURST, presumably so a full
+ * burst can be appended before flushing — confirm against the RX path. */
+typedef struct {
+	odp_packet_t pkts[MAX_BURST * 2U];
+	pktio_t *pktio;
+	int num;
+} pkt_vec_t;
+
+/* Program configuration in shared memory; see main(). */
+static prog_config_t *prog_conf;
+
+/* Signal handler: clear the global run flag so processing loops exit. */
+static void terminate(int signal ODP_UNUSED)
+{
+	odp_atomic_store_u32(&prog_conf->is_running, 0U);
+}
+
+/* Zero the configuration, resolve capability-based dynamic defaults and
+ * mark all handles invalid so teardown is safe even after a partial setup.
+ *
+ * NOTE(review): if odp_dma_capability()/odp_pool_capability() fail, the
+ * corresponding dyn_defs stay zero (from memset) and are copied into the
+ * active configuration below — presumably rejected later by
+ * check_options(); confirm. */
+static void init_config(prog_config_t *config)
+{
+	odp_dma_capability_t dma_capa;
+	uint32_t burst_size;
+	odp_pool_capability_t pool_capa;
+	odp_pool_param_t pool_param;
+	thread_config_t *thr;
+
+	memset(config, 0, sizeof(*config));
+
+	if (odp_dma_capability(&dma_capa) == 0) {
+		/* Burst limited by both DMA segment counts and MAX_BURST. */
+		burst_size = ODPH_MIN(dma_capa.max_src_segs, dma_capa.max_dst_segs);
+		burst_size = ODPH_MIN(burst_size, MAX_BURST);
+		config->dyn_defs.burst_size = burst_size;
+	}
+
+	if (odp_pool_capability(&pool_capa) == 0) {
+		config->dyn_defs.num_pkts = pool_capa.pkt.max_num > 0U ?
+						ODPH_MIN(pool_capa.pkt.max_num, DEF_CNT) : DEF_CNT;
+		config->dyn_defs.pkt_len = pool_capa.pkt.max_len > 0U ?
+						ODPH_MIN(pool_capa.pkt.max_len, DEF_LEN) : DEF_LEN;
+		odp_pool_param_init(&pool_param);
+		config->dyn_defs.cache_size = pool_param.pkt.cache_size;
+	}
+
+	config->pktio_pool = ODP_POOL_INVALID;
+	config->copy_pool = ODP_POOL_INVALID;
+	config->trs_pool = ODP_POOL_INVALID;
+	config->burst_size = config->dyn_defs.burst_size;
+	config->num_pkts = config->dyn_defs.num_pkts;
+	config->pkt_len = config->dyn_defs.pkt_len;
+	config->cache_size = config->dyn_defs.cache_size;
+	config->time_sec = DEF_TIME;
+	config->num_thrs = DEF_WORKERS;
+	config->copy_type = DEF_CPY_TYPE;
+
+	for (int i = 0; i < MAX_WORKERS; ++i) {
+		thr = &config->thread_config[i];
+		thr->dma_handle = ODP_DMA_INVALID;
+		thr->compl_pool = ODP_POOL_INVALID;
+		thr->compl_q = ODP_QUEUE_INVALID;
+		thr->inflight_stash = ODP_STASH_INVALID;
+	}
+
+	for (uint32_t i = 0U; i < MAX_IFS; ++i)
+		config->pktios[i].handle = ODP_PKTIO_INVALID;
+}
+
+/* Print the usage text with the capability-derived default values filled
+ * in. */
+static void print_usage(dynamic_defs_t *dyn_defs)
+{
+	printf("\n"
+	       "DMA performance tester with packet I/O. Receive and forward packets after\n"
+	       "software copy or DMA offload copy.\n"
+	       "\n"
+	       "Usage: " PROG_NAME " OPTIONS\n"
+	       "\n"
+	       "  E.g. " PROG_NAME " -i eth0\n"
+	       "       " PROG_NAME " -i eth0 -t 0\n"
+	       "       " PROG_NAME " -i eth0 -t 1 -b 15 -l 4096 -c 5\n"
+	       "\n"
+	       "Mandatory OPTIONS:\n"
+	       "\n"
+	       "  -i, --interfaces   Ethernet interfaces for packet I/O, comma-separated, no\n"
+	       "                     spaces.\n"
+	       "\n"
+	       "Optional OPTIONS:\n"
+	       "\n"
+	       "  -t, --copy_type    Type of copy. %u by default.\n"
+	       "                         0: SW\n"
+	       "                         1: DMA with event completion\n"
+	       "                         2: DMA with poll completion\n"
+	       "  -b, --burst_size   Copy burst size. This many packets are accumulated before\n"
+	       "                     copy. %u by default.\n"
+	       "  -n, --num_pkts     Number of packet buffers allocated for packet I/O pool.\n"
+	       "                     %u by default.\n"
+	       "  -l, --pkt_len      Maximum size of packet buffers in packet I/O pool. %u by\n"
+	       "                     default.\n"
+	       "  -c, --worker_count Amount of workers. %u by default.\n"
+	       "  -C, --cache_size   Maximum cache size for pools. %u by default.\n"
+	       "  -T, --time_sec     Time in seconds to run. 0 means infinite. %u by default.\n"
+	       "  -h, --help         This help.\n"
+	       "\n", DEF_CPY_TYPE, dyn_defs->burst_size, dyn_defs->num_pkts, dyn_defs->pkt_len,
+	       DEF_WORKERS, dyn_defs->cache_size, DEF_TIME);
+}
+
+/* Parse the comma-separated interface list into config->pktios[] names and
+ * bump config->num_ifs for each stored name. At most MAX_IFS interfaces are
+ * kept; extra entries and allocation failures are silently ignored (the
+ * interface count is validated later in check_options()). */
+static void parse_interfaces(prog_config_t *config, const char *optarg)
+{
+	char *tmp_str = strdup(optarg), *tmp, *saveptr = NULL;
+
+	if (tmp_str == NULL)
+		return;
+
+	/* strtok_r() instead of strtok(): no hidden static state, reentrant
+	 * (declared via _GNU_SOURCE at the top of this file). */
+	tmp = strtok_r(tmp_str, DELIMITER, &saveptr);
+
+	while (tmp && config->num_ifs < MAX_IFS) {
+		config->pktios[config->num_ifs].name = strdup(tmp);
+
+		if (config->pktios[config->num_ifs].name != NULL)
+			++config->num_ifs;
+
+		tmp = strtok_r(NULL, DELIMITER, &saveptr);
+	}
+
+	free(tmp_str);
+}
+
+/* Query stash capability, preferring the FIFO stash type and falling back
+ * to the default type. On success, stores the chosen type and returns true;
+ * returns false if neither type is supported. */
+static odp_bool_t get_stash_capa(odp_stash_capability_t *stash_capa, odp_stash_type_t *stash_type)
+{
+	static const odp_stash_type_t pref[] = { ODP_STASH_TYPE_FIFO, ODP_STASH_TYPE_DEFAULT };
+
+	for (uint32_t i = 0U; i < sizeof(pref) / sizeof(pref[0]); ++i) {
+		if (odp_stash_capability(stash_capa, pref[i]) == 0) {
+			*stash_type = pref[i];
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Validate the parsed configuration against DMA, pool and stash
+ * capabilities and derive dependent values (inflight count, cache sizes,
+ * stash object size). Returns PRS_OK, PRS_NOK for invalid input or
+ * PRS_NOT_SUP when the implementation lacks a required feature. */
+static parse_result_t check_options(prog_config_t *config)
+{
+	odp_dma_capability_t dma_capa;
+	uint32_t burst_size;
+	odp_stash_capability_t stash_capa;
+	/* Inflight transfers are tracked by their transfer IDs. */
+	const uint64_t obj_size = sizeof(odp_dma_transfer_id_t);
+	uint64_t max_num;
+	odp_pool_capability_t pool_capa;
+
+	/* At least one interface is mandatory. */
+	if (config->num_ifs == 0U) {
+		ODPH_ERR("Invalid number of interfaces: %u (min: 1, max: %u)\n", config->num_ifs,
+			 MAX_IFS);
+		return PRS_NOK;
+	}
+
+	if (config->copy_type != SW_COPY && config->copy_type != DMA_COPY_EV &&
+	    config->copy_type != DMA_COPY_POLL) {
+		ODPH_ERR("Invalid copy type: %u\n", config->copy_type);
+		return PRS_NOK;
+	}
+
+	if (config->num_thrs <= 0 || config->num_thrs > MAX_WORKERS) {
+		ODPH_ERR("Invalid worker count: %d (min: 1, max: %d)\n", config->num_thrs,
+			 MAX_WORKERS);
+		return PRS_NOK;
+	}
+
+	if (odp_dma_capability(&dma_capa) < 0) {
+		ODPH_ERR("Error querying DMA capabilities\n");
+		return PRS_NOK;
+	}
+
+	/* One DMA session per worker. */
+	if ((uint32_t)config->num_thrs > dma_capa.max_sessions) {
+		ODPH_ERR("Unsupported DMA session count: %d (max: %u)\n", config->num_thrs,
+			 dma_capa.max_sessions);
+		return PRS_NOT_SUP;
+	}
+
+	/* Burst is bounded by segment counts on both sides and MAX_BURST. */
+	burst_size = ODPH_MIN(dma_capa.max_src_segs, dma_capa.max_dst_segs);
+	burst_size = ODPH_MIN(burst_size, MAX_BURST);
+
+	if (config->burst_size == 0U || config->burst_size > burst_size) {
+		ODPH_ERR("Invalid segment count for DMA: %u (min: 1, max: %u)\n",
+			 config->burst_size, burst_size);
+		return PRS_NOK;
+	}
+
+	if (config->pkt_len > dma_capa.max_seg_len) {
+		ODPH_ERR("Invalid packet length for DMA: %u (max: %u)\n", config->pkt_len,
+			 dma_capa.max_seg_len);
+		return PRS_NOK;
+	}
+
+	/* Always use the maximum number of inflight transfers supported. */
+	config->num_inflight = dma_capa.max_transfers;
+
+	if (odp_pool_capability(&pool_capa) < 0) {
+		ODPH_ERR("Error querying pool capabilities\n");
+		return PRS_NOK;
+	}
+
+	if (config->cache_size < pool_capa.pkt.min_cache_size ||
+	    config->cache_size > pool_capa.pkt.max_cache_size) {
+		ODPH_ERR("Invalid pool cache size: %u (min: %u, max: %u)\n", config->cache_size,
+			 pool_capa.pkt.min_cache_size, pool_capa.pkt.max_cache_size);
+		return PRS_NOK;
+	}
+
+	/* Clamp the requested cache size into the buffer pool's range for
+	 * the transfer-tracking pool. */
+	if (config->copy_type != SW_COPY)
+		config->trs_cache_size = ODPH_MIN(ODPH_MAX(config->cache_size,
+							   pool_capa.buf.min_cache_size),
+						  pool_capa.buf.max_cache_size);
+
+	if (config->copy_type == DMA_COPY_EV) {
+		/* Event completion needs both the completion event mode and
+		 * scheduled queues. */
+		if ((dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) == 0U ||
+		    !dma_capa.queue_type_sched) {
+			ODPH_ERR("Unsupported DMA completion mode: event (mode support: %x, "
+				 "scheduled queue support: %u)\n", dma_capa.compl_mode_mask,
+				 dma_capa.queue_type_sched);
+			return PRS_NOT_SUP;
+		}
+
+		if ((uint32_t)config->num_thrs > dma_capa.pool.max_pools) {
+			ODPH_ERR("Invalid amount of DMA completion pools: %d (max: %u)\n",
+				 config->num_thrs, dma_capa.pool.max_pools);
+			return PRS_NOK;
+		}
+
+		if (config->num_inflight > dma_capa.pool.max_num) {
+			ODPH_ERR("Invalid amount of DMA completion events: %u (max: %u)\n",
+				 config->num_inflight, dma_capa.pool.max_num);
+			return PRS_NOK;
+		}
+
+		config->compl_cache_size = ODPH_MIN(ODPH_MAX(config->cache_size,
+							     dma_capa.pool.min_cache_size),
+						    dma_capa.pool.max_cache_size);
+	} else if (config->copy_type == DMA_COPY_POLL) {
+		if ((dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL) == 0U) {
+			ODPH_ERR("Unsupported DMA completion mode: poll (mode support: %x)\n",
+				 dma_capa.compl_mode_mask);
+			return PRS_NOT_SUP;
+		}
+
+		if (!get_stash_capa(&stash_capa, &config->stash_type)) {
+			ODPH_ERR("Error querying stash capabilities\n");
+			return PRS_NOK;
+		}
+
+		/* One inflight-transfer stash per worker. */
+		if ((uint32_t)config->num_thrs > stash_capa.max_stashes) {
+			ODPH_ERR("Invalid amount of stashes: %d (max: %u)\n", config->num_thrs,
+				 stash_capa.max_stashes);
+			return PRS_NOK;
+		}
+
+		/* Pick the stash object size class matching the transfer ID
+		 * size and the corresponding capacity limit. */
+		if (obj_size == sizeof(uint8_t)) {
+			max_num = stash_capa.max_num.u8;
+		} else if (obj_size == sizeof(uint16_t)) {
+			max_num = stash_capa.max_num.u16;
+		} else if (obj_size <= sizeof(uint32_t)) {
+			max_num = stash_capa.max_num.u32;
+		} else if (obj_size <= sizeof(uint64_t)) {
+			max_num = stash_capa.max_num.u64;
+		} else if (obj_size <= sizeof(odp_u128_t)) {
+			max_num = stash_capa.max_num.u128;
+		} else {
+			ODPH_ERR("Invalid stash object size: %" PRIu64 "\n", obj_size);
+			return PRS_NOK;
+		}
+
+		if (config->num_inflight > max_num) {
+			ODPH_ERR("Invalid stash size: %u (max: %" PRIu64 ")\n",
+				 config->num_inflight, max_num);
+			return PRS_NOK;
+		}
+
+		config->inflight_obj_size = obj_size;
+		config->stash_cache_size = ODPH_MIN(config->cache_size, stash_capa.max_cache_size);
+	}
+
+	/* Finally validate the packet I/O pool parameters. */
+	if (config->num_pkts == 0U ||
+	    (pool_capa.pkt.max_num > 0U && config->num_pkts > pool_capa.pkt.max_num)) {
+		ODPH_ERR("Invalid pool packet count: %u (min: 1, max: %u)\n", config->num_pkts,
+			 pool_capa.pkt.max_num);
+		return PRS_NOK;
+	}
+
+	if (config->pkt_len == 0U ||
+	    (pool_capa.pkt.max_len > 0U && config->pkt_len > pool_capa.pkt.max_len)) {
+		ODPH_ERR("Invalid pool packet length: %u (min: 1, max: %u)\n", config->pkt_len,
+			 pool_capa.pkt.max_len);
+		return PRS_NOK;
+	}
+
+	if (config->num_inflight > pool_capa.buf.max_num) {
+		ODPH_ERR("Invalid pool buffer count: %u (max: %u)\n", config->num_inflight,
+			 pool_capa.buf.max_num);
+		return PRS_NOK;
+	}
+
+	return PRS_OK;
+}
+
+static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
+{
+ int opt, long_index;
+
+ static const struct option longopts[] = {
+ { "interfaces", required_argument, NULL, 'i' },
+ { "copy_type", required_argument, NULL, 't' },
+ { "burst_size", required_argument, NULL, 'b' },
+ { "num_pkts", required_argument, NULL, 'n' },
+ { "pkt_len", required_argument, NULL, 'l' },
+ { "worker_count", required_argument, NULL, 'c' },
+ { "cache_size", required_argument, NULL, 'C' },
+ { "time_sec", required_argument, NULL, 'T' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "i:t:b:n:l:c:C:T:h";
+
+ init_config(config);
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'i':
+ parse_interfaces(config, optarg);
+ break;
+ case 't':
+ config->copy_type = atoi(optarg);
+ break;
+ case 'b':
+ config->burst_size = atoi(optarg);
+ break;
+ case 'n':
+ config->num_pkts = atoi(optarg);
+ break;
+ case 'l':
+ config->pkt_len = atoi(optarg);
+ break;
+ case 'c':
+ config->num_thrs = atoi(optarg);
+ break;
+ case 'C':
+ config->cache_size = atoi(optarg);
+ break;
+ case 'T':
+ config->time_sec = atof(optarg);
+ break;
+ case 'h':
+ print_usage(&config->dyn_defs);
+ return PRS_TERM;
+ case '?':
+ default:
+ print_usage(&config->dyn_defs);
+ return PRS_NOK;
+ }
+ }
+
+ return check_options(config);
+}
+
+static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
+ ODPH_ERR("Error installing signal handler\n");
+ return PRS_NOK;
+ }
+
+ return parse_options(argc, argv, config);
+}
+
+static inline int send_packets(odp_pktout_queue_t queue, odp_packet_t pkts[], int num)
+{
+ int ret = odp_pktout_send(queue, pkts, num);
+
+ if (odp_unlikely(ret < num)) {
+ ret = ret < 0 ? 0 : ret;
+ odp_packet_free_multi(&pkts[ret], num - ret);
+ }
+
+ return ret;
+}
+
+static void sw_copy_and_send_packets(odp_packet_t pkts[], int num, pktio_t *pktio,
+ init_fn_t init_fn ODP_UNUSED, start_fn_t start_fn ODP_UNUSED,
+ thread_config_t *config)
+{
+ odp_packet_t old_pkt, new_pkt;
+ odp_pool_t copy_pool = config->copy_pool;
+ odp_packet_t out_pkts[num];
+ int num_out_pkts = 0, num_sent;
+ stats_t *stats = &config->stats;
+
+ for (int i = 0; i < num; ++i) {
+ old_pkt = pkts[i];
+ new_pkt = odp_packet_copy(old_pkt, copy_pool);
+
+ if (new_pkt != ODP_PACKET_INVALID)
+ out_pkts[num_out_pkts++] = new_pkt;
+ else
+ ++stats->copy_errs;
+
+ odp_packet_free(old_pkt);
+ }
+
+ if (num_out_pkts > 0) {
+ num_sent = send_packets(pktio->out_qs[config->thr_idx % pktio->num_out_qs],
+ out_pkts, num_out_pkts);
+ stats->fwd_pkts += num_sent;
+ stats->discards += num_out_pkts - num_sent;
+ }
+}
+
/* Prepare a DMA transfer descriptor that completes via a scheduled event.
 * Allocates a tracking buffer (transfer_t) from the thread's transfer pool
 * and a DMA completion event from its completion pool; the buffer handle is
 * stored in compl_param->user_ptr so completion handling can recover it.
 * Returns the zeroed tracking record, or NULL on allocation failure (with
 * partial allocations released and the matching error counter bumped). */
static transfer_t *init_dma_ev_trs(odp_dma_transfer_param_t *trs_param,
				   odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
				   odp_dma_seg_t *dst_segs, pktio_t *pktio,
				   thread_config_t *config)
{
	odp_buffer_t buf;
	stats_t *stats = &config->stats;
	transfer_t *trs;
	odp_dma_compl_t c_ev;

	buf = odp_buffer_alloc(config->trs_pool);

	if (odp_unlikely(buf == ODP_BUFFER_INVALID)) {
		++stats->buf_alloc_errs;
		return NULL;
	}

	trs = (transfer_t *)odp_buffer_addr(buf);
	trs->num = 0;
	trs->pktio = pktio;
	/* Segment counts start at zero; dma_copy() grows them per packet. */
	trs_param->src_format = ODP_DMA_FORMAT_PACKET;
	trs_param->dst_format = ODP_DMA_FORMAT_PACKET;
	trs_param->num_src = 0U;
	trs_param->num_dst = 0U;
	trs_param->src_seg = src_segs;
	trs_param->dst_seg = dst_segs;
	compl_param->compl_mode = ODP_DMA_COMPL_EVENT;
	c_ev = odp_dma_compl_alloc(config->compl_pool);

	if (odp_unlikely(c_ev == ODP_DMA_COMPL_INVALID)) {
		/* Do not leak the tracking buffer allocated above. */
		odp_buffer_free(buf);
		++stats->compl_alloc_errs;
		return NULL;
	}

	compl_param->event = odp_dma_compl_to_event(c_ev);
	compl_param->queue = config->compl_q;
	compl_param->user_ptr = buf;
	memset(src_segs, 0, sizeof(*src_segs) * MAX_BURST);
	memset(dst_segs, 0, sizeof(*dst_segs) * MAX_BURST);

	return trs;
}
+
/* Prepare a DMA transfer descriptor that is completed by polling.
 * Mirrors init_dma_ev_trs(), but instead of a completion event it allocates
 * a transfer ID from the thread's DMA session; the tracking buffer handle is
 * again stored in compl_param->user_ptr. Returns the zeroed tracking record,
 * or NULL on allocation failure (partial allocations are released and the
 * relevant error counter bumped). */
static transfer_t *init_dma_poll_trs(odp_dma_transfer_param_t *trs_param,
				     odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
				     odp_dma_seg_t *dst_segs, pktio_t *pktio,
				     thread_config_t *config)
{
	odp_buffer_t buf;
	stats_t *stats = &config->stats;
	transfer_t *trs;

	buf = odp_buffer_alloc(config->trs_pool);

	if (odp_unlikely(buf == ODP_BUFFER_INVALID)) {
		++stats->buf_alloc_errs;
		return NULL;
	}

	trs = (transfer_t *)odp_buffer_addr(buf);
	trs->num = 0;
	trs->pktio = pktio;
	/* Segment counts start at zero; dma_copy() grows them per packet. */
	trs_param->src_format = ODP_DMA_FORMAT_PACKET;
	trs_param->dst_format = ODP_DMA_FORMAT_PACKET;
	trs_param->num_src = 0U;
	trs_param->num_dst = 0U;
	trs_param->src_seg = src_segs;
	trs_param->dst_seg = dst_segs;
	compl_param->compl_mode = ODP_DMA_COMPL_POLL;
	compl_param->transfer_id = odp_dma_transfer_id_alloc(config->dma_handle);

	if (odp_unlikely(compl_param->transfer_id == ODP_DMA_TRANSFER_ID_INVALID)) {
		/* Do not leak the tracking buffer allocated above. */
		odp_buffer_free(buf);
		++stats->compl_alloc_errs;
		return NULL;
	}

	compl_param->user_ptr = buf;
	memset(src_segs, 0, sizeof(*src_segs) * MAX_BURST);
	memset(dst_segs, 0, sizeof(*dst_segs) * MAX_BURST);

	return trs;
}
+
+static odp_bool_t start_dma_ev_trs(odp_dma_transfer_param_t *trs_param,
+ odp_dma_compl_param_t *compl_param, thread_config_t *config)
+{
+ const int ret = odp_dma_transfer_start(config->dma_handle, trs_param, compl_param);
+
+ if (odp_unlikely(ret <= 0)) {
+ odp_buffer_free(compl_param->user_ptr);
+ odp_event_free(compl_param->event);
+ return false;
+ }
+
+ return true;
+}
+
/* Start a poll-completed DMA transfer and record its transfer ID in the
 * thread-local inflight stash for later polling. On start failure the
 * tracking buffer and the transfer ID are released. Returns true when the
 * transfer was started and stashed. */
static odp_bool_t start_dma_poll_trs(odp_dma_transfer_param_t *trs_param,
				     odp_dma_compl_param_t *compl_param, thread_config_t *config)
{
	const int ret = odp_dma_transfer_start(config->dma_handle, trs_param, compl_param);

	if (odp_unlikely(ret <= 0)) {
		odp_buffer_free(compl_param->user_ptr);
		odp_dma_transfer_id_free(config->dma_handle, compl_param->transfer_id);
		return false;
	}

	/* The stash is sized for all inflight transfers, so a put can only
	 * fail on a programming error. */
	if (odp_unlikely(odp_stash_put(config->inflight_stash, &compl_param->transfer_id, 1) != 1))
		/* Should not happen, but make it visible if it somehow does */
		ODPH_ABORT("DMA inflight transfer stash overflow, aborting");

	return true;
}
+
+static void dma_copy(odp_packet_t pkts[], int num, pktio_t *pktio, init_fn_t init_fn,
+ start_fn_t start_fn, thread_config_t *config)
+{
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_compl_param_t compl_param;
+ odp_packet_t pkt;
+ transfer_t *trs = NULL;
+ odp_dma_seg_t src_segs[MAX_BURST], dst_segs[MAX_BURST];
+ uint32_t num_segs = 0U, pkt_len;
+ odp_pool_t copy_pool = config->copy_pool;
+ stats_t *stats = &config->stats;
+
+ odp_dma_transfer_param_init(&trs_param);
+ odp_dma_compl_param_init(&compl_param);
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(trs == NULL)) {
+ trs = init_fn(&trs_param, &compl_param, src_segs, dst_segs, pktio, config);
+
+ if (trs == NULL) {
+ odp_packet_free(pkt);
+ continue;
+ }
+ }
+
+ pkt_len = odp_packet_len(pkt);
+ src_segs[num_segs].packet = pkt;
+ src_segs[num_segs].len = pkt_len;
+ dst_segs[num_segs].packet = odp_packet_alloc(copy_pool, pkt_len);
+
+ if (odp_unlikely(dst_segs[num_segs].packet == ODP_PACKET_INVALID)) {
+ odp_packet_free(pkt);
+ ++stats->pkt_alloc_errs;
+ continue;
+ }
+
+ dst_segs[num_segs].len = pkt_len;
+ trs->src_pkts[num_segs] = src_segs[num_segs].packet;
+ trs->dst_pkts[num_segs] = dst_segs[num_segs].packet;
+ ++trs->num;
+ ++trs_param.num_src;
+ ++trs_param.num_dst;
+ ++num_segs;
+ }
+
+ if (num_segs > 0U)
+ if (odp_unlikely(!start_fn(&trs_param, &compl_param, config))) {
+ odp_packet_free_multi(trs->src_pkts, trs->num);
+ odp_packet_free_multi(trs->dst_pkts, trs->num);
+ ++stats->start_errs;
+ }
+}
+
/* Drain leftover scheduled events after the workers have stopped, giving up
 * once the scheduler stays empty for 100 ms. DMA completion events carry a
 * tracking buffer (via user_ptr) whose source/destination packets and the
 * buffer itself must be released before freeing the event. */
static void drain_events(thread_config_t *config ODP_UNUSED)
{
	odp_event_t ev;
	odp_event_type_t type;
	odp_dma_result_t res;
	odp_buffer_t buf;
	transfer_t *trs;

	while (true) {
		ev = odp_schedule(NULL, odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS));

		if (ev == ODP_EVENT_INVALID)
			break;

		type = odp_event_type(ev);

		if (type == ODP_EVENT_DMA_COMPL) {
			memset(&res, 0, sizeof(res));
			odp_dma_compl_result(odp_dma_compl_from_event(ev), &res);
			buf = (odp_buffer_t)res.user_ptr;
			trs = (transfer_t *)odp_buffer_addr(buf);
			odp_packet_free_multi(trs->src_pkts, trs->num);
			odp_packet_free_multi(trs->dst_pkts, trs->num);
			odp_buffer_free(buf);
		}

		/* Packets and any other stray events are simply freed. */
		odp_event_free(ev);
	}
}
+
/* Drain all still-inflight poll-mode transfers recorded in the thread's
 * stash: busy-wait for each transfer to finish, free its transfer ID, then
 * release the tracked packets and the tracking buffer. Transfers that
 * report an error (ret < 0) only get their ID freed, since no valid result
 * (and thus no user_ptr) is available. */
static void drain_polled(thread_config_t *config)
{
	odp_dma_transfer_id_t id;
	odp_dma_result_t res;
	int ret;
	odp_buffer_t buf;
	transfer_t *trs;

	while (true) {
		if (odp_stash_get(config->inflight_stash, &id, 1) != 1)
			break;

		memset(&res, 0, sizeof(res));

		/* Spin until the transfer is no longer in progress. */
		do {
			ret = odp_dma_transfer_done(config->dma_handle, id, &res);
		} while (ret == 0);

		odp_dma_transfer_id_free(config->dma_handle, id);

		if (ret < 0)
			continue;

		buf = (odp_buffer_t)res.user_ptr;
		trs = (transfer_t *)odp_buffer_addr(buf);
		odp_packet_free_multi(trs->src_pkts, trs->num);
		odp_packet_free_multi(trs->dst_pkts, trs->num);
		odp_buffer_free(buf);
	}
}
+
/* Create the copy-path resources according to the configured copy type.
 * Always creates the shared packet copy pool. For SW copy that is all that
 * is needed. For DMA copy, additionally creates a shared transfer-tracking
 * buffer pool and, per worker thread: a DMA session plus either a DMA
 * completion event pool and a scheduled completion queue (event mode) or a
 * thread-local stash of inflight transfer IDs (poll mode). Also selects the
 * init/start/drain/packet function pointers used by the data path.
 * Returns false on the first failure; teardown() cleans up partial state. */
static odp_bool_t setup_copy(prog_config_t *config)
{
	odp_pool_param_t pool_param;
	thread_config_t *thr;
	const odp_dma_param_t dma_param = {
		.direction = ODP_DMA_MAIN_TO_MAIN,
		.type = ODP_DMA_TYPE_COPY,
		.compl_mode_mask = ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_POLL,
		.mt_mode = ODP_DMA_MT_SERIAL,
		.order = ODP_DMA_ORDER_NONE };
	odp_dma_pool_param_t compl_pool_param;
	odp_queue_param_t queue_param;
	odp_stash_param_t stash_param;

	odp_pool_param_init(&pool_param);
	pool_param.pkt.seg_len = config->pkt_len;
	pool_param.pkt.len = config->pkt_len;
	pool_param.pkt.num = config->num_pkts;
	pool_param.pkt.cache_size = config->cache_size;
	pool_param.type = ODP_POOL_PACKET;
	config->copy_pool = odp_pool_create(PROG_NAME "_copy", &pool_param);

	if (config->copy_pool == ODP_POOL_INVALID) {
		ODPH_ERR("Error creating packet copy pool\n");
		return false;
	}

	if (config->copy_type == SW_COPY) {
		/* SW copy needs no DMA resources; just hand every thread the
		 * copy pool. */
		config->pkt_fn = sw_copy_and_send_packets;

		for (int i = 0; i < config->num_thrs; ++i)
			config->thread_config[i].copy_pool = config->copy_pool;

		return true;
	}

	/* Buffer pool for transfer_t tracking records, one per inflight
	 * transfer. */
	pool_param.buf.num = config->num_inflight;
	pool_param.buf.size = sizeof(transfer_t);
	pool_param.buf.cache_size = config->trs_cache_size;
	pool_param.type = ODP_POOL_BUFFER;
	config->trs_pool = odp_pool_create(PROG_NAME "_dma_trs", &pool_param);

	if (config->trs_pool == ODP_POOL_INVALID) {
		ODPH_ERR("Error creating DMA transfer tracking pool\n");
		return false;
	}

	for (int i = 0; i < config->num_thrs; ++i) {
		thr = &config->thread_config[i];
		thr->copy_pool = config->copy_pool;
		thr->trs_pool = config->trs_pool;
		thr->dma_handle = odp_dma_create(PROG_NAME "_dma", &dma_param);

		if (thr->dma_handle == ODP_DMA_INVALID) {
			ODPH_ERR("Error creating DMA session\n");
			return false;
		}

		if (config->copy_type == DMA_COPY_EV) {
			odp_dma_pool_param_init(&compl_pool_param);
			compl_pool_param.num = config->num_inflight;
			compl_pool_param.cache_size = config->compl_cache_size;
			thr->compl_pool = odp_dma_pool_create(PROG_NAME "_dma_compl",
							      &compl_pool_param);

			if (thr->compl_pool == ODP_POOL_INVALID) {
				ODPH_ERR("Error creating DMA event completion pool\n");
				return false;
			}

			/* Completions arrive on a high-priority parallel
			 * scheduled queue, mixed with packet input events. */
			odp_queue_param_init(&queue_param);
			queue_param.type = ODP_QUEUE_TYPE_SCHED;
			queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
			queue_param.sched.prio = odp_schedule_max_prio();
			thr->compl_q = odp_queue_create(PROG_NAME "_dma_compl", &queue_param);

			if (thr->compl_q == ODP_QUEUE_INVALID) {
				ODPH_ERR("Error creating DMA completion queue\n");
				return false;
			}

			config->init_fn = init_dma_ev_trs;
			config->start_fn = start_dma_ev_trs;
			config->drain_fn = drain_events;
		} else {
			/* Poll mode: thread-local stash holding the IDs of
			 * inflight transfers. */
			odp_stash_param_init(&stash_param);
			stash_param.type = config->stash_type;
			stash_param.put_mode = ODP_STASH_OP_LOCAL;
			stash_param.get_mode = ODP_STASH_OP_LOCAL;
			stash_param.num_obj = config->num_inflight;
			stash_param.obj_size = config->inflight_obj_size;
			stash_param.cache_size = config->stash_cache_size;
			/* NOTE(review): unlike the other objects, this name
			 * lacks the PROG_NAME prefix — presumably cosmetic,
			 * confirm it is intentional. */
			thr->inflight_stash = odp_stash_create("_dma_inflight", &stash_param);

			if (thr->inflight_stash == ODP_STASH_INVALID) {
				ODPH_ERR("Error creating DMA inflight transfer stash\n");
				return false;
			}

			config->init_fn = init_dma_poll_trs;
			config->start_fn = start_dma_poll_trs;
			config->drain_fn = drain_polled;
		}
	}

	config->pkt_fn = dma_copy;

	return true;
}
+
/* Create the packet I/O pool and open, configure and start every configured
 * interface. Input is in scheduled mode with IPv4 hashing across up to
 * 'num_thrs' queues; output is direct mode with up to min(num_thrs,
 * MAX_OUT_QS) queues, marked MT-unsafe when there is one queue per worker.
 * Also fills pktio_idx_map so a packet's input can be mapped back to the
 * local interface slot. Returns false on the first failure. */
static odp_bool_t setup_pktios(prog_config_t *config)
{
	odp_pool_param_t pool_param;
	pktio_t *pktio;
	odp_pktio_param_t pktio_param;
	odp_pktio_capability_t capa;
	uint32_t num_input_qs, num_output_qs;
	odp_pktin_queue_param_t pktin_param;
	odp_pktout_queue_param_t pktout_param;

	odp_pool_param_init(&pool_param);
	pool_param.pkt.seg_len = config->pkt_len;
	pool_param.pkt.len = config->pkt_len;
	pool_param.pkt.num = config->num_pkts;
	pool_param.pkt.cache_size = config->cache_size;
	pool_param.type = ODP_POOL_PACKET;
	config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param);

	if (config->pktio_pool == ODP_POOL_INVALID) {
		ODPH_ERR("Error creating packet I/O pool\n");
		return false;
	}

	for (uint32_t i = 0U; i < config->num_ifs; ++i) {
		pktio = &config->pktios[i];
		odp_pktio_param_init(&pktio_param);
		pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
		pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
		pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param);

		if (pktio->handle == ODP_PKTIO_INVALID) {
			ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name);
			return false;
		}

		/* Map global pktio index to local interface slot. */
		config->pktio_idx_map[odp_pktio_index(pktio->handle)] = i;

		if (odp_pktio_capability(pktio->handle, &capa) < 0) {
			ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name);
			return false;
		}

		num_input_qs = ODPH_MIN((uint32_t)config->num_thrs, capa.max_input_queues);
		num_output_qs = ODPH_MIN((uint32_t)config->num_thrs, capa.max_output_queues);
		num_output_qs = ODPH_MIN(num_output_qs, MAX_OUT_QS);
		odp_pktin_queue_param_init(&pktin_param);

		/* Spread flows across input queues by IPv4 hash when more
		 * than one queue is available. */
		if (num_input_qs > 1) {
			pktin_param.hash_enable = true;
			pktin_param.hash_proto.proto.ipv4 = 1U;
		}

		pktin_param.num_queues = num_input_qs;
		pktin_param.queue_param.sched.prio = odp_schedule_default_prio();

		if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) {
			ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name);
			return false;
		}

		odp_pktout_queue_param_init(&pktout_param);

		/* One output queue per worker: no concurrent users, so the
		 * MT-unsafe fast path can be used. */
		if (num_output_qs == (uint32_t)config->num_thrs)
			pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;

		pktout_param.num_queues = num_output_qs;
		pktio->num_out_qs = num_output_qs;

		if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) {
			ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name);
			return false;
		}

		if (odp_pktout_queue(pktio->handle, pktio->out_qs, num_output_qs) !=
		    (int)num_output_qs) {
			ODPH_ERR("Error querying packet I/O output queues (%s)\n", pktio->name);
			return false;
		}

		if (odp_pktio_start(pktio->handle) < 0) {
			ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name);
			return false;
		}
	}

	return true;
}
+
+static inline void send_dma_poll_trs_pkts(int burst_size, thread_config_t *config)
+{
+ odp_stash_t stash_handle = config->inflight_stash;
+ odp_dma_transfer_id_t ids[burst_size], id;
+ int32_t num;
+ odp_dma_t dma_handle = config->dma_handle;
+ odp_dma_result_t res;
+ int ret;
+ odp_buffer_t buf;
+ transfer_t *trs;
+ pktio_t *pktio;
+ int num_sent;
+ stats_t *stats = &config->stats;
+
+ while (true) {
+ num = odp_stash_get(stash_handle, &ids, burst_size);
+
+ if (num <= 0)
+ break;
+
+ for (int32_t i = 0; i < num; ++i) {
+ id = ids[i];
+ ret = odp_dma_transfer_done(dma_handle, id, &res);
+
+ if (ret == 0) {
+ if (odp_unlikely(odp_stash_put(stash_handle, &id, 1) != 1))
+ /* Should not happen, but make it visible if it somehow
+ * does */
+ ODPH_ABORT("DMA inflight transfer stash overflow,"
+ " aborting");
+
+ ++stats->trs_polled;
+ continue;
+ }
+
+ odp_dma_transfer_id_free(dma_handle, id);
+
+ if (ret < 0) {
+ ++stats->trs_poll_errs;
+ continue;
+ }
+
+ buf = (odp_buffer_t)res.user_ptr;
+ trs = (transfer_t *)odp_buffer_addr(buf);
+
+ if (res.success) {
+ pktio = trs->pktio;
+ num_sent = send_packets(pktio->out_qs[config->thr_idx %
+ pktio->num_out_qs],
+ trs->dst_pkts, trs->num);
+ ++stats->trs;
+ stats->fwd_pkts += num_sent;
+ stats->discards += trs->num - num_sent;
+ } else {
+ odp_packet_free_multi(trs->dst_pkts, trs->num);
+ ++stats->trs_errs;
+ }
+
+ odp_packet_free_multi(trs->src_pkts, trs->num);
+ odp_buffer_free(buf);
+ }
+ }
+}
+
/* Event-mode completion path: resolve one DMA completion event into its
 * tracking record, forward the copied packets on success (drop them on
 * transfer error), then free the source packets, the tracking buffer and
 * the completion event itself. */
static inline void send_dma_ev_trs_pkts(odp_dma_compl_t compl_ev, thread_config_t *config)
{
	odp_dma_result_t res;
	odp_buffer_t buf;
	transfer_t *trs;
	pktio_t *pktio;
	int num_sent;
	stats_t *stats = &config->stats;

	memset(&res, 0, sizeof(res));
	odp_dma_compl_result(compl_ev, &res);
	buf = (odp_buffer_t)res.user_ptr;
	trs = (transfer_t *)odp_buffer_addr(buf);

	if (res.success) {
		pktio = trs->pktio;
		num_sent = send_packets(pktio->out_qs[config->thr_idx % pktio->num_out_qs],
					trs->dst_pkts, trs->num);
		++stats->trs;
		stats->fwd_pkts += num_sent;
		stats->discards += trs->num - num_sent;
	} else {
		odp_packet_free_multi(trs->dst_pkts, trs->num);
		++stats->trs_errs;
	}

	/* Sources are always freed; copies were either sent or freed above. */
	odp_packet_free_multi(trs->src_pkts, trs->num);
	odp_buffer_free(buf);
	odp_dma_compl_free(compl_ev);
}
+
+static inline void push_packet(odp_packet_t pkt, pkt_vec_t pkt_vecs[], uint8_t *pktio_idx_map)
+{
+ uint8_t idx = pktio_idx_map[odp_packet_input_index(pkt)];
+ pkt_vec_t *pkt_vec = &pkt_vecs[idx];
+
+ pkt_vec->pkts[pkt_vec->num++] = pkt;
+}
+
+static inline void pop_packets(pkt_vec_t *pkt_vec, int num_procd)
+{
+ pkt_vec->num -= num_procd;
+
+ for (int i = 0, j = num_procd; i < pkt_vec->num; ++i, ++j)
+ pkt_vec->pkts[i] = pkt_vec->pkts[j];
+}
+
+static void free_pending_packets(pkt_vec_t pkt_vecs[], uint32_t num_ifs)
+{
+ for (uint32_t i = 0U; i < num_ifs; ++i)
+ odp_packet_free_multi(pkt_vecs[i].pkts, pkt_vecs[i].num);
+}
+
/* Worker thread entry point. Loops while the global run flag is set:
 * schedules bursts of events, dispatches DMA completion events and received
 * packets (bucketed per input interface), and starts a copy once a full
 * burst has accumulated for an interface. In poll mode it additionally
 * polls inflight transfers every round. Records scheduler and total cycle
 * counts for the statistics report, then drains leftover work after all
 * workers pass the termination barrier. */
static int process_packets(void *args)
{
	thread_config_t *config = args;
	const uint8_t num_ifs = config->prog_config->num_ifs;
	pkt_vec_t pkt_vecs[num_ifs], *pkt_vec;
	odp_atomic_u32_t *is_running = &config->prog_config->is_running;
	uint64_t c1, c2, c3, c4, cdiff = 0U, rounds = 0U;
	const uint8_t copy_type = config->prog_config->copy_type;
	const int burst_size = config->prog_config->burst_size;
	odp_event_t evs[burst_size];
	int num_evs;
	odp_event_t ev;
	odp_event_type_t type;
	uint8_t *pktio_map = config->prog_config->pktio_idx_map;
	stats_t *stats = &config->stats;
	init_fn_t init_fn = config->prog_config->init_fn;
	start_fn_t start_fn = config->prog_config->start_fn;
	pkt_fn_t pkt_fn = config->prog_config->pkt_fn;

	for (uint32_t i = 0U; i < num_ifs; ++i) {
		pkt_vecs[i].pktio = &config->prog_config->pktios[i];
		pkt_vecs[i].num = 0;
	}

	config->thr_idx = odp_thread_id();
	odp_barrier_wait(&config->prog_config->init_barrier);
	c1 = odp_cpu_cycles();

	while (odp_atomic_load_u32(is_running)) {
		/* Measure scheduler call cycles separately from the total. */
		c3 = odp_cpu_cycles();
		num_evs = odp_schedule_multi_no_wait(NULL, evs, burst_size);
		c4 = odp_cpu_cycles();
		cdiff += odp_cpu_cycles_diff(c4, c3);
		++rounds;

		/* Poll-mode completions are detected by polling, not via
		 * scheduled events. */
		if (copy_type == DMA_COPY_POLL)
			send_dma_poll_trs_pkts(burst_size, config);

		if (num_evs == 0)
			continue;

		for (int i = 0; i < num_evs; ++i) {
			ev = evs[i];
			type = odp_event_type(ev);

			if (type == ODP_EVENT_DMA_COMPL) {
				send_dma_ev_trs_pkts(odp_dma_compl_from_event(ev), config);
			} else if (type == ODP_EVENT_PACKET) {
				push_packet(odp_packet_from_event(ev), pkt_vecs, pktio_map);
			} else {
				/* Unexpected event type: count as a drop. */
				odp_event_free(ev);
				++stats->discards;
			}
		}

		/* Start a copy for every interface with a full burst. */
		for (uint32_t i = 0U; i < num_ifs; ++i) {
			pkt_vec = &pkt_vecs[i];

			if (pkt_vec->num >= burst_size) {
				pkt_fn(pkt_vec->pkts, burst_size, pkt_vec->pktio, init_fn,
				       start_fn, config);
				pop_packets(pkt_vec, burst_size);
			}
		}
	}

	c2 = odp_cpu_cycles();
	stats->sched_cc = cdiff;
	stats->tot_cc = odp_cpu_cycles_diff(c2, c1);
	stats->sched_rounds = rounds;
	free_pending_packets(pkt_vecs, num_ifs);
	odp_barrier_wait(&config->prog_config->term_barrier);

	/* Drain inflight transfers/events once everybody has stopped. */
	if (config->prog_config->drain_fn)
		config->prog_config->drain_fn(config);

	return 0;
}
+
/* Launch the worker threads on the default worker CPU mask, each running
 * process_packets() with its own per-thread config. Returns false if fewer
 * threads than requested could be created. */
static odp_bool_t setup_workers(prog_config_t *config)
{
	odp_cpumask_t cpumask;
	int num_workers;
	odph_thread_common_param_t thr_common;
	odph_thread_param_t thr_param[config->num_thrs];

	/* May yield fewer CPUs than requested; the count is checked below
	 * against the configured thread count. */
	num_workers = odp_cpumask_default_worker(&cpumask, config->num_thrs);
	odph_thread_common_param_init(&thr_common);
	thr_common.instance = config->odp_instance;
	thr_common.cpumask = &cpumask;

	for (int i = 0; i < config->num_thrs; ++i) {
		odph_thread_param_init(&thr_param[i]);
		thr_param[i].start = process_packets;
		thr_param[i].thr_type = ODP_THREAD_WORKER;
		config->thread_config[i].prog_config = config;
		thr_param[i].arg = &config->thread_config[i];
	}

	num_workers = odph_thread_create(config->thread_tbl, &thr_common, thr_param, num_workers);

	if (num_workers != config->num_thrs) {
		ODPH_ERR("Error configuring worker threads\n");
		return false;
	}

	return true;
}
+
+static odp_bool_t setup_test(prog_config_t *config)
+{
+ odp_barrier_init(&config->init_barrier, config->num_thrs + 1);
+ odp_barrier_init(&config->term_barrier, config->num_thrs + 1);
+
+ if (!setup_copy(config))
+ return false;
+
+ if (!setup_pktios(config))
+ return false;
+
+ if (!setup_workers(config))
+ return false;
+
+ odp_barrier_wait(&config->init_barrier);
+
+ return true;
+}
+
+static void stop_test(prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i)
+ if (config->pktios[i].handle != ODP_PKTIO_INVALID)
+ (void)odp_pktio_stop(config->pktios[i].handle);
+
+ odp_barrier_wait(&config->term_barrier);
+ (void)odph_thread_join(config->thread_tbl, config->num_thrs);
+}
+
/* Release all resources created by setup_copy()/setup_pktios() and the
 * interface name strings from option parsing. Safe on partially initialized
 * state: every handle is checked against its invalid value before being
 * destroyed. */
static void teardown(prog_config_t *config)
{
	thread_config_t *thr;

	for (uint32_t i = 0U; i < config->num_ifs; ++i) {
		free(config->pktios[i].name);

		if (config->pktios[i].handle != ODP_PKTIO_INVALID)
			(void)odp_pktio_close(config->pktios[i].handle);
	}

	if (config->pktio_pool != ODP_POOL_INVALID)
		(void)odp_pool_destroy(config->pktio_pool);

	for (int i = 0; i < config->num_thrs; ++i) {
		thr = &config->thread_config[i];

		if (thr->inflight_stash != ODP_STASH_INVALID)
			(void)odp_stash_destroy(thr->inflight_stash);

		if (thr->compl_q != ODP_QUEUE_INVALID)
			(void)odp_queue_destroy(thr->compl_q);

		if (thr->compl_pool != ODP_POOL_INVALID)
			(void)odp_pool_destroy(thr->compl_pool);

		/* DMA session last: queues/pools above reference it. */
		if (thr->dma_handle != ODP_DMA_INVALID)
			(void)odp_dma_destroy(thr->dma_handle);
	}

	if (config->copy_pool != ODP_POOL_INVALID)
		(void)odp_pool_destroy(config->copy_pool);

	if (config->trs_pool != ODP_POOL_INVALID)
		(void)odp_pool_destroy(config->trs_pool);
}
+
/* Print the end-of-run report: the effective configuration followed by
 * per-worker counters. The 'align1'/'align2' padding strings keep the
 * value columns lined up across the three copy modes, whose label widths
 * differ. */
static void print_stats(const prog_config_t *config)
{
	const stats_t *stats;
	const char *align1 = config->copy_type == DMA_COPY_EV ? " " : "";
	const char *align2 = config->copy_type == SW_COPY ? " " :
			     config->copy_type == DMA_COPY_EV ? " " :
			     " ";

	printf("\n==================\n\n"
	       "DMA forwarder done\n\n"
	       " copy mode: %s\n"
	       " burst size: %u\n"
	       " packet length: %u\n"
	       " max cache size: %u\n", config->copy_type == SW_COPY ? "SW" :
	       config->copy_type == DMA_COPY_EV ? "DMA-event" : "DMA-poll",
	       config->burst_size, config->pkt_len, config->cache_size);

	for (int i = 0; i < config->num_thrs; ++i) {
		stats = &config->thread_config[i].stats;

		printf("\n worker %d:\n", i);

		if (config->copy_type == SW_COPY) {
			printf(" packet copy errors: %" PRIu64 "\n",
			       stats->copy_errs);
		} else {
			printf(" successful DMA transfers: %s%" PRIu64 "\n"
			       " DMA transfer start errors: %s%" PRIu64 "\n"
			       " DMA transfer errors: %s%" PRIu64 "\n"
			       " transfer buffer allocation errors: %s%" PRIu64 "\n"
			       " copy packet allocation errors: %s%" PRIu64 "\n",
			       align1, stats->trs, align1, stats->start_errs, align1,
			       stats->trs_errs, align1, stats->buf_alloc_errs, align1,
			       stats->pkt_alloc_errs);

			/* Mode-specific counters: event allocation for event
			 * mode, ID/poll counters for poll mode. */
			if (config->copy_type == DMA_COPY_EV)
				printf(" completion event allocation errors: %" PRIu64 "\n",
				       stats->compl_alloc_errs);
			else
				printf(" transfer ID allocation errors: %" PRIu64 "\n"
				       " transfer poll errors: %" PRIu64 "\n"
				       " transfers polled: %" PRIu64 "\n",
				       stats->compl_alloc_errs, stats->trs_poll_errs,
				       stats->trs_polled);
		}

		printf(" packets forwarded:%s%" PRIu64 "\n"
		       " packets dropped: %s%" PRIu64 "\n"
		       " call cycles per schedule round:\n"
		       " total: %" PRIu64 "\n"
		       " schedule: %" PRIu64 "\n"
		       " rounds: %" PRIu64 "\n", align2, stats->fwd_pkts, align2,
		       stats->discards, DIV_IF(stats->tot_cc, stats->sched_rounds),
		       DIV_IF(stats->sched_cc, stats->sched_rounds), stats->sched_rounds);
	}

	printf("\n==================\n");
}
+
/* Program entry point: initialize ODP, reserve shared memory for the
 * program configuration, parse and validate options, run the test for the
 * configured time (or until a signal clears the run flag), then report
 * statistics and tear everything down. Returns EXIT_SUCCESS/EXIT_FAILURE,
 * or EXIT_NOT_SUP when a required capability is missing. */
int main(int argc, char **argv)
{
	odph_helper_options_t odph_opts;
	odp_init_t init_param;
	odp_instance_t odp_instance;
	odp_shm_t shm_cfg = ODP_SHM_INVALID;
	int ret = EXIT_SUCCESS;
	parse_result_t parse_res;

	/* Strip helper options (e.g. process mode) before own parsing. */
	argc = odph_parse_options(argc, argv);

	if (odph_options(&odph_opts) == -1) {
		ODPH_ERR("Error while reading ODP helper options, exiting\n");
		exit(EXIT_FAILURE);
	}

	odp_init_param_init(&init_param);
	init_param.mem_model = odph_opts.mem_model;

	if (odp_init_global(&odp_instance, &init_param, NULL)) {
		ODPH_ERR("ODP global init failed, exiting\n");
		exit(EXIT_FAILURE);
	}

	if (odp_init_local(odp_instance, ODP_THREAD_CONTROL)) {
		ODPH_ERR("ODP local init failed, exiting\n");
		exit(EXIT_FAILURE);
	}

	/* Configuration lives in shared memory so worker threads (possibly
	 * separate processes) can access it. */
	shm_cfg = odp_shm_reserve(PROG_NAME "_cfg", sizeof(prog_config_t), ODP_CACHE_LINE_SIZE,
				  0U);

	if (shm_cfg == ODP_SHM_INVALID) {
		ODPH_ERR("Error reserving shared memory\n");
		ret = EXIT_FAILURE;
		goto out;
	}

	prog_conf = odp_shm_addr(shm_cfg);

	if (prog_conf == NULL) {
		ODPH_ERR("Error resolving shared memory address\n");
		ret = EXIT_FAILURE;
		goto out;
	}

	parse_res = setup_program(argc, argv, prog_conf);

	if (parse_res == PRS_NOK) {
		ret = EXIT_FAILURE;
		goto out_test;
	}

	if (parse_res == PRS_TERM) {
		ret = EXIT_SUCCESS;
		goto out_test;
	}

	if (parse_res == PRS_NOT_SUP) {
		ret = EXIT_NOT_SUP;
		goto out_test;
	}

	/* Scheduler must be configured before scheduled queues are created
	 * in setup_test(). */
	if (odp_schedule_config(NULL) < 0) {
		ODPH_ERR("Error configuring scheduler\n");
		ret = EXIT_FAILURE;
		goto out_test;
	}

	prog_conf->odp_instance = odp_instance;
	odp_atomic_init_u32(&prog_conf->is_running, 1U);

	if (!setup_test(prog_conf)) {
		ret = EXIT_FAILURE;
		goto out_test;
	}

	/* Timed run when a duration was given, otherwise run until a signal
	 * handler clears the flag. */
	if (prog_conf->time_sec > 0.001) {
		struct timespec ts;

		ts.tv_sec = prog_conf->time_sec;
		ts.tv_nsec = (prog_conf->time_sec - ts.tv_sec) * ODP_TIME_SEC_IN_NS;
		nanosleep(&ts, NULL);
		odp_atomic_store_u32(&prog_conf->is_running, 0U);
	} else {
		while (odp_atomic_load_u32(&prog_conf->is_running))
			sleep(1U);
	}

	stop_test(prog_conf);
	print_stats(prog_conf);

out_test:
	teardown(prog_conf);

out:
	if (shm_cfg != ODP_SHM_INVALID)
		(void)odp_shm_free(shm_cfg);

	if (odp_term_local()) {
		ODPH_ERR("ODP local terminate failed, exiting\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_global(odp_instance)) {
		ODPH_ERR("ODP global terminate failed, exiting\n");
		exit(EXIT_FAILURE);
	}

	return ret;
}
diff --git a/test/performance/odp_dmafwd_run.sh b/test/performance/odp_dmafwd_run.sh
new file mode 100755
index 000000000..ebb9b153a
--- /dev/null
+++ b/test/performance/odp_dmafwd_run.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+
+TEST_DIR="${TEST_DIR:-$PWD}"
+TEST_SRC_DIR=$(dirname $0)
+PERF_TEST_DIR=platform/${ODP_PLATFORM}/test/performance
+PERF_TEST_DIR=${TEST_SRC_DIR}/../../${PERF_TEST_DIR}
+
+BIN_NAME=odp_dmafwd
+BATCH=10
+TIME=0.1
+TESTS_RUN=0
+
# Locate and source the platform's pktio_env helper, which must provide
# setup_interfaces/cleanup_interfaces/validate_result. Looks first in the
# current directory, then under the platform's performance test directory;
# exits with an error if neither is found.
check_env()
{
	if [ -f "./pktio_env" ]; then
		. ./pktio_env
	elif [ "${ODP_PLATFORM}" = "" ]; then
		echo "$0: ERROR: ODP_PLATFORM must be defined"
		exit 1
	elif [ -f ${PERF_TEST_DIR}/dmafwd/pktio_env ]; then
		. ${PERF_TEST_DIR}/dmafwd/pktio_env
	else
		echo "ERROR: unable to find pktio_env"
		echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test/"
		echo "ODP_PLATFORM=\"${ODP_PLATFORM}\""
		exit 1
	fi
}
+
# Evaluate a test binary's exit status ($1): 0 counts the run and validates
# captured traffic, 1 aborts the whole script, anything else (e.g. the
# not-supported code) is treated as a skip and validation is bypassed.
check_result()
{
	if [ $1 -eq 0 ]; then
		TESTS_RUN=`expr $TESTS_RUN + 1`
	elif [ $1 -eq 1 ]; then
		echo "Test FAILED, exiting"
		exit 1
	else
		echo "Test SKIPPED"
		return 0
	fi

	# Provided by pktio_env sourced in check_env.
	validate_result
}
+
# Final exit: 77 (automake "skipped") when no test actually ran, 0
# otherwise. Failures have already exited earlier.
check_exit()
{
	if [ $TESTS_RUN -eq 0 ]; then
		exit 77
	fi

	exit 0
}
+
# Run the forwarder once per copy mode (-t): 0 = SW copy, 1 = DMA with
# event completion, 2 = DMA with poll completion.
check_env
setup_interfaces
echo "${BIN_NAME}: SW copy"
echo "==================="
./${BIN_NAME}${EXEEXT} -i ${IF0} -b ${BATCH} -T ${TIME} -t 0
check_result $?
echo "${BIN_NAME}: DMA copy event"
echo "===================="
./${BIN_NAME}${EXEEXT} -i ${IF0} -b ${BATCH} -T ${TIME} -t 1
check_result $?
echo "${BIN_NAME}: DMA copy poll"
echo "===================="
./${BIN_NAME}${EXEEXT} -i ${IF0} -b ${BATCH} -T ${TIME} -t 2
check_result $?
cleanup_interfaces
check_exit
diff --git a/test/performance/odp_ipsec.c b/test/performance/odp_ipsec.c
new file mode 100644
index 000000000..3ea93ec96
--- /dev/null
+++ b/test/performance/odp_ipsec.c
@@ -0,0 +1,1422 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2022, Marvell
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_ipsec.c
+ *
+ * Performance test application for IPsec APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif /* _GNU_SOURCE */
+
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <inttypes.h>
+
+/** @def POOL_NUM_PKT
+ * Number of packets in the pool
+ */
+#define POOL_NUM_PKT 4096
+
+#define MAX_DEQUEUE_BURST 16
+
+static uint8_t test_salt[16] = "0123456789abcdef";
+
+static uint8_t test_key16[16] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10,
+};
+
+static uint8_t test_key20[20] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14,
+};
+
+static uint8_t test_key24[24] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x15, 0x16, 0x17, 0x18
+};
+
+static uint8_t test_key32[32] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
+ 0x1f, 0x20,
+};
+
+static uint8_t test_key64[64] = { 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
+ 0x1f, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x4b, 0x2c, 0x2d,
+ 0x2e, 0x2f, 0x30, 0x31, 0x32,
+ 0x33, 0x34, 0x55, 0x36, 0x37,
+ 0x38, 0x39, 0x5a, 0x3b, 0x3c,
+ 0x3d, 0x3e, 0x5f, 0x40,
+};
+
+/**
+ * Structure that holds template for sa create call
+ * for different algorithms supported by test
+ */
+typedef struct {
+ const char *name; /**< Algorithm name */
+ odp_ipsec_crypto_param_t crypto; /**< Prefilled SA crypto param */
+} ipsec_alg_config_t;
+
+/**
+ * Parsed command line crypto arguments. Describes test configuration.
+ */
+typedef struct {
+ /**
+ * If non zero prints content of packets. Enabled by -d or
+ * --debug option.
+ */
+ int debug_packets;
+
+ /**
+ * Maximum number of outstanding encryption requests. Note code
+ * polls for results over queue and if nothing is available it can
+ * submit more encryption requests up to maximum number specified by
+ * this option. Specified through -f or --flight option.
+ */
+ int in_flight;
+
+ /**
+ * Number of packets to be IPsec processed to get good average number.
+ * Specified through -c or --count option.
+ * Default is 10000.
+ */
+ int packet_count;
+
+ /**
+ * Payload size to test. If 0 set of predefined payload sizes
+ * is tested. Specified through -l or --payload option.
+ */
+ unsigned int payload_length;
+
+ /**
+ * Pointer to selected algorithm to test. If NULL all available
+ * algorithms are tested. Name of algorithm is passed through
+ * -a or --algorithm option.
+ */
+ ipsec_alg_config_t *alg_config;
+
+ /**
+ * Use scheduler to get completion events from crypto operation.
+ * Specified through -s argument.
+ * */
+ int schedule;
+
+ /*
+ * Poll completion queue for crypto completion events.
+ * Specified through -p argument.
+ */
+ int poll;
+
+ /*
+ * Use tunnel instead of transport mode.
+ * Specified through -t argument.
+ */
+ int tunnel;
+
+ /*
+ * Use AH transformation.
+ * Specified through -u argument.
+ */
+ int ah;
+
+ /*
+ * Burst size.
+ * Prepare and submit as many packets for IPsec processing in each
+ * iteration of the loop. Specified through -b or --burst option.
+ */
+ int burst_size;
+
+ /*
+ * Packet vector size for IPsec completion events; 0 disables
+ * vector completion.
+ * Specified through -v or --vector argument.
+ */
+ uint32_t vec_pkt_size;
+} ipsec_args_t;
+
+/*
+ * Helper structure that holds averages for test of one algorithm
+ * for given payload size.
+ */
+typedef struct {
+ /**
+ * Elapsed time for one crypto operation.
+ */
+ double elapsed;
+
+ /**
+ * CPU time spent pre one crypto operation by whole process
+ * i.e include current and all other threads in process.
+ * It is filled with 'getrusage(RUSAGE_SELF, ...)' call.
+ */
+ double rusage_self;
+
+ /**
+ * CPU time spent per one crypto operation by current thread
+ * only. It is filled with 'getrusage(RUSAGE_THREAD, ...)'
+ * call.
+ */
+ double rusage_thread;
+} ipsec_run_result_t;
+
+/**
+ * Structure holds one snap to misc times of current process.
+ */
+typedef struct {
+ struct timeval tv; /**< Elapsed time */
+ struct rusage ru_self; /**< Rusage value for whole process */
+ struct rusage ru_thread; /**< Rusage value for current thread */
+} time_record_t;
+
+/**
+ * Set of predefined payloads.
+ */
+static unsigned int global_payloads[] = {
+ 64,
+ 256,
+ 1024,
+ 8192,
+ 16384
+};
+
+/** Number of payloads used in the test */
+static unsigned int global_num_payloads;
+
+/**
+ * Set of known algorithms to test
+ */
+static ipsec_alg_config_t algs_config[] = {
+ {
+ .name = "3des-cbc-null",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key = {
+ .data = test_key24,
+ .length = sizeof(test_key24)
+ },
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "3des-cbc-hmac-md5-96",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key = {
+ .data = test_key24,
+ .length = sizeof(test_key24)
+ },
+ .auth_alg = ODP_AUTH_ALG_MD5_HMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ },
+ },
+ {
+ .name = "null-hmac-md5-96",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_MD5_HMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ },
+ },
+ {
+ .name = "aes-cbc-null",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "aes-cbc-hmac-sha1-96",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ },
+ },
+ {
+ .name = "aes-ctr-null",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
+ .auth_alg = ODP_AUTH_ALG_NULL
+ },
+ },
+ {
+ .name = "aes-ctr-hmac-sha1-96",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ },
+ },
+ {
+ .name = "null-hmac-sha1-96",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
+ .auth_key = {
+ .data = test_key20,
+ .length = sizeof(test_key20)
+ },
+ },
+ },
+ {
+ .name = "null-hmac-sha256-128",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA256_HMAC,
+ .auth_key = {
+ .data = test_key32,
+ .length = sizeof(test_key32)
+ },
+ },
+ },
+ {
+ .name = "null-hmac-sha512-256",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_SHA512_HMAC,
+ .auth_key = {
+ .data = test_key64,
+ .length = sizeof(test_key64)
+ },
+ },
+ },
+ {
+ .name = "null-aes-gmac",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_NULL,
+ .auth_alg = ODP_AUTH_ALG_AES_GMAC,
+ .auth_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .auth_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
+ },
+ },
+ {
+ .name = "aes-gcm",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_GCM,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
+ .auth_alg = ODP_AUTH_ALG_AES_GCM,
+ },
+ },
+ {
+ .name = "aes-ccm",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_AES_CCM,
+ .cipher_key = {
+ .data = test_key16,
+ .length = sizeof(test_key16)
+ },
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 3,
+ },
+ .auth_alg = ODP_AUTH_ALG_AES_CCM,
+ },
+ },
+ {
+ .name = "chacha20-poly1305",
+ .crypto = {
+ .cipher_alg = ODP_CIPHER_ALG_CHACHA20_POLY1305,
+ .cipher_key = {
+ .data = test_key32,
+ .length = sizeof(test_key32)
+ },
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
+ .auth_alg = ODP_AUTH_ALG_CHACHA20_POLY1305,
+ },
+ },
+};
+
+/**
+ * Find corresponding config for given name. Returns NULL
+ * if config for given name is not found.
+ */
+static ipsec_alg_config_t *
+find_config_by_name(const char *name)
+{
+ /* Scan the static algorithm table for an exact name match;
+ * NULL when the name is unknown. */
+ for (unsigned int idx = 0; idx < ODPH_ARRAY_SIZE(algs_config); idx++)
+ if (strcmp(algs_config[idx].name, name) == 0)
+ return &algs_config[idx];
+
+ return NULL;
+}
+
+/**
+ * Helper function that prints list of algorithms that this
+ * test understands.
+ */
+static void
+print_config_names(const char *prefix)
+{
+ /* List every algorithm name known to this test, one per line,
+ * each preceded by the caller-supplied prefix. */
+ unsigned int idx = 0;
+
+ while (idx < ODPH_ARRAY_SIZE(algs_config)) {
+ printf("%s %s\n", prefix, algs_config[idx].name);
+ idx++;
+ }
+}
+
+/**
+ * Snap current time values and put them into 'rec'.
+ */
+static void
+fill_time_record(time_record_t *rec)
+{
+ /* Capture wall-clock plus process- and thread-level CPU usage in
+ * one shot so all three deltas refer to the same instant.
+ * RUSAGE_THREAD is a Linux extension (hence _GNU_SOURCE above). */
+ gettimeofday(&rec->tv, NULL);
+ getrusage(RUSAGE_SELF, &rec->ru_self);
+ getrusage(RUSAGE_THREAD, &rec->ru_thread);
+}
+
+/**
+ * Calculated CPU time difference for given two rusage structures.
+ * Note it adds user space and system time together.
+ */
+static unsigned long long
+get_rusage_diff(struct rusage *start, struct rusage *end)
+{
+ /* Sum user + system CPU time (microseconds) at each end point and
+ * return the difference. tv_sec is widened to unsigned long long
+ * BEFORE the multiply: the original multiplied in 'long', which on
+ * ILP32 targets overflows after ~35 minutes of CPU time. */
+ unsigned long long rusage_start;
+ unsigned long long rusage_end;
+
+ rusage_start = ((unsigned long long)start->ru_utime.tv_sec * 1000000) +
+ (start->ru_utime.tv_usec);
+ rusage_start += ((unsigned long long)start->ru_stime.tv_sec * 1000000) +
+ (start->ru_stime.tv_usec);
+
+ rusage_end = ((unsigned long long)end->ru_utime.tv_sec * 1000000) +
+ (end->ru_utime.tv_usec);
+ rusage_end += ((unsigned long long)end->ru_stime.tv_sec * 1000000) +
+ (end->ru_stime.tv_usec);
+
+ return rusage_end - rusage_start;
+}
+
+/**
+ * Get diff for RUSAGE_SELF (whole process) between two time snap
+ * records.
+ */
+static unsigned long long
+get_rusage_self_diff(time_record_t *start, time_record_t *end)
+{
+ /* CPU time (user + system, usec) consumed by the whole process. */
+ return get_rusage_diff(&start->ru_self, &end->ru_self);
+}
+
+/**
+ * Get diff for RUSAGE_THREAD (current thread only) between two
+ * time snap records.
+ */
+static unsigned long long
+get_rusage_thread_diff(time_record_t *start, time_record_t *end)
+{
+ /* CPU time (user + system, usec) consumed by this thread only. */
+ return get_rusage_diff(&start->ru_thread, &end->ru_thread);
+}
+
+/**
+ * Get diff of elapsed time between two time snap records
+ */
+static unsigned long long
+get_elapsed_usec(time_record_t *start, time_record_t *end)
+{
+ /* Convert both wall-clock snapshots to microseconds and subtract.
+ * tv_sec is widened before the multiply so the arithmetic cannot
+ * overflow 'long' on 32-bit targets (the original multiplied in the
+ * native time_t type first). */
+ unsigned long long s;
+ unsigned long long e;
+
+ s = ((unsigned long long)start->tv.tv_sec * 1000000) +
+ (start->tv.tv_usec);
+ e = ((unsigned long long)end->tv.tv_sec * 1000000) +
+ (end->tv.tv_usec);
+
+ return e - s;
+}
+
+/**
+ * Print header line for our report.
+ */
+static void
+print_result_header(void)
+{
+ /* Column layout must stay in sync with print_result() below. */
+ printf("\n%30.30s %15s %15s %15s %15s %15s %15s\n",
+ "algorithm", "avg over #", "payload (bytes)", "elapsed (us)",
+ "rusg self (us)", "rusg thrd (us)", "throughput (Kb)");
+}
+
+/**
+ * Print one line of our report.
+ */
+static void
+print_result(ipsec_args_t *cargs,
+ unsigned int payload_length,
+ ipsec_alg_config_t *config,
+ ipsec_run_result_t *result)
+{
+ unsigned int throughput;
+
+ /* Kb/s equivalent: packets per second (1e6 / elapsed-us per packet)
+ * times payload bytes, scaled by 1024; truncated to an integer. */
+ throughput = (1000000.0 / result->elapsed) * payload_length / 1024;
+ /* %u for 'throughput': it is unsigned, and printing an unsigned int
+ * with %d is a format/argument mismatch (UB per C11 7.21.6.1). */
+ printf("%30.30s %15d %15d %15.3f %15.3f %15.3f %15u\n",
+ config->name, cargs->packet_count, payload_length,
+ result->elapsed, result->rusage_self, result->rusage_thread,
+ throughput);
+}
+
+/* Build a big-endian IPv4 address from four byte values. Operands are
+ * cast to uint32_t before shifting: 'a << 24' on a plain int shifts into
+ * the sign bit for values >= 128, which is undefined behavior. Arguments
+ * are also fully parenthesized against surprising expansions. */
+#define IPV4ADDR(a, b, c, d) odp_cpu_to_be_32(((uint32_t)(a) << 24) | \
+ ((uint32_t)(b) << 16) | \
+ ((uint32_t)(c) << 8) | \
+ ((uint32_t)(d) << 0))
+
+/**
+ * Create ODP IPsec SA for given config.
+ */
+static odp_ipsec_sa_t
+create_sa_from_config(ipsec_alg_config_t *config,
+ ipsec_args_t *cargs)
+{
+ odp_ipsec_sa_param_t param;
+ odp_queue_t out_queue;
+
+ odp_ipsec_sa_param_init(&param);
+ memcpy(&param.crypto, &config->crypto,
+ sizeof(odp_ipsec_crypto_param_t));
+
+ /* Always outbound ESP; cargs->ah is not consulted here (the AH
+ * capability is checked by run_measure_one_config()). */
+ param.proto = ODP_IPSEC_ESP;
+ param.dir = ODP_IPSEC_DIR_OUTBOUND;
+
+ if (cargs->tunnel) {
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+ odp_ipsec_tunnel_param_t tunnel;
+
+ /* NOTE(review): src/dst are stack locals whose addresses go
+ * into the param; assumes odp_ipsec_sa_create() copies the
+ * addresses during the call — confirm against the ODP spec. */
+ memset(&tunnel, 0, sizeof(tunnel));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ param.mode = ODP_IPSEC_MODE_TUNNEL;
+ param.outbound.tunnel = tunnel;
+ } else {
+ param.mode = ODP_IPSEC_MODE_TRANSPORT;
+ }
+
+ /* Async modes (-s/-p) deliver completions to the "ipsec-out" queue
+ * created in main(); sync mode needs no destination queue. */
+ if (cargs->schedule || cargs->poll) {
+ out_queue = odp_queue_lookup("ipsec-out");
+ if (out_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("ipsec-out queue not found\n");
+ return ODP_IPSEC_SA_INVALID;
+ }
+ param.dest_queue = out_queue;
+ } else {
+ param.dest_queue = ODP_QUEUE_INVALID;
+ }
+
+ return odp_ipsec_sa_create(&param);
+}
+
+static uint8_t test_data[] = {
+ /* IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00
+};
+
+static inline void debug_packets(int debug, odp_packet_t *pkt, int num_pkts)
+{
+ /* Hex-dump every packet when -d/--debug was given; no-op otherwise. */
+ if (odp_likely(!debug))
+ return;
+ for (int i = 0; i < num_pkts; i++)
+ odp_packet_print_data(pkt[i], 0, odp_packet_len(pkt[i]));
+}
+
+static int
+make_packet_multi(odp_pool_t pkt_pool, unsigned int payload_length,
+ odp_packet_t pkt[], int num)
+{
+ int i, ret;
+
+ /* Allocate 'num' packets of 'payload_length' bytes; on partial
+ * allocation free whatever was obtained and fail. */
+ ret = odp_packet_alloc_multi(pkt_pool, payload_length, pkt, num);
+ if (ret != num) {
+ ODPH_ERR("Could not allocate buffer\n");
+ if (ret > 0)
+ odp_packet_free_sp(pkt, ret);
+ return -1;
+ }
+
+ /* Seed each packet with the IPv4+ICMP template, patch the IPv4
+ * total length and pad the rest with 0x01 bytes. Callers guarantee
+ * payload_length >= sizeof(test_data). */
+ for (i = 0; i < num; i++) {
+ odp_packet_copy_from_mem(pkt[i], 0, sizeof(test_data), test_data);
+ odp_packet_l3_offset_set(pkt[i], 0);
+
+ uint8_t *mem = odp_packet_data(pkt[i]);
+ ((odph_ipv4hdr_t *)mem)->tot_len = odp_cpu_to_be_16(payload_length);
+ memset(mem + sizeof(test_data), 1, payload_length - sizeof(test_data));
+ }
+
+ return 0;
+}
+
+static inline void check_ipsec_result(odp_packet_t ipsec_pkt)
+{
+ odp_ipsec_packet_result_t result;
+
+ /* Failures are only logged; the packet is still counted and freed
+ * by the caller, so the measurement loop keeps going. */
+ if (odp_unlikely(odp_ipsec_result(&result, ipsec_pkt)))
+ ODPH_ERR("odp_ipsec_result() failed\n");
+ else if (odp_unlikely(result.status.error.all))
+ ODPH_ERR("IPsec processing error: %" PRIu32 "\n",
+ result.status.error.all);
+}
+
+/**
+ * Run measurement iterations for given config and payload size.
+ * Result of run returned in 'result' out parameter.
+ */
+static int
+run_measure_one(ipsec_args_t *cargs,
+ odp_ipsec_sa_t sa,
+ unsigned int payload_length,
+ time_record_t *start,
+ time_record_t *end)
+{
+ int in_flight, pkts_allowed, num_out, num_pkts, rc = 0;
+ const int max_in_flight = cargs->in_flight;
+ const int burst_size = cargs->burst_size;
+ const int packet_count = cargs->packet_count;
+ const int debug = cargs->debug_packets;
+ odp_ipsec_out_param_t param;
+ odp_pool_t pkt_pool;
+
+ pkt_pool = odp_pool_lookup("packet_pool");
+ if (pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("pkt_pool not found\n");
+ return -1;
+ }
+
+ /* The IPv4+ICMP template must fit in the payload. */
+ if (payload_length < sizeof(test_data))
+ return -1;
+
+ int packets_sent = 0;
+ int packets_received = 0;
+
+ /* Initialize parameters block */
+ memset(&param, 0, sizeof(param));
+ param.num_sa = 1;
+ param.num_opt = 0;
+ param.sa = &sa;
+
+ fill_time_record(start);
+
+ while ((packets_sent < packet_count) ||
+ (packets_received < packet_count)) {
+ num_pkts = packet_count - packets_sent;
+
+ /* Enqueue up to burst size */
+ num_pkts = num_pkts > burst_size ? burst_size : num_pkts;
+
+ /* Enqueue up to (max in flight - current in flight) */
+ in_flight = packets_sent - packets_received;
+ pkts_allowed = max_in_flight - in_flight;
+
+ /* Enqueue either a burst of packets or skip */
+ num_pkts = num_pkts > pkts_allowed ? 0 : num_pkts;
+
+ if (odp_likely(num_pkts)) {
+ odp_packet_t out_pkt[num_pkts];
+ odp_packet_t pkt[num_pkts];
+ int i;
+
+ if (odp_unlikely(make_packet_multi(pkt_pool,
+ payload_length,
+ pkt,
+ num_pkts)))
+ return -1;
+
+ debug_packets(debug, pkt, num_pkts);
+ num_out = num_pkts;
+
+ /* Synchronous processing: results are available in
+ * out_pkt[] as soon as the call returns. */
+ rc = odp_ipsec_out(pkt, num_pkts,
+ out_pkt, &num_out,
+ &param);
+ if (odp_unlikely(rc <= 0)) {
+ ODPH_ERR("Failed odp_ipsec_out: rc = %d\n", rc);
+ odp_packet_free_sp(pkt, num_pkts);
+ break;
+ }
+
+ for (i = 0; i < num_out; i++)
+ check_ipsec_result(out_pkt[i]);
+
+ packets_sent += rc;
+ packets_received += num_out;
+ debug_packets(debug, out_pkt, num_out);
+
+ /* Free input packets the API did not consume, then
+ * the produced output packets. */
+ if (odp_unlikely(rc != num_pkts))
+ odp_packet_free_sp(&pkt[rc], num_pkts - rc);
+ odp_packet_free_sp(out_pkt, num_out);
+ }
+ }
+
+ fill_time_record(end);
+
+ return rc < 0 ? rc : 0;
+}
+
+static uint32_t dequeue_burst(odp_queue_t polled_queue,
+ odp_event_t *events,
+ int max_burst)
+{
+ /* Plain-queue mode polls the completion queue directly; scheduled
+ * mode pulls events from the scheduler without blocking. A negative
+ * dequeue result is reported as "no events". */
+ if (polled_queue == ODP_QUEUE_INVALID)
+ return odp_schedule_multi(NULL,
+ ODP_SCHED_NO_WAIT,
+ events,
+ max_burst);
+
+ int rc = odp_queue_deq_multi(polled_queue, events, max_burst);
+
+ return rc < 0 ? 0 : rc;
+}
+
+static inline uint32_t vec_pkt_handle(int debug, odp_event_t ev)
+{
+ /* Unpack a packet-vector completion event: check each packet's
+ * IPsec result, then free the packets and the vector itself.
+ * Returns how many packets the vector carried. */
+ odp_packet_vector_t vec = odp_packet_vector_from_event(ev);
+ uint32_t vec_size = odp_packet_vector_size(vec);
+ odp_packet_t *pkt_tbl;
+ uint32_t j;
+
+ odp_packet_vector_tbl(vec, &pkt_tbl);
+
+ for (j = 0; j < vec_size; j++)
+ check_ipsec_result(pkt_tbl[j]);
+
+ debug_packets(debug, pkt_tbl, vec_size);
+
+ odp_packet_free_sp(pkt_tbl, vec_size);
+ odp_packet_vector_free(vec);
+
+ return vec_size;
+}
+
+static int
+run_measure_one_async(ipsec_args_t *cargs,
+ odp_ipsec_sa_t sa,
+ unsigned int payload_length,
+ time_record_t *start,
+ time_record_t *end)
+{
+ int in_flight, packets_allowed, num_pkts, rc = 0;
+ const int max_in_flight = cargs->in_flight;
+ const int burst_size = cargs->burst_size;
+ const int packet_count = cargs->packet_count;
+ const int debug = cargs->debug_packets;
+ odp_ipsec_out_param_t param;
+ odp_pool_t pkt_pool;
+ odp_queue_t polled_queue = ODP_QUEUE_INVALID;
+
+ pkt_pool = odp_pool_lookup("packet_pool");
+ if (pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("pkt_pool not found\n");
+ return -1;
+ }
+
+ /* In poll mode completions are read from the plain queue; in
+ * schedule mode polled_queue stays invalid and dequeue_burst()
+ * uses the scheduler instead. */
+ if (cargs->poll) {
+ polled_queue = odp_queue_lookup("ipsec-out");
+ if (polled_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("ipsec-out queue not found\n");
+ return -1;
+ }
+ }
+
+ /* The IPv4+ICMP template must fit in the payload. */
+ if (payload_length < sizeof(test_data))
+ return -1;
+
+ int packets_sent = 0;
+ int packets_received = 0;
+
+ /* Initialize parameters block */
+ memset(&param, 0, sizeof(param));
+ param.num_sa = 1;
+ param.num_opt = 0;
+ param.sa = &sa;
+
+ fill_time_record(start);
+
+ while ((packets_sent < packet_count) ||
+ (packets_received < packet_count)) {
+
+ num_pkts = packet_count - packets_sent;
+
+ /* Enqueue up to burst size */
+ num_pkts = num_pkts > burst_size ? burst_size : num_pkts;
+
+ /* Enqueue up to (max in flight - current in flight) */
+ in_flight = packets_sent - packets_received;
+ packets_allowed = max_in_flight - in_flight;
+
+ if (num_pkts > 0 && num_pkts <= packets_allowed) {
+ odp_packet_t pkt[num_pkts];
+
+ if (odp_unlikely(make_packet_multi(pkt_pool,
+ payload_length,
+ pkt,
+ num_pkts)))
+ return -1;
+
+ debug_packets(debug, pkt, num_pkts);
+
+ /* Asynchronous submission; completions arrive later
+ * as events on the ipsec-out queue/scheduler. */
+ rc = odp_ipsec_out_enq(pkt, num_pkts, &param);
+ if (odp_unlikely(rc <= 0)) {
+ ODPH_ERR("Failed odp_ipsec_out_enq: rc = %d\n",
+ rc);
+ odp_packet_free_sp(pkt, num_pkts);
+ break;
+ }
+
+ if (odp_unlikely(rc != num_pkts))
+ odp_packet_free_sp(&pkt[rc], num_pkts - rc);
+
+ packets_sent += rc;
+ } else {
+ odp_packet_t pkt_out[max_in_flight];
+ int i = 0;
+
+ /*
+ * Dequeue packets until we can enqueue the next burst
+ * or until we have received all remaining packets
+ * when there are no more packets to be sent.
+ */
+ while (num_pkts > packets_allowed ||
+ (num_pkts == 0 && packets_received < packet_count)) {
+ odp_event_t events[MAX_DEQUEUE_BURST];
+ uint32_t num;
+
+ num = dequeue_burst(polled_queue, events, MAX_DEQUEUE_BURST);
+
+ for (uint32_t n = 0; n < num; n++) {
+ if (odp_event_type(events[n]) == ODP_EVENT_PACKET_VECTOR) {
+ uint32_t vec_size;
+
+ /* One vector event stands for
+ * vec_size packets: add the
+ * extra vec_size-1 here; the
+ * event itself is counted via
+ * 'num' below. */
+ vec_size = vec_pkt_handle(debug, events[n]);
+ packets_received += vec_size - 1;
+ packets_allowed += vec_size - 1;
+ } else {
+ pkt_out[i] = odp_ipsec_packet_from_event(events[n]);
+ check_ipsec_result(pkt_out[i]);
+ i++;
+ }
+
+ }
+ packets_received += num;
+ packets_allowed += num;
+ }
+ debug_packets(debug, pkt_out, i);
+
+ if (i > 0)
+ odp_packet_free_sp(pkt_out, i);
+ }
+ }
+
+ fill_time_record(end);
+
+ return rc < 0 ? rc : 0;
+}
+
+/**
+ * Process one algorithm. If a payload size is specified, only that
+ * single size is run; otherwise iterate over the set of predefined
+ * payload sizes.
+ */
+static int
+run_measure_one_config(ipsec_args_t *cargs,
+ ipsec_alg_config_t *config)
+{
+ unsigned int num_payloads = global_num_payloads;
+ unsigned int *payloads = global_payloads;
+ odp_ipsec_capability_t capa;
+ odp_ipsec_sa_t sa;
+ unsigned int i;
+ int rc = 0;
+
+ if (odp_ipsec_capability(&capa) < 0) {
+ ODPH_ERR("IPSEC capability call failed.\n");
+ return -1;
+ }
+
+ if (cargs->ah && (ODP_SUPPORT_NO == capa.proto_ah)) {
+ ODPH_ERR("IPSEC AH protocol not supported.\n");
+ return -1;
+ }
+
+ /* Algorithms the implementation cannot do are skipped (return 0),
+ * not treated as failures. */
+ rc = odph_ipsec_alg_check(&capa, config->crypto.cipher_alg,
+ config->crypto.cipher_key.length,
+ config->crypto.auth_alg,
+ config->crypto.auth_key.length);
+
+ if (rc) {
+ printf(" => %s skipped\n\n", config->name);
+ return 0;
+ }
+
+ sa = create_sa_from_config(config, cargs);
+ if (sa == ODP_IPSEC_SA_INVALID) {
+ ODPH_ERR("IPsec SA create failed.\n");
+ return -1;
+ }
+
+ print_result_header();
+ if (cargs->payload_length) {
+ num_payloads = 1;
+ payloads = &cargs->payload_length;
+ }
+
+ for (i = 0; i < num_payloads; i++) {
+ double count;
+ ipsec_run_result_t result;
+ time_record_t start, end;
+
+ if (cargs->schedule || cargs->poll)
+ rc = run_measure_one_async(cargs, sa,
+ payloads[i],
+ &start, &end);
+ else
+ rc = run_measure_one(cargs, sa,
+ payloads[i],
+ &start, &end);
+ if (rc)
+ break;
+
+ /* Convert totals to per-packet averages for the report. */
+ count = get_elapsed_usec(&start, &end);
+ result.elapsed = count / cargs->packet_count;
+
+ count = get_rusage_self_diff(&start, &end);
+ result.rusage_self = count / cargs->packet_count;
+
+ count = get_rusage_thread_diff(&start, &end);
+ result.rusage_thread = count / cargs->packet_count;
+
+ print_result(cargs, payloads[i],
+ config, &result);
+ }
+
+ odp_ipsec_sa_disable(sa);
+ /* In async modes SA disable completes asynchronously: drain events
+ * until the SA_DISABLE status event for this SA arrives.
+ * NOTE(review): drained events are not freed with odp_event_free()
+ * here — looks like a small leak per SA; verify against the ODP
+ * event ownership rules. */
+ if (cargs->schedule || cargs->poll) {
+ odp_queue_t out_queue = odp_queue_lookup("ipsec-out");
+ odp_ipsec_status_t status;
+
+ while (1) {
+ odp_event_t event;
+
+ if (cargs->poll)
+ event = odp_queue_deq(out_queue);
+ else
+ event = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (event != ODP_EVENT_INVALID &&
+ odp_event_type(event) == ODP_EVENT_IPSEC_STATUS &&
+ odp_ipsec_status(&status, event) == ODP_IPSEC_OK &&
+ status.id == ODP_IPSEC_STATUS_SA_DISABLE &&
+ status.sa == sa)
+ break;
+ }
+ }
+ odp_ipsec_sa_destroy(sa);
+
+ return rc;
+}
+
+/* Argument bundle passed to the worker thread in scheduled mode. */
+typedef struct thr_arg {
+ ipsec_args_t ipsec_args; /* Copy of the parsed CLI arguments */
+ ipsec_alg_config_t *ipsec_alg_config; /* Algorithm under test */
+} thr_arg_t;
+
+/* Worker thread entry: run the measurement and report its status.
+ * The original discarded the return value of run_measure_one_config(),
+ * so a failed run always looked like thread success; propagate it. */
+static int run_thr_func(void *arg)
+{
+ thr_arg_t *thr_args = (thr_arg_t *)arg;
+
+ return run_measure_one_config(&thr_args->ipsec_args,
+ thr_args->ipsec_alg_config);
+}
+
+/**
+ * Print usage information
+ */
+/* Print command line help. The example previously showed '-i 100000',
+ * but '-i' is not an accepted option (see shortopts in parse_args());
+ * '-c' (packet count) is what was meant. The one-line description also
+ * said "crypto" — a leftover from odp_crypto.c; this is the IPsec test. */
+static void usage(char *progname)
+{
+ printf("\n"
+ "Usage: %s OPTIONS\n"
+ " E.g. %s -c 100000\n"
+ "\n"
+ "OpenDataPlane IPsec speed measure.\n"
+ "Optional OPTIONS\n"
+ " -a, --algorithm <name> Specify algorithm name (default all)\n"
+ " Supported values are:\n",
+ progname, progname);
+
+ print_config_names(" ");
+ printf(" -d, --debug Enable dump of processed packets.\n"
+ " -f, --flight <number> Max number of packet processed in parallel (default 1)\n"
+ " -c, --count <number> Number of packets (default 10000)\n"
+ " -b, --burst <number> Number of packets in one IPsec API submission (default 1)\n"
+ " -v, --vector <number> Enable vector packet completion from IPsec APIs with specified vector size.\n"
+ " -l, --payload Payload length.\n"
+ " -s, --schedule Use scheduler for completion events.\n"
+ " -p, --poll Poll completion queue for completion events.\n"
+ " -t, --tunnel Use tunnel-mode IPsec transformation.\n"
+ " -u, --ah Use AH transformation instead of ESP.\n"
+ " -h, --help Display help and exit.\n"
+ "\n");
+}
+
+/* Parse command line options into 'cargs'. All fields are now set to
+ * explicit defaults so the function no longer depends on the caller
+ * zeroing the structure first ('poll' and 'tunnel' previously relied
+ * on main()'s memset). Exits on invalid combinations. */
+static void parse_args(int argc, char *argv[], ipsec_args_t *cargs)
+{
+ int opt;
+ int long_index;
+ static const struct option longopts[] = {
+ {"algorithm", optional_argument, NULL, 'a'},
+ {"debug", no_argument, NULL, 'd'},
+ {"flight", optional_argument, NULL, 'f'},
+ {"help", no_argument, NULL, 'h'},
+ {"count", optional_argument, NULL, 'c'},
+ {"burst", optional_argument, NULL, 'b'},
+ {"vector", optional_argument, NULL, 'v'},
+ {"payload", optional_argument, NULL, 'l'},
+ {"sessions", optional_argument, NULL, 'm'},
+ {"poll", no_argument, NULL, 'p'},
+ {"schedule", no_argument, NULL, 's'},
+ {"tunnel", no_argument, NULL, 't'},
+ {"ah", no_argument, NULL, 'u'},
+ {NULL, 0, NULL, 0}
+ };
+
+ /* NOTE(review): 'm' (--sessions) and 'n' are accepted but have no
+ * handler below — they look like leftovers from odp_crypto.c. Kept
+ * for now so existing invocations do not start failing. */
+ static const char *shortopts = "+a:b:c:df:hm:nl:sptuv:";
+
+ cargs->in_flight = 1;
+ cargs->debug_packets = 0;
+ cargs->packet_count = 10000;
+ cargs->burst_size = 1;
+ cargs->vec_pkt_size = 0;
+ cargs->payload_length = 0;
+ cargs->alg_config = NULL;
+ cargs->schedule = 0;
+ cargs->poll = 0;
+ cargs->tunnel = 0;
+ cargs->ah = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'a':
+ cargs->alg_config = find_config_by_name(optarg);
+ if (!cargs->alg_config) {
+ printf("cannot test crypto '%s' configuration\n",
+ optarg);
+ usage(argv[0]);
+ exit(-1);
+ }
+ break;
+ case 'd':
+ cargs->debug_packets = 1;
+ break;
+ case 'c':
+ cargs->packet_count = atoi(optarg);
+ break;
+ case 'b':
+ if (optarg == NULL)
+ cargs->burst_size = 32;
+ else
+ cargs->burst_size = atoi(optarg);
+ if (cargs->burst_size > POOL_NUM_PKT) {
+ printf("Invalid burst size (max allowed: %d)\n", POOL_NUM_PKT);
+ exit(-1);
+ }
+ break;
+ case 'v':
+ if (optarg == NULL)
+ cargs->vec_pkt_size = 32;
+ else
+ cargs->vec_pkt_size = atoi(optarg);
+ break;
+ case 'f':
+ cargs->in_flight = atoi(optarg);
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ case 'l':
+ cargs->payload_length = atoi(optarg);
+ break;
+ case 's':
+ cargs->schedule = 1;
+ break;
+ case 'p':
+ cargs->poll = 1;
+ break;
+ case 't':
+ cargs->tunnel = 1;
+ break;
+ case 'u':
+ cargs->ah = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* The in-flight window must be able to hold at least one burst. */
+ if (cargs->in_flight < cargs->burst_size) {
+ printf("-f (flight) must be greater than or equal to -b (burst)\n");
+ exit(-1);
+ }
+
+ optind = 1; /* reset 'extern optind' from the getopt lib */
+
+ if (cargs->schedule && cargs->poll) {
+ printf("-s (schedule) and -p (poll) options are not compatible\n");
+ usage(argv[0]);
+ exit(-1);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ odp_pool_t vec_pool = ODP_POOL_INVALID;
+ ipsec_args_t cargs;
+ odp_pool_t pool;
+ odp_queue_param_t qparam;
+ odp_pool_param_t param;
+ odp_queue_t out_queue = ODP_QUEUE_INVALID;
+ thr_arg_t thr_arg;
+ odp_cpumask_t cpumask;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ int num_workers = 1;
+ odph_helper_options_t helper_options;
+ odph_thread_t thread_tbl[num_workers];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_instance_t instance;
+ odp_init_t init_param;
+ odp_ipsec_capability_t ipsec_capa;
+ odp_pool_capability_t capa;
+ odp_ipsec_config_t config;
+ uint32_t max_seg_len;
+ unsigned int i;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ memset(&cargs, 0, sizeof(cargs));
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &cargs);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_WORKER)) {
+ ODPH_ERR("ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_sys_info_print();
+
+ if (odp_pool_capability(&capa)) {
+ ODPH_ERR("Pool capability request failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ max_seg_len = capa.pkt.max_seg_len;
+
+ /* Drop predefined payload sizes that would not fit in a single
+ * segment; the table is sorted ascending, so stop at the first
+ * oversized entry. */
+ for (i = 0; i < ODPH_ARRAY_SIZE(global_payloads); i++) {
+ if (global_payloads[i] > max_seg_len)
+ break;
+ }
+
+ global_num_payloads = i;
+
+ /* Create packet pool */
+ odp_pool_param_init(&param);
+ param.pkt.seg_len = max_seg_len;
+ param.pkt.len = max_seg_len;
+ param.pkt.num = POOL_NUM_PKT;
+ param.type = ODP_POOL_PACKET;
+ pool = odp_pool_create("packet_pool", &param);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ odp_pool_print(pool);
+
+ if (odp_ipsec_capability(&ipsec_capa) < 0) {
+ ODPH_ERR("IPSEC capability call failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (cargs.schedule && !ipsec_capa.queue_type_sched) {
+ ODPH_ERR("Scheduled type destination queue not supported.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (cargs.poll && !ipsec_capa.queue_type_plain) {
+ ODPH_ERR("Plain type destination queue not supported.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Vector completion (-v) needs pool + IPsec support and an async
+ * completion mode. */
+ if (cargs.vec_pkt_size) {
+ if (capa.vector.max_pools < 1) {
+ ODPH_ERR("Vector packet pool not available");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!ipsec_capa.vector.supported) {
+ ODPH_ERR("Vector packet completion not supported by IPsec.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (capa.vector.max_size < cargs.vec_pkt_size) {
+ ODPH_ERR("Vector size larger than max size supported by vector pool.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!cargs.schedule && !cargs.poll) {
+ ODPH_ERR("Vector packet is not supported with sync APIs.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Create vector pool */
+ odp_pool_param_init(&param);
+ param.vector.num = POOL_NUM_PKT;
+ param.vector.max_size = cargs.vec_pkt_size;
+ param.type = ODP_POOL_VECTOR;
+ vec_pool = odp_pool_create("vector_pool", &param);
+
+ if (vec_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Vector packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_pool_print(vec_pool);
+ }
+
+ odp_ipsec_config_init(&config);
+ config.max_num_sa = 2;
+ config.inbound.chksums.all_chksum = 0;
+ config.outbound.all_chksum = 0;
+
+ if (vec_pool != ODP_POOL_INVALID) {
+ config.vector.enable = true;
+ config.vector.pool = vec_pool;
+ config.vector.max_size = cargs.vec_pkt_size;
+ config.vector.max_tmo_ns = ipsec_capa.vector.max_tmo_ns;
+ }
+
+ /* Completion queue: scheduled for -s, plain for -p, none for sync. */
+ odp_queue_param_init(&qparam);
+ if (cargs.schedule) {
+ odp_schedule_config(NULL);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = odp_schedule_default_prio();
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ out_queue = odp_queue_create("ipsec-out", &qparam);
+ } else if (cargs.poll) {
+ qparam.type = ODP_QUEUE_TYPE_PLAIN;
+ out_queue = odp_queue_create("ipsec-out", &qparam);
+ }
+ if (cargs.schedule || cargs.poll) {
+ if (out_queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("ipsec-out queue create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ config.inbound.default_queue = out_queue;
+ } else {
+ config.inbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config.outbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config.inbound.default_queue = ODP_QUEUE_INVALID;
+ }
+ if (odp_ipsec_config(&config)) {
+ ODPH_ERR("odp_ipsec_config() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (cargs.schedule) {
+ printf("Run in async scheduled mode\n");
+
+ thr_arg.ipsec_args = cargs;
+ thr_arg.ipsec_alg_config = cargs.alg_config;
+ num_workers = odp_cpumask_default_worker(&cpumask,
+ num_workers);
+ (void)odp_cpumask_to_str(&cpumask, cpumaskstr,
+ sizeof(cpumaskstr));
+ printf("num worker threads: %i\n",
+ num_workers);
+ printf("first CPU: %i\n",
+ odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n",
+ cpumaskstr);
+ } else if (cargs.poll) {
+ printf("Run in async poll mode\n");
+ } else {
+ printf("Run in sync mode\n");
+ }
+
+ /* Single algorithm: run it (in a worker thread for scheduled mode);
+ * otherwise iterate over the whole table, restricting to NULL-cipher
+ * entries when -u (AH) was given. */
+ if (cargs.alg_config) {
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ if (cargs.schedule) {
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thr_func;
+ thr_param.arg = &thr_arg;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
+
+ odph_thread_join(thread_tbl, num_workers);
+ } else {
+ run_measure_one_config(&cargs, cargs.alg_config);
+ }
+ } else {
+ for (i = 0; i < ODPH_ARRAY_SIZE(algs_config); i++) {
+ if (cargs.ah &&
+ algs_config[i].crypto.cipher_alg !=
+ ODP_CIPHER_ALG_NULL)
+ continue;
+ run_measure_one_config(&cargs, algs_config + i);
+ }
+ }
+
+ /* Teardown in reverse order of creation. */
+ if (cargs.schedule || cargs.poll)
+ odp_queue_destroy(out_queue);
+
+ if (cargs.vec_pkt_size) {
+ if (odp_pool_destroy(vec_pool)) {
+ ODPH_ERR("Error: vector pool destroy\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Error: pool destroy\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: term local\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: term global\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
diff --git a/test/performance/odp_ipsec_run.sh b/test/performance/odp_ipsec_run.sh
new file mode 100755
index 000000000..2ddb48d07
--- /dev/null
+++ b/test/performance/odp_ipsec_run.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# Copyright (c) 2022, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# Wrapper for running odp_ipsec in "make check". TEST_DIR defaults to the
+# directory of this script; EXEEXT carries a possible build-system binary
+# suffix (e.g. ".exe").
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+# Run with a small number of packets in make check
+
+$TEST_DIR/odp_ipsec${EXEEXT} -c 100
+
+# Propagate failure to the test harness.
+if [ $? -ne 0 ] ; then
+ echo Test FAILED
+ exit 1
+fi
+
+exit 0
diff --git a/test/performance/odp_ipsecfwd.c b/test/performance/odp_ipsecfwd.c
new file mode 100644
index 000000000..0220cf6ae
--- /dev/null
+++ b/test/performance/odp_ipsecfwd.c
@@ -0,0 +1,2074 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2023 Nokia
+ */
+
+/**
+ * @example odp_ipsecfwd.c
+ *
+ * Simple IPsec performance tester application which forwards and processes
+ * plain and IPsec packets.
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdlib.h>
+#include <signal.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <errno.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <libconfig.h>
+
+#define PROG_NAME "odp_ipsecfwd"
+#define SHORT_PROG_NAME "ipsfwd"
+#define DELIMITER ","
+
+/* Compile-time limits and defaults for interfaces, SAs, forwarding entries,
+ * queues, pool sizing and TX burst size. */
+#define MAX_IFS 2U
+#define MAX_SAS 4000U
+#define MAX_FWDS 64U
+#define MAX_SPIS (UINT16_MAX + 1U)
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_QUEUES 64U
+#define MAX_SA_QUEUES 1024U
+#define PKT_SIZE 1024U
+#define PKT_CNT 32768U
+#define MAX_BURST 32U
+#define ORDERED 0U
+#define IP_ADDR_LEN 32U
+
+/* Build one exposed_alg_t entry, stringifying the enum name for printing. */
+#define ALG_ENTRY(_alg_name, _type) \
+ { \
+ .idx = (_alg_name), \
+ .type = (_type), \
+ .name = #_alg_name \
+ }
+
+/* Algorithm classes used in the exposed_algs table; "COMB" marks combined
+ * (AEAD-style) cipher/auth algorithms. */
+enum {
+ CIPHER_TYPE,
+ COMB_CIPHER_TYPE,
+ AUTH_TYPE,
+ COMB_AUTH_TYPE
+};
+
+/* Command line parsing result: OK, error, or terminate (e.g. after help). */
+typedef enum {
+ PRS_OK,
+ PRS_NOK,
+ PRS_TERM
+} parse_result_t;
+
+/* SA direction indices into spi_to_sa_map. */
+enum {
+ DIR_IN = 0,
+ DIR_OUT
+};
+
+typedef struct pktio_s pktio_t;
+
+/* Per-interface state. Output queues are either direct pktout queues or
+ * event queues, depending on RX/TX mode, hence the union. send_fn abstracts
+ * the TX call for the chosen mode. */
+typedef struct pktio_s {
+ union {
+ odp_pktout_queue_t out_dir_qs[MAX_QUEUES];
+ odp_queue_t out_ev_qs[MAX_QUEUES];
+ };
+
+ odp_pktin_queue_t in_dir_qs[MAX_QUEUES];
+ odph_ethaddr_t src_mac;
+ char *name;
+ odp_pktio_t handle;
+ uint32_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num);
+ uint32_t num_tx_qs;
+ uint8_t idx;
+} pktio_t;
+
+/* One forwarding rule: destination IP matched as (ip & mask) == prefix,
+ * rewriting to dst_mac out of pktio. */
+typedef struct {
+ uint32_t prefix;
+ uint32_t mask;
+ odph_ethaddr_t dst_mac;
+ const pktio_t *pktio;
+} fwd_entry_t;
+
+/* Linear, first-match forwarding table. */
+typedef struct {
+ fwd_entry_t entries[MAX_FWDS];
+ uint32_t num;
+} lookup_table_t;
+
+/* Per-worker packet/error counters. */
+typedef struct {
+ uint64_t ipsec_in_pkts;
+ uint64_t ipsec_out_pkts;
+ uint64_t ipsec_in_errs;
+ uint64_t ipsec_out_errs;
+ uint64_t status_errs;
+ uint64_t fwd_pkts;
+ uint64_t discards;
+} stats_t;
+
+typedef struct prog_config_s prog_config_t;
+
+/* Per-worker-thread state, cache-line aligned to avoid false sharing. */
+typedef struct ODP_ALIGNED_CACHE {
+ stats_t stats;
+ prog_config_t *prog_config;
+ int thr_idx;
+ uint8_t pktio;
+} thread_config_t;
+
+/* Parsed SA configuration. Key buffers hold 64 chars + NUL; the "extra"
+ * buffers hold 4 chars + NUL (presumably salt/nonce material — see
+ * odp_ipsec_sa_param_t). IP fields back the pointers stored in sa_param. */
+typedef struct {
+ odp_ipsec_sa_param_t sa_param;
+ char cipher_key[65U];
+ char cipher_key_extra[5U];
+ char auth_key[65U];
+ char auth_key_extra[5U];
+ odp_u32be_t lkp_dst_ip;
+ odp_u32be_t src_ip;
+ odp_u32be_t dst_ip;
+} sa_config_t;
+
+typedef uint32_t (*rx_fn_t)(thread_config_t *config, odp_event_t evs[], int num);
+typedef void (*ipsec_fn_t)(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl, stats_t *stats);
+typedef void (*drain_fn_t)(prog_config_t *config);
+
+/* Mode-dependent operation set, selected in setup_ipsec(): receive, process,
+ * complete IPsec results, and drain leftover events at shutdown. */
+typedef struct {
+ rx_fn_t rx;
+ ipsec_fn_t proc;
+ ipsec_fn_t compl;
+ drain_fn_t drain;
+} ops_t;
+
+/* Global program configuration and runtime handles. */
+typedef struct prog_config_s {
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ thread_config_t thread_config[MAX_WORKERS];
+ odp_ipsec_sa_t sas[MAX_SAS];
+ odp_queue_t sa_qs[MAX_SA_QUEUES];
+ pktio_t pktios[MAX_IFS];
+ lookup_table_t fwd_tbl;
+ odp_atomic_u32_t is_running;
+ sa_config_t default_cfg;
+ ops_t ops;
+ char *conf_file;
+ odp_instance_t odp_instance;
+ odp_queue_t compl_q;
+ odp_pool_t pktio_pool;
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ uint32_t num_input_qs;
+ uint32_t num_sa_qs;
+ uint32_t num_output_qs;
+ uint32_t num_pkts;
+ uint32_t pkt_len;
+ uint32_t num_ifs;
+ uint32_t num_sas;
+ int num_thrs;
+ odp_bool_t is_dir_rx;
+ odp_bool_t is_hashed_tx;
+ uint8_t mode;
+} prog_config_t;
+
+/* One printable algorithm entry (see ALG_ENTRY/exposed_algs). */
+typedef struct {
+ const char *name;
+ int idx;
+ int type;
+} exposed_alg_t;
+
+/* TX batching: packets accumulated for one output queue of one interface. */
+typedef struct {
+ odp_packet_t pkts[MAX_BURST];
+ const pktio_t *pktio;
+ uint32_t num;
+} pkt_vec_t;
+
+typedef struct {
+ pkt_vec_t vecs[MAX_QUEUES];
+ uint8_t num_qs;
+} pkt_out_t;
+
+/* Per-thread TX state over all interfaces (see __thread ifs below). */
+typedef struct {
+ pkt_out_t ifs[MAX_IFS];
+ odp_bool_t is_hashed_tx;
+ uint8_t q_idx;
+} pkt_ifs_t;
+
+/* Algorithms advertised in the usage text, filtered by IPsec capability. */
+static const exposed_alg_t exposed_algs[] = {
+ ALG_ENTRY(ODP_CIPHER_ALG_NULL, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_DES, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_3DES_CBC, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CBC, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CTR, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_ECB, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_GCM, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CCM, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_CHACHA20_POLY1305, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_NULL, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_MD5_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA1_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA224_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA256_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA384_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA512_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_GCM, COMB_AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_GMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_CCM, COMB_AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_CMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_XCBC_MAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_CHACHA20_POLY1305, COMB_AUTH_TYPE)
+};
+
+/* SPIs for in and out directions */
+static odp_ipsec_sa_t *spi_to_sa_map[2U][MAX_SPIS];
+/* Address of this object is used as the user_ptr sentinel marking packets
+ * that are in outbound IPsec processing (see is_ipsec_in()). */
+static const int ipsec_out_mark;
+/* Per-thread TX batching state. */
+static __thread pkt_ifs_t ifs;
+static prog_config_t *prog_conf;
+
+/* Zero the program configuration and set defaults: invalid handles and one
+ * input/SA/output queue with a single worker thread. */
+static void init_config(prog_config_t *config)
+{
+ memset(config, 0, sizeof(*config));
+ odp_ipsec_sa_param_init(&config->default_cfg.sa_param);
+ config->compl_q = ODP_QUEUE_INVALID;
+ config->pktio_pool = ODP_POOL_INVALID;
+ config->num_input_qs = 1;
+ config->num_sa_qs = 1;
+ config->num_output_qs = 1;
+ config->num_thrs = 1;
+}
+
+/* Signal handler: clear the run flag so worker loops exit. */
+static void terminate(int signal ODP_UNUSED)
+{
+ odp_atomic_store_u32(&prog_conf->is_running, 0U);
+}
+
+/* Split the comma-separated interface list and strdup() up to MAX_IFS names
+ * into config->pktios[]. Silently returns on allocation failure; the caller
+ * is expected to validate config->num_ifs afterwards. */
+static void parse_interfaces(prog_config_t *config, const char *optarg)
+{
+ char *tmp_str = strdup(optarg), *tmp;
+
+ if (tmp_str == NULL)
+ return;
+
+ tmp = strtok(tmp_str, DELIMITER);
+
+ while (tmp && config->num_ifs < MAX_IFS) {
+ config->pktios[config->num_ifs].name = strdup(tmp);
+
+ /* Only count the interface if the name copy succeeded. */
+ if (config->pktios[config->num_ifs].name != NULL)
+ ++config->num_ifs;
+
+ tmp = strtok(NULL, DELIMITER);
+ }
+
+ free(tmp_str);
+}
+
+/* Print the cipher and authentication algorithms from exposed_algs[] that the
+ * implementation reports as supported, together with their supported key
+ * (and ICV) lengths queried via odp_ipsec_cipher/auth_capability(). */
+static void print_supported_algos(const odp_ipsec_capability_t *ipsec_capa)
+{
+ int c_cnt, a_cnt;
+ const size_t len = ODPH_ARRAY_SIZE(exposed_algs);
+
+ printf(" Cipher algorithms:\n");
+
+ for (size_t i = 0U; i < len; ++i) {
+ if ((exposed_algs[i].type == CIPHER_TYPE ||
+ exposed_algs[i].type == COMB_CIPHER_TYPE) &&
+ (ipsec_capa->ciphers.all_bits & (1 << exposed_algs[i].idx)) > 0U) {
+ /* First call with NULL only returns the entry count. */
+ c_cnt = odp_ipsec_cipher_capability(exposed_algs[i].idx, NULL, 0);
+
+ if (c_cnt < 0)
+ continue;
+
+ printf(" %d: %s",
+ exposed_algs[i].idx, exposed_algs[i].name);
+ printf(exposed_algs[i].type == COMB_CIPHER_TYPE ? " (combined)" : "");
+
+ odp_ipsec_cipher_capability_t capa[c_cnt];
+
+ (void)odp_ipsec_cipher_capability(exposed_algs[i].idx, capa, c_cnt);
+
+ for (int j = 0; j < c_cnt; ++j)
+ printf(j == 0 ? " (key lengths: %u" : ", %u", capa[j].key_len);
+
+ printf(")\n");
+ }
+ }
+
+ printf(" Authentication algorithms:\n");
+
+ for (size_t i = 0U; i < len; ++i) {
+ if ((exposed_algs[i].type == AUTH_TYPE ||
+ exposed_algs[i].type == COMB_AUTH_TYPE) &&
+ (ipsec_capa->auths.all_bits & (1 << exposed_algs[i].idx)) > 0U) {
+ a_cnt = odp_ipsec_auth_capability(exposed_algs[i].idx, NULL, 0);
+
+ if (a_cnt < 0)
+ continue;
+
+ printf(" %d: %s",
+ exposed_algs[i].idx, exposed_algs[i].name);
+ printf(exposed_algs[i].type == COMB_AUTH_TYPE ? " (combined)" : "");
+
+ odp_ipsec_auth_capability_t capa[a_cnt];
+
+ (void)odp_ipsec_auth_capability(exposed_algs[i].idx, capa, a_cnt);
+
+ for (int j = 0; j < a_cnt; ++j)
+ printf(j == 0 ? " (key/icv lengths: %u/%u" : ", %u/%u",
+ capa[j].key_len, capa[j].icv_len);
+
+ printf(")\n");
+ }
+ }
+}
+
+/* Print the full usage text, including an example libconfig configuration,
+ * the supported algorithms (capability-dependent), and the defaults for
+ * pool sizing clamped against pool capability limits. */
+static void print_usage(void)
+{
+ odp_pool_capability_t pool_capa;
+ odp_ipsec_capability_t ipsec_capa;
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("Error querying pool capabilities\n");
+ return;
+ }
+
+ if (odp_ipsec_capability(&ipsec_capa) < 0) {
+ ODPH_ERR("Error querying IPsec capabilities\n");
+ return;
+ }
+
+ printf("\n"
+ "Simple IPsec performance tester. Forward and process plain and IPsec packets.\n"
+ "\n"
+ "Usage: %s OPTIONS\n"
+ "\n"
+ " E.g. %s -i ens9f1 -C /etc/odp/ipsecfwd.conf\n"
+ "\n"
+ " With ipsecfwd.conf containing, for example:\n"
+ " default: {\n"
+ " dir = 1\n"
+ " proto = 0\n"
+ " mode = 0\n"
+ " crypto: {\n"
+ " cipher_alg = 4\n"
+ " cipher_key = \"jWnZr4t7w!zwC*F-\"\n"
+ " auth_alg = 2\n"
+ " auth_key = \"n2r5u7x!A%%D*\"\n"
+ " icv_len = 12\n"
+ " };\n"
+ " };\n"
+ "\n"
+ " sa: (\n"
+ " {\n"
+ " spi = 1337\n"
+ " outbound: {\n"
+ " tunnel: {\n"
+ " src_addr = \"192.168.1.10\"\n"
+ " dst_addr = \"192.168.1.16\"\n"
+ " };\n"
+ " };\n"
+ " },\n"
+ " {\n"
+ " spi = 1338\n"
+ " outbound: {\n"
+ " tunnel: {\n"
+ " src_addr = \"192.168.3.110\"\n"
+ " dst_addr = \"192.168.3.116\"\n"
+ " };\n"
+ " };\n"
+ " }\n"
+ " );\n"
+ "\n"
+ " fwd: (\n"
+ " {\n"
+ " prefix: \"192.168.1.0/24\"\n"
+ " if: \"ens9f1\"\n"
+ " dst_mac: \"00:00:05:00:07:00\"\n"
+ " },\n"
+ " {\n"
+ " prefix: \"192.1.0.0/16\"\n"
+ " if: \"ens9f0\"\n"
+ " dst_mac: \"00:00:05:00:08:00\"\n"
+ " }\n"
+ " );\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ "\n"
+ " -i, --interfaces Ethernet interfaces for packet I/O, comma-separated,\n"
+ " no spaces.\n"
+ " -C, --conf Configuration file. 'libconfig' syntax is expected.\n"
+ " SA configuration supports default fallback, i.e.\n"
+ " individual SA configuration blocks may omit some\n"
+ " parameters and instead set these once in default block\n"
+ " which then are used to fill missing parameters. The only\n"
+ " required argument for an SA is the 'spi' parameter.\n"
+ " Individual SA parameter blocks are expected to be in\n"
+ " 'sa'-named list. Parameter naming follows API\n"
+ " specification, see 'odp_ipsec_sa_param_t' for parameter\n"
+ " names and hierarchy. Traffic is mapped to SAs based on UDP\n"
+ " port: the port is used as the SPI. For forwarding entries,\n"
+ " individual parameter blocks are similarly expected to be\n"
+ " in 'fwd'-named list. With forwarding entries, every\n"
+ " parameter is always required and interfaces present in\n"
+ " forwarding entries should be one of the interfaces passed\n"
+ " with '--interfaces' option. The entries are looked up\n"
+ " in the order they are in the list. See example above for\n"
+ " potential SA and forwarding configuration.\n"
+ "\n"
+ " Supported cipher and authentication algorithms for SAs:\n",
+ PROG_NAME, PROG_NAME);
+ print_supported_algos(&ipsec_capa);
+ printf("\n"
+ "Optional OPTIONS:\n"
+ "\n"
+ " -n, --num_pkts Number of packet buffers allocated for packet I/O pool.\n"
+ " %u by default.\n"
+ " -l, --pkt_len Maximum size of packet buffers in packet I/O pool. %u by\n"
+ " default.\n"
+ " -c, --count Worker thread count. 1 by default.\n"
+ " -m, --mode Queueing mode.\n"
+ " 0: ordered (default)\n"
+ " 1: parallel\n"
+ " -I, --num_input_qs Input queue count. 1 by default.\n"
+ " -S, --num_sa_qs SA queue count. 1 by default.\n"
+ " -O, --num_output_qs Output queue count. 1 by default.\n"
+ " -d, --direct_rx Use direct RX. Interfaces will be polled by workers\n"
+ " directly. '--mode', '--num_input_qs' and '--num_output_qs'\n"
+ " options are ignored, input and output queue counts will\n"
+ " match worker count.\n"
+ " -h, --help This help.\n"
+ "\n", pool_capa.pkt.max_num > 0U ? ODPH_MIN(pool_capa.pkt.max_num, PKT_CNT) :
+ PKT_CNT, pool_capa.pkt.max_len > 0U ? ODPH_MIN(pool_capa.pkt.max_len, PKT_SIZE) :
+ PKT_SIZE);
+}
+
+/* Map an inbound IPsec packet to its SA: read the ESP header at the L4
+ * offset and look the SPI up in spi_to_sa_map[DIR_IN]. Returns NULL for
+ * non-IPsec packets, copy failures, or SPIs above UINT16_MAX (only 16-bit
+ * SPI values are mapped). */
+static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt)
+{
+ odph_esphdr_t esp;
+ uint32_t spi;
+
+ if (!odp_packet_has_ipsec(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0)
+ return NULL;
+
+ spi = odp_be_to_cpu_32(esp.spi);
+
+ return spi <= UINT16_MAX ? spi_to_sa_map[DIR_IN][spi] : NULL;
+}
+
+/* Enqueue packets for asynchronous inbound IPsec processing, one SA per
+ * packet. Returns the number of packets accepted; the caller frees the rest.
+ * user_ptr is cleared to NULL to tag the result events as inbound (see
+ * is_ipsec_in()). */
+static inline int process_ipsec_in_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_in_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], NULL);
+
+ /* Retry with the remaining packets until all are enqueued or the
+ * implementation stops accepting them. */
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_in_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+/* Map a plain packet to an outbound SA: the UDP destination port doubles as
+ * the SPI (see usage text) and indexes spi_to_sa_map[DIR_OUT]. Returns NULL
+ * for non-UDP packets, copy failures, or port 0. */
+static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt)
+{
+ odph_udphdr_t udp;
+ uint16_t dst_port;
+
+ if (!odp_packet_has_udp(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0)
+ return NULL;
+
+ dst_port = odp_be_to_cpu_16(udp.dst_port);
+
+ return dst_port ? spi_to_sa_map[DIR_OUT][dst_port] : NULL;
+}
+
+/* Enqueue packets for asynchronous outbound IPsec processing, one SA per
+ * packet. Returns the number of packets accepted; the caller frees the rest.
+ * user_ptr is set to &ipsec_out_mark to tag the result events as outbound
+ * (see is_ipsec_in()). */
+static inline int process_ipsec_out_enq(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_out_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark);
+
+ /* Retry with the remaining packets until all are enqueued or the
+ * implementation stops accepting them. */
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_out_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+/* Linear first-match lookup: return the first entry whose masked prefix
+ * matches the destination IP, or NULL. Entries are checked in configuration
+ * order (no longest-prefix semantics). */
+static inline const fwd_entry_t *get_fwd_entry(lookup_table_t *table, uint32_t ip)
+{
+ fwd_entry_t *entry;
+
+ for (uint32_t i = 0U; i < table->num; ++i) {
+ entry = &table->entries[i];
+
+ if ((ip & entry->mask) == entry->prefix)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Resolve the output interface for a packet and rewrite its Ethernet header.
+ * Looks up the IPv4 destination in the forwarding table, adjusts the packet
+ * head so exactly ODPH_ETHHDR_LEN bytes precede the IP header, and writes a
+ * fresh Ethernet header (fwd dst MAC, egress interface src MAC). When q_idx
+ * is non-NULL (hashed TX), picks a TX queue from (src_ip ^ dst_ip) modulo
+ * the interface's TX queue count. Returns the egress pktio, or NULL when
+ * the packet cannot be forwarded (caller then drops it). */
+static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, lookup_table_t *fwd_tbl,
+ uint8_t *q_idx)
+{
+ const uint32_t l3_off = odp_packet_l3_offset(pkt);
+ odph_ipv4hdr_t ipv4;
+ uint32_t dst_ip, src_ip;
+ const fwd_entry_t *fwd;
+ odph_ethhdr_t eth;
+
+ if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0)
+ return NULL;
+
+ dst_ip = odp_be_to_cpu_32(ipv4.dst_addr);
+ fwd = get_fwd_entry(fwd_tbl, dst_ip);
+
+ if (fwd == NULL)
+ return NULL;
+
+ /* Normalize headroom: pull or push so the L2 header starts at offset 0
+ * and is exactly one Ethernet header long. */
+ if (l3_off != ODPH_ETHHDR_LEN) {
+ if (l3_off > ODPH_ETHHDR_LEN) {
+ if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL)
+ return NULL;
+ } else {
+ if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL)
+ return NULL;
+ }
+ }
+
+ eth.dst = fwd->dst_mac;
+ eth.src = fwd->pktio->src_mac;
+ eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+ if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, &eth) < 0)
+ return NULL;
+
+ if (q_idx != NULL) {
+ src_ip = odp_be_to_cpu_32(ipv4.src_addr);
+ *q_idx = (src_ip ^ dst_ip) % fwd->pktio->num_tx_qs;
+ }
+
+ return fwd->pktio;
+}
+
+/* Forward a burst: resolve each packet's egress interface/queue, batch
+ * packets into per-interface per-queue vectors (thread-local 'ifs'), then
+ * transmit each non-empty vector via the interface's send_fn. Unroutable
+ * packets and TX leftovers are freed. Returns the number of packets actually
+ * transmitted; the caller accounts the difference as discards. */
+static inline uint32_t forward_packets(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl)
+{
+ odp_packet_t pkt;
+ odp_bool_t is_hashed_tx = ifs.is_hashed_tx;
+ /* Hashed TX: queue chosen per packet by lookup_and_apply(). Otherwise
+ * all packets go to this thread's fixed queue index. */
+ uint8_t q_idx = is_hashed_tx ? 0U : ifs.q_idx, qs_done;
+ uint8_t *q_idx_ptr = is_hashed_tx ? &q_idx : NULL;
+ const pktio_t *pktio;
+ pkt_out_t *out;
+ pkt_vec_t *vec;
+ uint32_t num_procd = 0U, ret;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ pktio = lookup_and_apply(pkt, fwd_tbl, q_idx_ptr);
+
+ if (pktio == NULL) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ out = &ifs.ifs[pktio->idx];
+ vec = &out->vecs[q_idx];
+
+ /* Track how many distinct queues got packets, so the TX loop
+ * below can stop early. */
+ if (vec->num == 0U)
+ out->num_qs++;
+
+ vec->pkts[vec->num++] = pkt;
+ vec->pktio = pktio;
+ }
+
+ for (uint32_t i = 0U; i < MAX_IFS; ++i) {
+ qs_done = 0U;
+ out = &ifs.ifs[i];
+
+ for (uint32_t j = 0U; j < MAX_QUEUES && qs_done < out->num_qs; ++j) {
+ if (out->vecs[j].num == 0U)
+ continue;
+
+ vec = &out->vecs[j];
+ pktio = vec->pktio;
+ ret = pktio->send_fn(pktio, j, vec->pkts, vec->num);
+
+ /* Free whatever the interface did not accept. */
+ if (odp_unlikely(ret < vec->num))
+ odp_packet_free_multi(&vec->pkts[ret], vec->num - ret);
+
+ ++qs_done;
+ vec->num = 0U;
+ num_procd += ret;
+ }
+
+ out->num_qs = 0U;
+ }
+
+ return num_procd;
+}
+
+/* Async-mode outbound stage: split a burst into packets matching an outbound
+ * SA (enqueued for IPsec encapsulation) and plain packets (forwarded
+ * directly). Updates stats; frees packets the IPsec enqueue rejected. */
+static inline void process_packets_out_enq(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ sa = get_out_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_out_enq(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_out_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+/* Async-mode inbound stage (ops.proc for scheduled mode): drop errored
+ * packets, enqueue IPsec packets with a matching inbound SA for
+ * decapsulation, and pass the rest to the outbound stage. */
+static void process_packets_in_enq(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ ++stats->discards;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ sa = get_in_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_in_enq(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_in_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats);
+}
+
+/* Distinguish IPsec result events: user_ptr was cleared (NULL) for inbound
+ * and set to &ipsec_out_mark for outbound before enqueue. */
+static inline odp_bool_t is_ipsec_in(odp_packet_t pkt)
+{
+ return odp_packet_user_ptr(pkt) == NULL;
+}
+
+/* Async-mode completion stage (ops.compl): consume IPsec result packets.
+ * Failed results are counted per direction and freed. Successfully
+ * decapsulated inbound packets re-enter the outbound stage (they may match
+ * an outbound SA, tunnel-to-tunnel); successfully encapsulated outbound
+ * packets are forwarded. */
+static void complete_ipsec_ops(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_bool_t is_in;
+ odp_ipsec_packet_result_t result;
+ int num_pkts_out = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ is_in = is_ipsec_in(pkt);
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (is_in) {
+ ++stats->ipsec_in_pkts;
+ pkts_out[num_pkts_out++] = pkt;
+ } else {
+ ++stats->ipsec_out_pkts;
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out_enq(pkts_out, num_pkts_out, fwd_tbl, stats);
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+/* Shutdown drain for scheduled mode (ops.drain): pop and free every event
+ * still held by the scheduler. */
+static void drain_scheduler(prog_config_t *config ODP_UNUSED)
+{
+ odp_event_t ev;
+
+ while (true) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+}
+
+/* Synchronous inbound IPsec, one SA per packet. Output packets are written
+ * to pkts_out[] at the same indices as the inputs; returns the number of
+ * inputs consumed.
+ * NOTE(review): num_out is passed per call but its result is not read back —
+ * this appears to assume odp_ipsec_in() produces exactly one output packet
+ * per consumed input (no fragmentation); verify against the API contract. */
+static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num,
+ odp_packet_t pkts_out[])
+{
+ odp_ipsec_in_param_t param;
+ int left, sent = 0, num_out, ret;
+
+ memset(&param, 0, sizeof(param));
+
+ while (sent < num) {
+ left = num - sent;
+ num_out = left;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_in(&pkts[sent], left, &pkts_out[sent], &num_out, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+/* Synchronous outbound IPsec, one SA per packet. Mirror of
+ * process_ipsec_in(): encapsulated packets land in pkts_out[] at the input
+ * indices; returns the number of inputs consumed.
+ * NOTE(review): like the inbound variant, num_out is not read back — assumes
+ * a 1:1 input/output packet mapping; confirm no fragmentation is in use. */
+static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num,
+ odp_packet_t pkts_out[])
+{
+ odp_ipsec_out_param_t param;
+ int left, sent = 0, num_out, ret;
+
+ memset(&param, 0, sizeof(param));
+
+ while (sent < num) {
+ left = num - sent;
+ num_out = left;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_out(&pkts[sent], left, &pkts_out[sent], &num_out, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+/* Sync-mode outbound stage: packets with an outbound SA are encapsulated
+ * inline via process_ipsec_out(), their results checked immediately, and
+ * successes merged with the plain packets for forwarding. */
+static inline void process_packets_out(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST], pkts_ips_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
+ odp_ipsec_packet_result_t result;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ sa = get_out_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips, pkts_ips_out);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_out_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+
+ /* Check each synchronous result; only successes are forwarded. */
+ for (int i = 0; i < num_procd; ++i) {
+ pkt = pkts_ips_out[i];
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ ++stats->ipsec_out_pkts;
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+/* Sync-mode inbound stage (ops.proc for direct RX): drop errored packets,
+ * decapsulate IPsec packets inline via process_ipsec_in(), check results,
+ * then pass all surviving plain packets to the outbound stage. */
+static void process_packets_in(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST], pkts_ips_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
+ odp_ipsec_packet_result_t result;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ ++stats->discards;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ sa = get_in_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips, pkts_ips_out);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ stats->ipsec_in_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+
+ /* Check each synchronous result; successes continue to the
+ * outbound stage like any plain packet. */
+ for (int i = 0; i < num_procd; ++i) {
+ pkt = pkts_ips_out[i];
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ ++stats->ipsec_in_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ ++stats->ipsec_in_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ ++stats->ipsec_in_pkts;
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
+}
+
+/* Shutdown drain for direct RX mode (ops.drain): receive and free any
+ * packets still pending on every input queue of every interface. */
+static void drain_direct_inputs(prog_config_t *config)
+{
+ odp_packet_t pkt;
+
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ for (uint32_t j = 0U; j < config->num_input_qs; ++j) {
+ while (odp_pktin_recv(config->pktios[i].in_dir_qs[j], &pkt, 1) == 1)
+ odp_packet_free(pkt);
+ }
+ }
+}
+
+/* Configure global IPsec operation. In scheduled mode, create a parallel
+ * scheduled queue for SA status events and select the async ops
+ * (enqueue/complete/drain_scheduler); in direct RX mode, select the sync ops
+ * (inline process/drain_direct_inputs). Returns false on failure. */
+static odp_bool_t setup_ipsec(prog_config_t *config)
+{
+ odp_queue_param_t q_param;
+ odp_ipsec_config_t ipsec_config;
+ char q_name[ODP_QUEUE_NAME_LEN];
+
+ if (!config->is_dir_rx) {
+ snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status");
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ q_param.sched.prio = odp_schedule_default_prio();
+ q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q_param.sched.group = ODP_SCHED_GROUP_ALL;
+ config->compl_q = odp_queue_create(q_name, &q_param);
+
+ if (config->compl_q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating IPsec completion queue\n");
+ return false;
+ }
+ }
+
+ odp_ipsec_config_init(&ipsec_config);
+
+ if (!config->is_dir_rx) {
+ ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ config->ops.proc = process_packets_in_enq;
+ config->ops.compl = complete_ipsec_ops;
+ config->ops.drain = drain_scheduler;
+ } else {
+ ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config->ops.proc = process_packets_in;
+ /* Sync mode has no separate completion events. */
+ config->ops.compl = NULL;
+ config->ops.drain = drain_direct_inputs;
+ }
+
+ ipsec_config.inbound.default_queue = config->compl_q;
+ /* For tunnel to tunnel, we need to parse up to this to check the UDP port for SA. */
+ ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_L4;
+
+ if (odp_ipsec_config(&ipsec_config) < 0) {
+ ODPH_ERR("Error configuring IPsec\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Create the scheduled destination queues for SA completion events, with
+ * max priority and ordered/parallel sync per the configured mode. On
+ * failure, config->num_sa_qs is set to the number of queues actually
+ * created so cleanup can destroy exactly those. Returns false on failure. */
+static odp_bool_t create_sa_dest_queues(odp_ipsec_capability_t *ipsec_capa,
+ prog_config_t *config)
+{
+ odp_queue_param_t q_param;
+ const uint32_t max_sa_qs = ODPH_MIN(MAX_SA_QUEUES, ipsec_capa->max_queues);
+
+ if (config->num_sa_qs == 0U || config->num_sa_qs > max_sa_qs) {
+ ODPH_ERR("Invalid number of SA queues: %u (min: 1, max: %u)\n", config->num_sa_qs,
+ max_sa_qs);
+ config->num_sa_qs = 0U;
+ return false;
+ }
+
+ for (uint32_t i = 0U; i < config->num_sa_qs; ++i) {
+ char q_name[ODP_QUEUE_NAME_LEN];
+
+ snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_compl_%u", i);
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ q_param.sched.prio = odp_schedule_max_prio();
+ q_param.sched.sync = config->mode == ORDERED ? ODP_SCHED_SYNC_ORDERED :
+ ODP_SCHED_SYNC_PARALLEL;
+ q_param.sched.group = ODP_SCHED_GROUP_ALL;
+ config->sa_qs[i] = odp_queue_create(q_name, &q_param);
+
+ if (config->sa_qs[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating SA destination queue (created count: %u)\n", i);
+ config->num_sa_qs = i;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Parse the optional "crypto" sub-block of an SA entry into sa_param.crypto,
+ * copying key strings into the config's own buffers and pointing the key
+ * descriptors at them. Missing settings leave existing (default) values.
+ * NOTE(review): strcpy() into the fixed-size key buffers is unchecked — a
+ * config string longer than the buffer (64/4 chars) overflows; consider
+ * validating lengths in the configuration file. */
+static void parse_crypto(config_setting_t *cfg, sa_config_t *config)
+{
+ int val;
+ const char *val_str;
+ config_setting_t *cs = config_setting_lookup(cfg, "crypto");
+
+ if (cs == NULL)
+ return;
+
+ if (config_setting_lookup_int(cs, "cipher_alg", &val) == CONFIG_TRUE)
+ config->sa_param.crypto.cipher_alg = val;
+
+ if (config_setting_lookup_string(cs, "cipher_key", &val_str) == CONFIG_TRUE) {
+ strcpy(config->cipher_key, val_str);
+ config->sa_param.crypto.cipher_key.data = (uint8_t *)config->cipher_key;
+ config->sa_param.crypto.cipher_key.length =
+ strlen((const char *)config->cipher_key);
+ }
+
+ if (config_setting_lookup_string(cs, "cipher_key_extra", &val_str) == CONFIG_TRUE) {
+ strcpy(config->cipher_key_extra, val_str);
+ config->sa_param.crypto.cipher_key_extra.data =
+ (uint8_t *)config->cipher_key_extra;
+ config->sa_param.crypto.cipher_key_extra.length =
+ strlen((const char *)config->cipher_key_extra);
+ }
+
+ if (config_setting_lookup_int(cs, "auth_alg", &val) == CONFIG_TRUE)
+ config->sa_param.crypto.auth_alg = val;
+
+ if (config_setting_lookup_string(cs, "auth_key", &val_str) == CONFIG_TRUE) {
+ strcpy(config->auth_key, val_str);
+ config->sa_param.crypto.auth_key.data = (uint8_t *)config->auth_key;
+ config->sa_param.crypto.auth_key.length = strlen((const char *)config->auth_key);
+ }
+
+ if (config_setting_lookup_string(cs, "auth_key_extra", &val_str) == CONFIG_TRUE) {
+ strcpy(config->auth_key_extra, val_str);
+ config->sa_param.crypto.auth_key_extra.data = (uint8_t *)config->auth_key_extra;
+ config->sa_param.crypto.auth_key_extra.length =
+ strlen((const char *)config->auth_key_extra);
+ }
+
+ if (config_setting_lookup_int(cs, "icv_len", &val) == CONFIG_TRUE)
+ config->sa_param.crypto.icv_len = val;
+}
+
+/* Parse the optional "opt" sub-block of an SA entry into sa_param.opt.
+ * Missing settings leave existing (default) values. */
+static void parse_opt(config_setting_t *cfg, sa_config_t *config)
+{
+ int val;
+ config_setting_t *cs = config_setting_lookup(cfg, "opt");
+
+ if (cs == NULL)
+ return;
+
+ if (config_setting_lookup_int(cs, "esn", &val) == CONFIG_TRUE)
+ config->sa_param.opt.esn = val;
+
+ if (config_setting_lookup_int(cs, "udp_encap", &val) == CONFIG_TRUE)
+ config->sa_param.opt.udp_encap = val;
+
+ if (config_setting_lookup_int(cs, "copy_dscp", &val) == CONFIG_TRUE)
+ config->sa_param.opt.copy_dscp = val;
+
+ if (config_setting_lookup_int(cs, "copy_flabel", &val) == CONFIG_TRUE)
+ config->sa_param.opt.copy_flabel = val;
+
+ if (config_setting_lookup_int(cs, "copy_df", &val) == CONFIG_TRUE)
+ config->sa_param.opt.copy_df = val;
+
+ if (config_setting_lookup_int(cs, "dec_ttl", &val) == CONFIG_TRUE)
+ config->sa_param.opt.dec_ttl = val;
+}
+
+/* Parse the optional "lifetime" sub-block (soft_limit/hard_limit byte and
+ * packet counts) of an SA entry into sa_param.lifetime. */
+static void parse_limits(config_setting_t *cfg, sa_config_t *config)
+{
+ config_setting_t *cs = config_setting_lookup(cfg, "lifetime"), *soft, *hard;
+ long long val;
+
+ if (cs == NULL)
+ return;
+
+ soft = config_setting_lookup(cs, "soft_limit");
+ hard = config_setting_lookup(cs, "hard_limit");
+
+ if (soft != NULL) {
+ if (config_setting_lookup_int64(soft, "bytes", &val) == CONFIG_TRUE)
+ config->sa_param.lifetime.soft_limit.bytes = val;
+
+ if (config_setting_lookup_int64(soft, "packets", &val) == CONFIG_TRUE)
+ config->sa_param.lifetime.soft_limit.packets = val;
+ }
+
+ if (hard != NULL) {
+ if (config_setting_lookup_int64(hard, "bytes", &val) == CONFIG_TRUE)
+ config->sa_param.lifetime.hard_limit.bytes = val;
+
+ if (config_setting_lookup_int64(hard, "packets", &val) == CONFIG_TRUE)
+ config->sa_param.lifetime.hard_limit.packets = val;
+ }
+}
+
+/* Parse the optional "inbound" sub-block of an SA entry into
+ * sa_param.inbound. The lookup destination address is stored big-endian in
+ * the config so sa_param can point at it directly. */
+static void parse_inbound(config_setting_t *cfg, sa_config_t *config)
+{
+ config_setting_t *cs = config_setting_lookup(cfg, "inbound");
+ int val;
+ const char *val_str;
+
+ if (cs == NULL)
+ return;
+
+ if (config_setting_lookup_int(cs, "lookup_mode", &val) == CONFIG_TRUE)
+ config->sa_param.inbound.lookup_mode = val;
+
+ if (config_setting_lookup_string(cs, "lookup_dst_addr", &val_str) == CONFIG_TRUE) {
+ if (odph_ipv4_addr_parse(&config->lkp_dst_ip, val_str) == 0) {
+ config->lkp_dst_ip = odp_cpu_to_be_32(config->lkp_dst_ip);
+ config->sa_param.inbound.lookup_param.dst_addr = &config->lkp_dst_ip;
+ }
+ }
+
+ if (config_setting_lookup_int(cs, "antireplay_ws", &val) == CONFIG_TRUE)
+ config->sa_param.inbound.antireplay_ws = val;
+
+ if (config_setting_lookup_int(cs, "reassembly_en", &val) == CONFIG_TRUE)
+ config->sa_param.inbound.reassembly_en = val;
+}
+
+/* Parse the optional "outbound" sub-block of an SA entry into
+ * sa_param.outbound, including the IPv4 tunnel endpoint addresses (stored
+ * big-endian in the config so sa_param can point at them), DSCP/DF/TTL,
+ * fragmentation mode and MTU. */
+static void parse_outbound(config_setting_t *cfg, sa_config_t *config)
+{
+ config_setting_t *cs = config_setting_lookup(cfg, "outbound"), *tunnel;
+ const char *val_str;
+ int val;
+
+ if (cs == NULL)
+ return;
+
+ tunnel = config_setting_lookup(cs, "tunnel");
+
+ if (tunnel != NULL) {
+ if (config_setting_lookup_string(tunnel, "src_addr", &val_str) == CONFIG_TRUE) {
+ if (odph_ipv4_addr_parse(&config->src_ip, val_str) == 0) {
+ config->src_ip = odp_cpu_to_be_32(config->src_ip);
+ config->sa_param.outbound.tunnel.ipv4.src_addr = &config->src_ip;
+ }
+ }
+
+ if (config_setting_lookup_string(tunnel, "dst_addr", &val_str) == CONFIG_TRUE) {
+ if (odph_ipv4_addr_parse(&config->dst_ip, val_str) == 0) {
+ config->dst_ip = odp_cpu_to_be_32(config->dst_ip);
+ config->sa_param.outbound.tunnel.ipv4.dst_addr = &config->dst_ip;
+ }
+ }
+
+ if (config_setting_lookup_int(tunnel, "dscp", &val) == CONFIG_TRUE)
+ config->sa_param.outbound.tunnel.ipv4.dscp = val;
+
+ if (config_setting_lookup_int(tunnel, "df", &val) == CONFIG_TRUE)
+ config->sa_param.outbound.tunnel.ipv4.df = val;
+
+ if (config_setting_lookup_int(tunnel, "ttl", &val) == CONFIG_TRUE)
+ config->sa_param.outbound.tunnel.ipv4.ttl = val;
+ }
+
+ if (config_setting_lookup_int(cs, "frag_mode", &val) == CONFIG_TRUE)
+ config->sa_param.outbound.frag_mode = val;
+
+ if (config_setting_lookup_int(cs, "mtu", &val) == CONFIG_TRUE)
+ config->sa_param.outbound.mtu = val;
+}
+
+/* Parse one SA entry (or the "default" template) from the config file into
+ * 'config'. Top-level scalar keys are read here; grouped keys are delegated
+ * to the dedicated parse_* helpers. Integer values are assigned to the enum
+ * fields directly, i.e. the file encodes the raw ODP enum values. */
+static void parse_sa_entry(config_setting_t *cfg, sa_config_t *config)
+{
+ int val;
+
+ if (config_setting_lookup_int(cfg, "dir", &val) == CONFIG_TRUE)
+ config->sa_param.dir = val;
+
+ if (config_setting_lookup_int(cfg, "proto", &val) == CONFIG_TRUE)
+ config->sa_param.proto = val;
+
+ if (config_setting_lookup_int(cfg, "mode", &val) == CONFIG_TRUE)
+ config->sa_param.mode = val;
+
+ if (config_setting_lookup_int(cfg, "spi", &val) == CONFIG_TRUE)
+ config->sa_param.spi = val;
+
+ parse_crypto(cfg, config);
+ parse_opt(cfg, config);
+ parse_limits(cfg, config);
+ parse_inbound(cfg, config);
+ parse_outbound(cfg, config);
+}
+
+/* Create an IPsec SA from a fully parsed sa_param and register it in the
+ * global SPI-to-SA map. On capacity, SPI-range, duplicate-SPI or SA creation
+ * failure the entry is skipped with an error log (not a fatal error). */
+static void create_sa_entry(odp_ipsec_sa_param_t *sa_param, prog_config_t *config,
+ uint32_t max_num_sa)
+{
+ uint32_t dir = sa_param->dir;
+ uint32_t spi = sa_param->spi;
+ odp_ipsec_sa_t sa;
+
+ if (config->num_sas == max_num_sa) {
+ ODPH_ERR("Maximum number of SAs parsed (%u), ignoring rest\n", max_num_sa);
+ return;
+ }
+
+ /* The SPI doubles as an index into spi_to_sa_map, hence the 16-bit cap. */
+ if (spi > UINT16_MAX) {
+ ODPH_ERR("Unsupported SPI value for SA %u (> %u)\n", spi, UINT16_MAX);
+ return;
+ }
+
+ /* NOTE(review): 'dir' comes straight from the config file and is used as
+ * a first-level index here without a range check - presumably only 0/1
+ * are valid; confirm the parser cannot produce other values. */
+ if (spi_to_sa_map[dir][spi] != NULL) {
+ ODPH_ERR("Non-unique SPIs not supported for SA %u\n", spi);
+ return;
+ }
+
+ /* SAs are distributed round-robin over the SA completion queues. */
+ sa_param->dest_queue = config->sa_qs[config->num_sas % config->num_sa_qs];
+ sa = odp_ipsec_sa_create(sa_param);
+
+ if (sa == ODP_IPSEC_SA_INVALID) {
+ ODPH_ERR("Error creating SA handle for SA %u\n", spi);
+ return;
+ }
+
+ config->sas[config->num_sas] = sa;
+ spi_to_sa_map[dir][spi] = &config->sas[config->num_sas];
+ ++config->num_sas;
+}
+
+/* Read the optional "default" SA template and then every element of the "sa"
+ * list from the config file, creating an SA for each list element. Each SA
+ * entry starts as a copy of the default template, so per-entry keys only
+ * need to override what differs. Entries without an "spi" key are ignored. */
+static void parse_and_create_sa_entries(config_t *cfg, prog_config_t *config, uint32_t max_num_sa)
+{
+ config_setting_t *cs;
+ int count;
+
+ cs = config_lookup(cfg, "default");
+
+ if (cs != NULL)
+ parse_sa_entry(cs, &config->default_cfg);
+
+ cs = config_lookup(cfg, "sa");
+
+ if (cs == NULL)
+ return;
+
+ count = config_setting_length(cs);
+
+ for (int i = 0; i < count; i++) {
+ sa_config_t sa_cfg;
+ config_setting_t *sa;
+ int val;
+
+ /* Start from the parsed defaults, then layer entry-specific keys. */
+ sa_cfg = config->default_cfg;
+ sa = config_setting_get_elem(cs, i);
+
+ if (sa == NULL)
+ continue;
+
+ if (config_setting_lookup_int(sa, "spi", &val) == CONFIG_TRUE) {
+ parse_sa_entry(sa, &sa_cfg);
+ create_sa_entry(&sa_cfg.sa_param, config, max_num_sa);
+ }
+ }
+}
+
+/* Query IPsec capabilities, set up the IPsec subsystem and destination
+ * queues (scheduled mode only), then parse and create all configured SAs.
+ * Failures are logged and abort SA setup; check_options() later rejects
+ * configurations that ended up without usable state. */
+static void parse_sas(config_t *cfg, prog_config_t *config)
+{
+ odp_ipsec_capability_t ipsec_capa;
+ uint32_t max_num_sa;
+
+ if (odp_ipsec_capability(&ipsec_capa) < 0) {
+ ODPH_ERR("Error querying IPsec capabilities\n");
+ return;
+ }
+
+ if (!setup_ipsec(config))
+ return;
+
+ if (!config->is_dir_rx && !create_sa_dest_queues(&ipsec_capa, config))
+ return;
+
+ /* Honor both the application's table size and the implementation limit. */
+ max_num_sa = ODPH_MIN(MAX_SAS, ipsec_capa.max_num_sa);
+ parse_and_create_sa_entries(cfg, config, max_num_sa);
+}
+
+/* Look up an opened packet I/O interface by name.
+ * Returns a pointer into config->pktios, or NULL if the name is unknown. */
+static const pktio_t *get_pktio(const char *iface, const prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ if (strcmp(iface, config->pktios[i].name) == 0)
+ return &config->pktios[i];
+ }
+
+ return NULL;
+}
+
+/* Parse one "fwd" config entry ("prefix", "if", "dst_mac") and append it to
+ * the forwarding table. All three keys are mandatory; a missing or malformed
+ * key logs an error (where detectable) and drops the entry. The stored
+ * prefix is pre-masked so lookups only need (dst & mask) == prefix. */
+static void create_fwd_table_entry(config_setting_t *cfg, prog_config_t *config)
+{
+ const char *val_str;
+ char dst_ip_str[16U] = { 0 };
+ uint32_t mask, dst_ip;
+ odph_ethaddr_t dst_mac;
+ const pktio_t *pktio = NULL;
+ fwd_entry_t *entry;
+
+ if (config->fwd_tbl.num == MAX_FWDS) {
+ ODPH_ERR("Maximum number of forwarding entries parsed (%u), ignoring rest\n",
+ MAX_FWDS);
+ return;
+ }
+
+ if (config_setting_lookup_string(cfg, "prefix", &val_str) == CONFIG_TRUE) {
+ /* Field width 15 bounds the scan to dst_ip_str's 16 bytes (15 chars
+ * + NUL); an unbounded "%[^/]" would overflow on long config input. */
+ if (sscanf(val_str, "%15[^/]/%u", dst_ip_str, &mask) != 2) {
+ ODPH_ERR("Error parsing IP and subnet mask for forwarding entry\n");
+ return;
+ }
+
+ if (odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) {
+ ODPH_ERR("Syntax error in IP address for forwarding entry\n");
+ return;
+ }
+
+ if (mask > IP_ADDR_LEN) {
+ ODPH_ERR("Invalid subnet mask for forwarding entry: %u\n", mask);
+ return;
+ }
+ } else {
+ return;
+ }
+
+ if (config_setting_lookup_string(cfg, "if", &val_str) == CONFIG_TRUE) {
+ pktio = get_pktio(val_str, config);
+
+ if (pktio == NULL) {
+ ODPH_ERR("Error parsing next interface for forwarding entry\n");
+ return;
+ }
+ } else {
+ return;
+ }
+
+ if (config_setting_lookup_string(cfg, "dst_mac", &val_str) == CONFIG_TRUE) {
+ if (odph_eth_addr_parse(&dst_mac, val_str) < 0) {
+ ODPH_ERR("Syntax error in destination MAC for forwarding entry\n");
+ return;
+ }
+ } else {
+ return;
+ }
+
+ /* Convert prefix length to a netmask; /0 must be handled separately
+ * since shifting a 32-bit value by 32 is undefined. */
+ mask = mask > 0U ? 0xFFFFFFFF << (IP_ADDR_LEN - mask) : 0U;
+ entry = &config->fwd_tbl.entries[config->fwd_tbl.num];
+ entry->prefix = dst_ip & mask;
+ entry->mask = mask;
+ entry->dst_mac = dst_mac;
+ entry->pktio = pktio;
+ ++config->fwd_tbl.num;
+}
+
+/* Iterate the optional "fwd" list in the config file and create a forwarding
+ * table entry for each element. A missing list simply leaves the table
+ * empty (check_options() rejects that later). */
+static void parse_fwd_table(config_t *cfg, prog_config_t *config)
+{
+ config_setting_t *cs;
+ int count;
+
+ cs = config_lookup(cfg, "fwd");
+
+ if (cs == NULL)
+ return;
+
+ count = config_setting_length(cs);
+
+ for (int i = 0; i < count; i++) {
+ config_setting_t *fwd = config_setting_get_elem(cs, i);
+
+ if (fwd == NULL)
+ continue;
+
+ create_fwd_table_entry(fwd, config);
+ }
+}
+
+/* Validate the fully parsed configuration against pool capabilities and
+ * application limits, filling in defaults for unset packet count/length.
+ * Returns PRS_OK when the configuration is usable, PRS_NOK otherwise. */
+static parse_result_t check_options(prog_config_t *config)
+{
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("Error querying pool capabilities\n");
+ return PRS_NOK;
+ }
+
+ if (config->num_ifs == 0U) {
+ ODPH_ERR("Invalid number of interfaces: %u (min: 1, max: %u)\n", config->num_ifs,
+ MAX_IFS);
+ return PRS_NOK;
+ }
+
+ if (config->fwd_tbl.num == 0U) {
+ ODPH_ERR("Invalid number of forwarding entries: %u (min: 1, max: %u)\n",
+ config->fwd_tbl.num, MAX_FWDS);
+ return PRS_NOK;
+ }
+
+ /* max_num == 0 means "no implementation limit" for ODP pools. */
+ if (pool_capa.pkt.max_num > 0U && config->num_pkts > pool_capa.pkt.max_num) {
+ ODPH_ERR("Invalid pool packet count: %u (max: %u)\n", config->num_pkts,
+ pool_capa.pkt.max_num);
+ return PRS_NOK;
+ }
+
+ if (config->num_pkts == 0U)
+ config->num_pkts = pool_capa.pkt.max_num > 0U ?
+ ODPH_MIN(pool_capa.pkt.max_num, PKT_CNT) : PKT_CNT;
+
+ if (pool_capa.pkt.max_len > 0U && config->pkt_len > pool_capa.pkt.max_len) {
+ ODPH_ERR("Invalid pool packet length: %u (max: %u)\n", config->pkt_len,
+ pool_capa.pkt.max_len);
+ return PRS_NOK;
+ }
+
+ if (config->pkt_len == 0U)
+ config->pkt_len = pool_capa.pkt.max_len > 0U ?
+ ODPH_MIN(pool_capa.pkt.max_len, PKT_SIZE) : PKT_SIZE;
+
+ if (config->num_thrs <= 0 || config->num_thrs > MAX_WORKERS) {
+ ODPH_ERR("Invalid thread count: %d (min: 1, max: %d)\n", config->num_thrs,
+ MAX_WORKERS);
+ return PRS_NOK;
+ }
+
+ /* Direct RX bypasses the scheduler: one input/output queue per worker. */
+ if (config->is_dir_rx) {
+ config->num_input_qs = config->num_thrs;
+ config->num_output_qs = config->num_thrs;
+ }
+
+ return PRS_OK;
+}
+
+/* Parse command line options, then read the libconfig SA/forwarding
+ * configuration file and build the SA and forwarding tables from it.
+ * Returns PRS_OK on success, PRS_NOK on error, PRS_TERM for --help. */
+static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
+{
+ int opt, long_index;
+ config_t cfg;
+
+ static const struct option longopts[] = {
+ { "interfaces", required_argument, NULL, 'i' },
+ { "num_pkts", required_argument, NULL, 'n' },
+ { "pkt_len", required_argument, NULL, 'l' },
+ { "count", required_argument, NULL, 'c' },
+ { "mode", required_argument, NULL, 'm' },
+ { "conf", required_argument, NULL, 'C' },
+ { "num_input_qs", required_argument, NULL, 'I' },
+ { "num_sa_qs", required_argument, NULL, 'S' },
+ { "num_output_qs", required_argument, NULL, 'O' },
+ { "direct_rx", no_argument, NULL, 'd' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "i:n:l:c:m:C:I:S:O:dh";
+
+ while (true) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'i':
+ parse_interfaces(config, optarg);
+ break;
+ case 'n':
+ config->num_pkts = atoi(optarg);
+ break;
+ case 'l':
+ config->pkt_len = atoi(optarg);
+ break;
+ case 'c':
+ config->num_thrs = atoi(optarg);
+ break;
+ case 'm':
+ /* Any non-zero argument selects mode 1, zero selects mode 0. */
+ config->mode = !!atoi(optarg);
+ break;
+ case 'C':
+ /* strdup'd; released in teardown_test(). */
+ config->conf_file = strdup(optarg);
+ break;
+ case 'I':
+ config->num_input_qs = atoi(optarg);
+ break;
+ case 'S':
+ config->num_sa_qs = atoi(optarg);
+ break;
+ case 'O':
+ config->num_output_qs = atoi(optarg);
+ break;
+ case 'd':
+ config->is_dir_rx = true;
+ break;
+ case 'h':
+ print_usage();
+ return PRS_TERM;
+ case '?':
+ default:
+ print_usage();
+ return PRS_NOK;
+ }
+ }
+
+ config_init(&cfg);
+
+ /* NOTE(review): conf_file is NULL when -C was not given unless
+ * init_config() sets a default - confirm before passing it here. */
+ if (config_read_file(&cfg, config->conf_file) == CONFIG_FALSE) {
+ ODPH_ERR("Error opening SA configuration file: %s\n", config_error_text(&cfg));
+ config_destroy(&cfg);
+ return PRS_NOK;
+ }
+
+ parse_sas(&cfg, config);
+ parse_fwd_table(&cfg, config);
+ config_destroy(&cfg);
+
+ return check_options(config);
+}
+
+/* Install the termination signal handler (SIGINT/SIGTERM/SIGHUP) and then
+ * parse all program options. Returns the parse_options() result, or PRS_NOK
+ * if signal setup fails. */
+static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
+ ODPH_ERR("Error installing signal handler\n");
+ return PRS_NOK;
+ }
+
+ return parse_options(argc, argv, config);
+}
+
+/* RX function for scheduled mode: fetch up to 'num' events from the
+ * scheduler without blocking. Matches the ops.rx function signature. */
+static uint32_t schedule(thread_config_t *config ODP_UNUSED, odp_event_t evs[], int num)
+{
+ return odp_schedule_multi_no_wait(NULL, evs, num);
+}
+
+/* RX function for direct mode: poll one pktin queue (interfaces are visited
+ * round-robin via config->pktio, the queue is picked by thread index) and
+ * convert received packets to events. Returns the number of events stored. */
+static uint32_t recv(thread_config_t *config, odp_event_t evs[], int num)
+{
+ prog_config_t *prog_config = config->prog_config;
+ pktio_t *pktio = &prog_config->pktios[config->pktio++ % prog_config->num_ifs];
+ odp_pktin_queue_t in_q = pktio->in_dir_qs[config->thr_idx % prog_config->num_input_qs];
+ odp_packet_t pkts[num];
+ int ret;
+
+ ret = odp_pktin_recv(in_q, pkts, num);
+
+ if (odp_unlikely(ret <= 0))
+ return 0U;
+
+ odp_packet_to_event_multi(pkts, evs, ret);
+
+ return ret;
+}
+
+/* TX function for direct output mode: transmit on the indexed pktout queue.
+ * Returns the number of packets accepted (0 on error); the caller owns and
+ * must free any packets not sent. (Note: shadows POSIX send(), but the
+ * static linkage keeps this file-local.) */
+static uint32_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num)
+{
+ int ret = odp_pktout_send(pktio->out_dir_qs[index], pkts, num);
+
+ return ret < 0 ? 0U : (uint32_t)ret;
+}
+
+/* TX function for event (hashed/ordered) output mode: enqueue packets as
+ * events to the indexed pktout event queue. Returns the number accepted
+ * (0 on error). Assumes num <= MAX_BURST, the size of the local staging
+ * array (callers batch at MAX_BURST). */
+static uint32_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkts[], int num)
+{
+ odp_event_t evs[MAX_BURST];
+ int ret;
+
+ odp_packet_to_event_multi(pkts, evs, num);
+
+ ret = odp_queue_enq_multi(pktio->out_ev_qs[index], evs, num);
+
+ return ret < 0 ? 0U : (uint32_t)ret;
+}
+
+/* Create the shared packet pool and open, configure and start every
+ * configured packet I/O interface. Input mode (scheduled vs. direct) and
+ * output mode (event queue vs. direct queue) are selected from is_dir_rx
+ * and the ordered-mode flag. Returns false on the first failure; partially
+ * created resources are released by teardown_test(). */
+static odp_bool_t setup_pktios(prog_config_t *config)
+{
+ odp_pool_param_t pool_param;
+ pktio_t *pktio;
+ odp_pktio_param_t pktio_param;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktio_capability_t capa;
+ odp_pktout_queue_param_t pktout_param;
+ odp_pktio_config_t pktio_config;
+ uint32_t max_output_qs;
+
+ odp_pool_param_init(&pool_param);
+ /* Single segment per packet: seg_len == len. */
+ pool_param.pkt.seg_len = config->pkt_len;
+ pool_param.pkt.len = config->pkt_len;
+ pool_param.pkt.num = config->num_pkts;
+ pool_param.type = ODP_POOL_PACKET;
+ config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param);
+
+ if (config->pktio_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating packet I/O pool\n");
+ return false;
+ }
+
+ /* Select the per-mode RX callback; hashed TX only applies to
+ * scheduled RX in ordered mode. */
+ config->ops.rx = !config->is_dir_rx ? schedule : recv;
+ config->is_hashed_tx = !config->is_dir_rx && config->mode == ORDERED;
+
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ pktio = &config->pktios[i];
+ pktio->idx = i;
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = !config->is_dir_rx ?
+ ODP_PKTIN_MODE_SCHED : ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = config->is_hashed_tx ?
+ ODP_PKTOUT_MODE_QUEUE : ODP_PKTOUT_MODE_DIRECT;
+ pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param);
+
+ if (pktio->handle == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (odp_pktio_capability(pktio->handle, &capa) < 0) {
+ ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (config->num_input_qs == 0U || config->num_input_qs > capa.max_input_queues) {
+ ODPH_ERR("Invalid number of input queues for packet I/O: %u (min: 1, max: "
+ "%u) (%s)\n", config->num_input_qs, capa.max_input_queues,
+ pktio->name);
+ return false;
+ }
+
+ max_output_qs = ODPH_MIN(MAX_QUEUES, capa.max_output_queues);
+
+ if (config->num_output_qs == 0U || config->num_output_qs > max_output_qs) {
+ ODPH_ERR("Invalid number of output queues for packet I/O: %u (min: 1, "
+ "max: %u) (%s)\n", config->num_output_qs, max_output_qs,
+ pktio->name);
+ return false;
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ if (config->is_hashed_tx)
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+
+ /* Spread flows over multiple input queues with IPv4/UDP hashing. */
+ if (config->num_input_qs > 1U) {
+ pktin_param.hash_enable = true;
+ pktin_param.hash_proto.proto.ipv4_udp = 1U;
+ pktin_param.num_queues = config->num_input_qs;
+ }
+
+ /* MT-safe queues are only needed when more threads than queues may
+ * poll the same direct input queue concurrently. */
+ pktin_param.op_mode = (config->is_dir_rx &&
+ config->num_thrs > (int)config->num_input_qs) ?
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+
+ if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) {
+ ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (config->is_dir_rx) {
+ if (odp_pktin_queue(pktio->handle, pktio->in_dir_qs, config->num_input_qs)
+ != (int)config->num_input_qs) {
+ ODPH_ERR("Error querying packet I/O input queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ }
+
+ pktio->send_fn = config->is_hashed_tx ? enqueue : send;
+ pktio->num_tx_qs = config->num_output_qs;
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.num_queues = pktio->num_tx_qs;
+
+ if (!config->is_hashed_tx) {
+ pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ?
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+ }
+
+ if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) {
+ ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name);
+ return false;
+ }
+
+ /* Fetch the handles matching the configured output mode. */
+ if (config->is_hashed_tx) {
+ if (odp_pktout_event_queue(pktio->handle, pktio->out_ev_qs,
+ pktio->num_tx_qs) != (int)pktio->num_tx_qs) {
+ ODPH_ERR("Error querying packet I/O output event queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ } else {
+ if (odp_pktout_queue(pktio->handle, pktio->out_dir_qs, pktio->num_tx_qs)
+ != (int)pktio->num_tx_qs) {
+ ODPH_ERR("Error querying packet I/O output queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ }
+
+ odp_pktio_config_init(&pktio_config);
+
+ if (odp_pktio_config(pktio->handle, &pktio_config) < 0) {
+ ODPH_ERR("Error configuring packet I/O extra options (%s)\n", pktio->name);
+ return false;
+ }
+
+ /* Interface MAC is used as the source address when forwarding. */
+ if (odp_pktio_mac_addr(pktio->handle, &pktio->src_mac, sizeof(pktio->src_mac))
+ != sizeof(pktio->src_mac)) {
+ ODPH_ERR("Error getting packet I/O MAC address (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (odp_pktio_start(pktio->handle) < 0) {
+ ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Consume one IPsec status event, counting it as an error if the status
+ * cannot be read or reports a negative result. Always frees the event. */
+static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats)
+{
+ odp_ipsec_status_t status;
+
+ if (odp_unlikely(odp_ipsec_status(&status, ev) < 0 || status.result < 0))
+ ++stats->status_errs;
+
+ odp_event_free(ev);
+}
+
+/* Worker thread main loop: receive events via the mode-specific RX callback,
+ * demultiplex them into plain packets, IPsec-result packets and IPsec status
+ * events, and hand the packet batches to the mode-specific processing and
+ * completion callbacks. Runs until the global is_running flag clears, then
+ * synchronizes with main via term_barrier and drains remaining events. */
+static int process_packets(void *args)
+{
+ thread_config_t *config = args;
+ int thr_idx = odp_thread_id();
+ odp_event_t evs[MAX_BURST], ev;
+ ops_t ops = config->prog_config->ops;
+ odp_atomic_u32_t *is_running = &config->prog_config->is_running;
+ uint32_t cnt;
+ odp_event_type_t type;
+ odp_event_subtype_t subtype;
+ odp_packet_t pkt, pkts_in[MAX_BURST], pkts_ips[MAX_BURST];
+ lookup_table_t *fwd_tbl = &config->prog_config->fwd_tbl;
+ stats_t *stats = &config->stats;
+
+ /* NOTE(review): 'ifs' is written per worker without locking here -
+ * presumably a thread-local; confirm its declaration. */
+ ifs.is_hashed_tx = config->prog_config->is_hashed_tx;
+ ifs.q_idx = thr_idx % config->prog_config->num_output_qs;
+ config->thr_idx = thr_idx;
+ odp_barrier_wait(&config->prog_config->init_barrier);
+
+ while (odp_atomic_load_u32(is_running)) {
+ int num_pkts_in = 0, num_pkts_ips = 0;
+ /* TODO: Add possibility to configure scheduler and ipsec enq/deq burst sizes. */
+ cnt = ops.rx(config, evs, MAX_BURST);
+
+ if (cnt == 0U)
+ continue;
+
+ for (uint32_t i = 0U; i < cnt; ++i) {
+ ev = evs[i];
+ type = odp_event_types(ev, &subtype);
+ pkt = odp_packet_from_event(ev);
+
+ if (type == ODP_EVENT_PACKET) {
+ if (subtype == ODP_EVENT_PACKET_BASIC) {
+ pkts_in[num_pkts_in++] = pkt;
+ } else if (subtype == ODP_EVENT_PACKET_IPSEC) {
+ pkts_ips[num_pkts_ips++] = pkt;
+ } else {
+ ++stats->discards;
+ odp_event_free(ev);
+ }
+ } else if (type == ODP_EVENT_IPSEC_STATUS) {
+ check_ipsec_status_ev(ev, stats);
+ } else {
+ ++stats->discards;
+ odp_event_free(ev);
+ }
+ }
+
+ if (num_pkts_in > 0)
+ ops.proc(pkts_in, num_pkts_in, fwd_tbl, stats);
+
+ /* The completion hook is optional (unset in some modes). */
+ if (ops.compl && num_pkts_ips > 0)
+ ops.compl(pkts_ips, num_pkts_ips, fwd_tbl, stats);
+ }
+
+ odp_barrier_wait(&config->prog_config->term_barrier);
+ ops.drain(config->prog_config);
+
+ return 0;
+}
+
+/* Create the worker threads, one per requested CPU, each running
+ * process_packets() with its own thread_config_t. Returns false if fewer
+ * threads than requested could be created. */
+static odp_bool_t setup_workers(prog_config_t *config)
+{
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[config->num_thrs];
+ odp_cpumask_t cpumask;
+ int num_workers;
+
+ num_workers = odp_cpumask_default_worker(&cpumask, config->num_thrs);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = config->odp_instance;
+ thr_common.cpumask = &cpumask;
+
+ for (int i = 0; i < config->num_thrs; ++i) {
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = process_packets;
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ config->thread_config[i].prog_config = config;
+ thr_param[i].arg = &config->thread_config[i];
+ }
+
+ num_workers = odph_thread_create(config->thread_tbl, &thr_common, thr_param, num_workers);
+
+ if (num_workers != config->num_thrs) {
+ ODPH_ERR("Error configuring worker threads\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Initialize the start/stop barriers (workers + main), bring up packet I/O
+ * and launch the workers, then wait on the init barrier so packets only
+ * flow once everything is ready. Returns false on any setup failure. */
+static odp_bool_t setup_test(prog_config_t *config)
+{
+ odp_barrier_init(&config->init_barrier, config->num_thrs + 1);
+ odp_barrier_init(&config->term_barrier, config->num_thrs + 1);
+
+ if (!setup_pktios(config))
+ return false;
+
+ if (!setup_workers(config))
+ return false;
+
+ odp_barrier_wait(&config->init_barrier);
+
+ return true;
+}
+
+/* Stop all packet I/O interfaces, release the workers from the termination
+ * barrier (they then drain and exit) and join them. */
+static void stop_test(prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i)
+ if (config->pktios[i].handle != ODP_PKTIO_INVALID)
+ (void)odp_pktio_stop(config->pktios[i].handle);
+
+ odp_barrier_wait(&config->term_barrier);
+ (void)odph_thread_join(config->thread_tbl, config->num_thrs);
+}
+
+/* Print the final test summary: the effective configuration followed by the
+ * per-worker packet and error counters. */
+static void print_stats(const prog_config_t *config)
+{
+ const stats_t *stats;
+
+ printf("\n====================\n\n"
+ "IPsec forwarder done\n\n"
+ " configuration file: %s\n"
+ " queuing mode: %s\n"
+ " input queue count: %u\n"
+ " SA queue count: %u\n"
+ " output queue count: %u\n"
+ " RX mode: %s\n", config->conf_file,
+ config->mode == ORDERED ? "ordered" : "parallel", config->num_input_qs,
+ config->num_sa_qs, config->num_output_qs,
+ config->is_dir_rx ? "direct" : "scheduled");
+
+ for (int i = 0; i < config->num_thrs; ++i) {
+ stats = &config->thread_config[i].stats;
+
+ printf("\n worker %d:\n"
+ " IPsec in packets: %" PRIu64 "\n"
+ " IPsec out packets: %" PRIu64 "\n"
+ " IPsec in packet errors: %" PRIu64 "\n"
+ " IPsec out packet errors: %" PRIu64 "\n"
+ " IPsec status errors: %" PRIu64 "\n"
+ " packets forwarded: %" PRIu64 "\n"
+ " packets dropped: %" PRIu64 "\n", i, stats->ipsec_in_pkts,
+ stats->ipsec_out_pkts, stats->ipsec_in_errs, stats->ipsec_out_errs,
+ stats->status_errs, stats->fwd_pkts, stats->discards);
+ }
+
+ printf("\n====================\n");
+}
+
+/* Busy-poll the scheduler until an SA-disable status event has been seen for
+ * each of the 'num_sas' SAs being torn down; all other events drained in the
+ * process are freed and ignored. Called from the control thread after
+ * odp_ipsec_sa_disable() and before odp_ipsec_sa_destroy(). */
+static void wait_sas_disabled(uint32_t num_sas)
+{
+ uint32_t num_sas_dis = 0U;
+ odp_event_t ev;
+ odp_ipsec_status_t status;
+
+ while (num_sas_dis < num_sas) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ if (odp_event_type(ev) != ODP_EVENT_IPSEC_STATUS) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ if (odp_ipsec_status(&status, ev) < 0) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ if (status.id == ODP_IPSEC_STATUS_SA_DISABLE)
+ ++num_sas_dis;
+
+ odp_event_free(ev);
+ }
+}
+
+/* Release all resources created during setup, in dependency order: pktios
+ * and their pool first, then SAs (disable, drain status events, destroy),
+ * then the SA/completion queues, and finally the strdup'd config file path.
+ * Safe to call on a partially initialized config: invalid handles are
+ * skipped and zero counts make the loops no-ops. */
+static void teardown_test(const prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ free(config->pktios[i].name);
+
+ if (config->pktios[i].handle != ODP_PKTIO_INVALID)
+ (void)odp_pktio_close(config->pktios[i].handle);
+ }
+
+ if (config->pktio_pool != ODP_POOL_INVALID)
+ (void)odp_pool_destroy(config->pktio_pool);
+
+ for (uint32_t i = 0U; i < config->num_sas; ++i)
+ (void)odp_ipsec_sa_disable(config->sas[i]);
+
+ if (!config->is_dir_rx)
+ /* Drain SA status events. */
+ wait_sas_disabled(config->num_sas);
+
+ for (uint32_t i = 0U; i < config->num_sas; ++i)
+ (void)odp_ipsec_sa_destroy(config->sas[i]);
+
+ for (uint32_t i = 0U; i < config->num_sa_qs; ++i)
+ (void)odp_queue_destroy(config->sa_qs[i]);
+
+ if (config->compl_q != ODP_QUEUE_INVALID)
+ (void)odp_queue_destroy(config->compl_q);
+
+ free(config->conf_file);
+}
+
+/* Program entry point: initialize ODP, reserve shared memory for the program
+ * configuration, parse options/config file, run the forwarding test until a
+ * termination signal clears is_running, then print statistics and tear
+ * everything down in reverse order. */
+int main(int argc, char **argv)
+{
+ odph_helper_options_t odph_opts;
+ odp_init_t init_param;
+ odp_instance_t odp_instance;
+ odp_shm_t shm_cfg = ODP_SHM_INVALID;
+ parse_result_t parse_res;
+ int ret = EXIT_SUCCESS;
+
+ /* Strip ODP helper options before our own getopt pass. */
+ argc = odph_parse_options(argc, argv);
+
+ if (odph_options(&odph_opts) == -1) {
+ ODPH_ERR("Error while reading ODP helper options, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = odph_opts.mem_model;
+
+ if (odp_init_global(&odp_instance, &init_param, NULL) < 0) {
+ ODPH_ERR("ODP global init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_init_local(odp_instance, ODP_THREAD_CONTROL) < 0) {
+ ODPH_ERR("ODP local init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* The config lives in shared memory so worker threads/processes can
+ * access it regardless of the helper's memory model. */
+ shm_cfg = odp_shm_reserve(PROG_NAME "_cfg", sizeof(prog_config_t), ODP_CACHE_LINE_SIZE,
+ 0U);
+
+ if (shm_cfg == ODP_SHM_INVALID) {
+ ODPH_ERR("Error reserving shared memory\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ prog_conf = odp_shm_addr(shm_cfg);
+
+ if (prog_conf == NULL) {
+ ODPH_ERR("Error resolving shared memory address\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ /* From here on, out_test is safe: init_config() puts the config into a
+ * state teardown_test() can handle even before setup completes. */
+ init_config(prog_conf);
+
+ if (!prog_conf->is_dir_rx && odp_schedule_config(NULL) < 0) {
+ ODPH_ERR("Error configuring scheduler\n");
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ parse_res = setup_program(argc, argv, prog_conf);
+
+ if (parse_res == PRS_NOK) {
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ if (parse_res == PRS_TERM) {
+ ret = EXIT_SUCCESS;
+ goto out_test;
+ }
+
+ prog_conf->odp_instance = odp_instance;
+ odp_atomic_init_u32(&prog_conf->is_running, 1U);
+
+ if (!setup_test(prog_conf)) {
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ /* Main thread idles; the signal handler clears is_running. */
+ while (odp_atomic_load_u32(&prog_conf->is_running))
+ odp_cpu_pause();
+
+ stop_test(prog_conf);
+ print_stats(prog_conf);
+
+out_test:
+ teardown_test(prog_conf);
+
+out:
+ if (shm_cfg != ODP_SHM_INVALID)
+ (void)odp_shm_free(shm_cfg);
+
+ if (odp_term_local() < 0) {
+ ODPH_ERR("ODP local terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(odp_instance) < 0) {
+ ODPH_ERR("ODP global terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/test/performance/odp_ipsecfwd.conf b/test/performance/odp_ipsecfwd.conf
new file mode 100644
index 000000000..81ddaef7e
--- /dev/null
+++ b/test/performance/odp_ipsecfwd.conf
@@ -0,0 +1,41 @@
+default: {
+ dir = 1
+ proto = 0
+ mode = 0
+ crypto: {
+ cipher_alg = 4
+ cipher_key = "jWnZr4t7w!zwC*F-"
+ auth_alg = 2
+ auth_key = "n2r5u7x!A%D*"
+ icv_len = 12
+ };
+};
+
+sa: (
+ {
+ spi = 1337
+ outbound: {
+ tunnel: {
+ src_addr = "192.168.1.10"
+ dst_addr = "192.168.1.16"
+ };
+ };
+ },
+ {
+ spi = 1338
+ outbound: {
+ tunnel: {
+ src_addr = "192.168.3.110"
+ dst_addr = "192.168.3.116"
+ };
+ };
+ }
+);
+
+fwd: (
+ {
+ prefix: "192.168.1.0/24"
+ if: "ens9f1"
+ dst_mac: "00:00:05:00:07:00"
+ }
+);
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
new file mode 100644
index 000000000..b993de4cb
--- /dev/null
+++ b/test/performance/odp_l2fwd.c
@@ -0,0 +1,2643 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2024, Nokia
+ * Copyright (c) 2020-2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_l2fwd.c
+ *
+ * L2 forwarding example application
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+/* enable strtok */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdlib.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <signal.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Maximum number of worker threads */
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+
+/* Default number of packets per pool */
+#define DEFAULT_NUM_PKT (16 * 1024)
+
+/* Packet length to pool create */
+#define POOL_PKT_LEN 1536
+
+/* Maximum number of packet in a burst */
+#define MAX_PKT_BURST 32
+
+/* Maximum number of pktio queues per interface */
+#define MAX_QUEUES 32
+
+/* Maximum number of schedule groups */
+#define MAX_GROUPS 32
+
+/* Maximum number of pktio interfaces */
+#define MAX_PKTIOS 8
+
+/* Default vector size */
+#define DEFAULT_VEC_SIZE MAX_PKT_BURST
+
+/* Default vector timeout */
+#define DEFAULT_VEC_TMO ODP_TIME_MSEC_IN_NS
+
+/* Maximum thread info string length */
+#define EXTRA_STR_LEN 32
+
+/* Packet input mode */
+typedef enum pktin_mode_t {
+ DIRECT_RECV, /* odp_pktin_recv() polling */
+ PLAIN_QUEUE, /* plain event queues */
+ SCHED_PARALLEL, /* scheduler, parallel queues */
+ SCHED_ATOMIC, /* scheduler, atomic queues */
+ SCHED_ORDERED, /* scheduler, ordered queues */
+} pktin_mode_t;
+
+/* Packet output modes */
+typedef enum pktout_mode_t {
+ PKTOUT_DIRECT, /* odp_pktout_send() */
+ PKTOUT_QUEUE /* event queue enqueue */
+} pktout_mode_t;
+
+/* Return non-zero when the input mode uses the ODP scheduler. */
+static inline int sched_mode(pktin_mode_t in_mode)
+{
+ return (in_mode == SCHED_PARALLEL) ||
+ (in_mode == SCHED_ATOMIC) ||
+ (in_mode == SCHED_ORDERED);
+}
+
+/* Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+ strrchr((file_name), '/') + 1 : (file_name))
+/*
+ * Parsed command line application arguments
+ */
+typedef struct {
+ /* Some extra features (e.g. error checks) have been enabled */
+ uint8_t extra_feat;
+
+ /* Prefetch packet data */
+ uint8_t prefetch;
+
+ /* Change destination eth addresses */
+ uint8_t dst_change;
+
+ /* Change source eth addresses */
+ uint8_t src_change;
+
+ /* Read packet data in uint64_t words */
+ uint16_t data_rd;
+
+ /* Check packet errors */
+ uint8_t error_check;
+
+ /* Packet copy */
+ uint8_t packet_copy;
+
+ /* Checksum offload */
+ uint8_t chksum;
+
+ /* Print debug info on every packet */
+ uint8_t verbose_pkt;
+
+ unsigned int cpu_count;
+ int if_count; /* Number of interfaces to be used */
+ int addr_count; /* Number of dst addresses to be used */
+ int num_workers; /* Number of worker threads */
+ char **if_names; /* Array of pointers to interface names */
+ odph_ethaddr_t addrs[MAX_PKTIOS]; /* Array of dst addresses */
+ pktin_mode_t in_mode; /* Packet input mode */
+ pktout_mode_t out_mode; /* Packet output mode */
+ int time; /* Time in seconds to run. */
+ int accuracy; /* Number of seconds to get and print stats */
+ char *if_str; /* Storage for interface names */
+ int sched_mode; /* Scheduler mode */
+ int num_groups; /* Number of scheduling groups */
+ int group_mode; /* How threads join groups */
+ int burst_rx; /* Receive burst size */
+ int rx_queues; /* RX queues per interface */
+ int pool_per_if; /* Create pool per interface */
+ uint32_t num_pkt; /* Number of packets per pool */
+ bool vector_mode; /* Vector mode enabled */
+ uint32_t num_vec; /* Number of vectors per pool */
+ uint64_t vec_tmo_ns; /* Vector formation timeout in ns */
+ uint32_t vec_size; /* Vector size */
+ int verbose; /* Verbose output */
+ uint32_t packet_len; /* Maximum packet length supported */
+ uint32_t seg_len; /* Pool segment length */
+ int promisc_mode; /* Promiscuous mode enabled */
+ int flow_aware; /* Flow aware scheduling enabled */
+ uint8_t input_ts; /* Packet input timestamping enabled */
+ int mtu; /* Interface MTU */
+ int num_prio; /* Number of entries in prio[] below */
+ odp_schedule_prio_t prio[MAX_PKTIOS]; /* Priority of input queues of an interface */
+
+} appl_args_t;
+
+/* Statistics. The union pads each instance to a full cache line (and
+ * ODP_ALIGNED_CACHE aligns it), keeping per-thread counters on separate
+ * cache lines. */
+typedef union ODP_ALIGNED_CACHE {
+ struct {
+ /* Number of forwarded packets */
+ uint64_t packets;
+ /* Packets dropped due to receive error */
+ uint64_t rx_drops;
+ /* Packets dropped due to transmit error */
+ uint64_t tx_drops;
+ /* Number of failed packet copies */
+ uint64_t copy_fails;
+ /* Dummy sum of packet data */
+ uint64_t dummy_sum;
+ } s;
+
+ uint8_t padding[ODP_CACHE_LINE_SIZE];
+} stats_t;
+
+/* Thread specific data */
+typedef struct thread_args_t {
+ stats_t stats;
+
+ /* Per-interface RX/TX assignment for this thread: which queue of which
+ * pktio it polls and transmits on (direct and event queue variants). */
+ struct {
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ odp_queue_t rx_queue;
+ odp_queue_t tx_queue;
+ int rx_idx;
+ int tx_idx;
+ int rx_queue_idx;
+ int tx_queue_idx;
+ } pktio[MAX_PKTIOS];
+
+ /* Groups to join */
+ odp_schedule_group_t group[MAX_GROUPS];
+
+ int thr_idx; /* Thread index */
+ int num_pktio; /* Number of entries used in pktio[] */
+ int num_grp_join; /* Number of entries used in group[] */
+
+} thread_args_t;
+
+/*
+ * Grouping of all global data
+ */
+typedef struct {
+ /* Thread table */
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ /* Thread specific arguments */
+ thread_args_t thread_args[MAX_WORKERS];
+ /* Barriers to synchronize main and workers */
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ /* Application (parsed) arguments */
+ appl_args_t appl;
+ /* Table of port ethernet addresses */
+ odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
+ /* Table of dst ethernet addresses */
+ odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
+ /* Table of dst ports. This is used by non-sched modes. */
+ int dst_port[MAX_PKTIOS];
+ /* Table of pktio handles */
+ struct {
+ odp_pktio_t pktio;
+ odp_pktin_queue_t pktin[MAX_QUEUES];
+ odp_pktout_queue_t pktout[MAX_QUEUES];
+ odp_queue_t rx_q[MAX_QUEUES];
+ odp_queue_t tx_q[MAX_QUEUES];
+ int num_rx_thr;
+ int num_tx_thr;
+ int num_rx_queue;
+ int num_tx_queue;
+ int next_rx_queue;
+ int next_tx_queue;
+ } pktios[MAX_PKTIOS];
+
+ /* Destination port lookup table.
+ * Table index is pktio_index of the API. This is used by the sched
+ * mode. */
+ uint8_t dst_port_from_idx[ODP_PKTIO_MAX_INDEX + 1];
+ /* Break workers loop if set to 1 */
+ odp_atomic_u32_t exit_threads;
+
+ uint32_t pkt_len; /* Effective packet length */
+ uint32_t num_pkt; /* Effective packets per pool */
+ uint32_t seg_len; /* Effective pool segment length */
+ uint32_t vector_num; /* Effective vectors per pool */
+ uint32_t vector_max_size; /* Effective max vector size */
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE]; /* Worker CPU mask string */
+
+} args_t;
+
+/* Global pointer to args */
+static args_t *gbl_args;
+
+/* Signal handler: request worker shutdown by setting the atomic exit flag.
+ * Guards against signals delivered before global data is mapped. */
+static void sig_handler(int signo ODP_UNUSED)
+{
+ if (gbl_args == NULL)
+ return;
+ odp_atomic_store_u32(&gbl_args->exit_threads, 1);
+}
+
+/*
+ * Drop packets which input parsing marked as containing errors.
+ *
+ * Frees packets with error and modifies pkt_tbl[] to only contain packets with
+ * no detected errors.
+ *
+ * pkt_tbl Array of packets
+ * num Number of packets in pkt_tbl[]
+ *
+ * Returns number of packets dropped
+ */
+static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
+{
+ odp_packet_t pkt;
+ unsigned dropped = 0;
+ unsigned i, j;
+
+ /* In-place compaction: i scans, j tracks the next output slot;
+ * stores are skipped while no packet has been dropped yet (i == j). */
+ for (i = 0, j = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ odp_packet_free(pkt); /* Drop */
+ dropped++;
+ } else if (odp_unlikely(i != j++)) {
+ pkt_tbl[j - 1] = pkt;
+ }
+ }
+
+ return dropped;
+}
+
+/* Prefetch the first 'prefetch' * 64 bytes of each packet's data, or do
+ * nothing when prefetching is disabled (prefetch == 0). */
+static inline void prefetch_data(uint8_t prefetch, odp_packet_t pkt_tbl[], uint32_t num)
+{
+ if (prefetch == 0)
+ return;
+
+ for (uint32_t i = 0; i < num; i++)
+ odp_packet_prefetch(pkt_tbl[i], 0, prefetch * 64);
+}
+
+/*
+ * Fill packets' eth addresses according to the destination port
+ *
+ * pkt_tbl Array of packets
+ * num Number of packets in the array
+ * dst_port Destination port
+ */
+static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
+ unsigned num, int dst_port)
+{
+ odp_packet_t pkt;
+ odph_ethhdr_t *eth;
+ unsigned i;
+
+ /* Fast exit when neither address rewrite option is enabled. */
+ if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
+ return;
+
+ for (i = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+ /* Assumes the Ethernet header starts at the packet data pointer. */
+ eth = odp_packet_data(pkt);
+
+ if (gbl_args->appl.src_change)
+ eth->src = gbl_args->port_eth_addr[dst_port];
+
+ if (gbl_args->appl.dst_change)
+ eth->dst = gbl_args->dst_eth_addr[dst_port];
+ }
+}
+
+static inline int event_queue_send(odp_queue_t queue, odp_packet_t *pkt_tbl,
+ unsigned pkts)
+{
+ int ret;
+ unsigned sent = 0;
+ odp_event_t ev_tbl[pkts];
+
+ odp_packet_to_event_multi(pkt_tbl, ev_tbl, pkts);
+
+ while (sent < pkts) {
+ ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
+
+ if (odp_unlikely(ret <= 0)) {
+ if (ret < 0 || odp_atomic_load_u32(&gbl_args->exit_threads))
+ break;
+ }
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline void chksum_insert(odp_packet_t *pkt_tbl, int pkts)
+{
+ odp_packet_t pkt;
+ int i;
+
+ for (i = 0; i < pkts; i++) {
+ pkt = pkt_tbl[i];
+ odp_packet_l3_chksum_insert(pkt, 1);
+ odp_packet_l4_chksum_insert(pkt, 1);
+ }
+}
+
+static void print_packets(odp_packet_t *pkt_tbl, int num)
+{
+ odp_packet_t pkt;
+ uintptr_t data_ptr;
+ uint32_t bit, align;
+
+ for (int i = 0; i < num; i++) {
+ pkt = pkt_tbl[i];
+ data_ptr = (uintptr_t)odp_packet_data(pkt);
+
+ for (bit = 0, align = 1; bit < 32; bit++, align *= 2)
+ if (data_ptr & (0x1 << bit))
+ break;
+
+ printf(" Packet data: 0x%" PRIxPTR "\n"
+ " Packet len: %u\n"
+ " Packet seg len: %u\n"
+ " Data align: %u\n"
+ " Num segments: %i\n"
+ " Headroom size: %u\n"
+ " User area size: %u\n\n",
+ data_ptr, odp_packet_len(pkt), odp_packet_seg_len(pkt), align,
+ odp_packet_num_segs(pkt), odp_packet_headroom(pkt),
+ odp_packet_user_area_size(pkt));
+ }
+}
+
+static inline void data_rd(odp_packet_t *pkt_tbl, int num, uint16_t rd_words, stats_t *stats)
+{
+ odp_packet_t pkt;
+ uint64_t *data;
+ int i;
+ uint32_t len, words, j;
+ uint64_t sum = 0;
+
+ for (i = 0; i < num; i++) {
+ pkt = pkt_tbl[i];
+ data = odp_packet_data(pkt);
+ len = odp_packet_seg_len(pkt);
+
+ words = rd_words;
+ if (rd_words * 8 > len)
+ words = len / 8;
+
+ for (j = 0; j < words; j++)
+ sum += data[j];
+ }
+
+ stats->s.dummy_sum += sum;
+}
+
+static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
+{
+ odp_packet_t old_pkt, new_pkt;
+ odp_pool_t pool;
+ int i;
+ int copy_fails = 0;
+
+ for (i = 0; i < pkts; i++) {
+ old_pkt = pkt_tbl[i];
+ pool = odp_packet_pool(old_pkt);
+ new_pkt = odp_packet_copy(old_pkt, pool);
+ if (odp_likely(new_pkt != ODP_PACKET_INVALID)) {
+ pkt_tbl[i] = new_pkt;
+ odp_packet_free(old_pkt);
+ } else {
+ copy_fails++;
+ }
+ }
+
+ return copy_fails;
+}
+
+/*
+ * Return number of packets remaining in the pkt_tbl
+ */
+static inline int process_extra_features(const appl_args_t *appl_args, odp_packet_t *pkt_tbl,
+ int pkts, stats_t *stats)
+{
+ if (odp_unlikely(appl_args->extra_feat)) {
+ uint16_t rd_words = appl_args->data_rd;
+
+ if (appl_args->verbose_pkt)
+ print_packets(pkt_tbl, pkts);
+
+ if (rd_words)
+ data_rd(pkt_tbl, pkts, rd_words, stats);
+
+ if (appl_args->packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
+ if (appl_args->chksum)
+ chksum_insert(pkt_tbl, pkts);
+
+ if (appl_args->error_check) {
+ int rx_drops;
+
+ /* Drop packets with errors */
+ rx_drops = drop_err_pkts(pkt_tbl, pkts);
+
+ if (odp_unlikely(rx_drops)) {
+ stats->s.rx_drops += rx_drops;
+ if (pkts == rx_drops)
+ return 0;
+
+ pkts -= rx_drops;
+ }
+ }
+ }
+ return pkts;
+}
+
+static inline void send_packets(odp_packet_t *pkt_tbl,
+ int pkts,
+ int use_event_queue,
+ odp_queue_t tx_queue,
+ odp_pktout_queue_t pktout_queue,
+ stats_t *stats)
+{
+ int sent;
+ unsigned int tx_drops;
+ int i;
+
+ if (odp_unlikely(use_event_queue))
+ sent = event_queue_send(tx_queue, pkt_tbl, pkts);
+ else
+ sent = odp_pktout_send(pktout_queue, pkt_tbl, pkts);
+
+ sent = odp_unlikely(sent < 0) ? 0 : sent;
+ tx_drops = pkts - sent;
+
+ if (odp_unlikely(tx_drops)) {
+ stats->s.tx_drops += tx_drops;
+
+ /* Drop rejected packets */
+ for (i = sent; i < pkts; i++)
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ stats->s.packets += pkts;
+}
+
+/*
+ * Packet IO worker thread using scheduled queues and vector mode.
+ *
+ * arg thread arguments of type 'thread_args_t *'
+ */
+static int run_worker_sched_mode_vector(void *arg)
+{
+ int thr;
+ int i;
+ int pktio, num_pktio;
+ uint16_t max_burst;
+ odp_thrmask_t mask;
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
+ odp_queue_t tx_queue[MAX_PKTIOS];
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ const appl_args_t *appl_args = &gbl_args->appl;
+ int use_event_queue = gbl_args->appl.out_mode;
+ pktin_mode_t in_mode = gbl_args->appl.in_mode;
+
+ thr = odp_thread_id();
+ max_burst = gbl_args->appl.burst_rx;
+
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
+ }
+ }
+
+ num_pktio = thr_args->num_pktio;
+
+ if (num_pktio > MAX_PKTIOS) {
+ ODPH_ERR("Too many pktios %i\n", num_pktio);
+ return -1;
+ }
+
+ for (pktio = 0; pktio < num_pktio; pktio++) {
+ tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
+ pktout[pktio] = thr_args->pktio[pktio].pktout;
+ }
+
+ printf("[%02i] PKTIN_SCHED_%s_VECTOR, %s\n", thr,
+ (in_mode == SCHED_PARALLEL) ? "PARALLEL" :
+ ((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ /* Loop packets */
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ odp_event_t ev_tbl[MAX_PKT_BURST];
+ int events;
+
+ events = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
+
+ if (events <= 0)
+ continue;
+
+ for (i = 0; i < events; i++) {
+ odp_packet_vector_t pkt_vec = ODP_PACKET_VECTOR_INVALID;
+ odp_packet_t *pkt_tbl;
+ odp_packet_t pkt;
+ int src_idx, dst_idx;
+ int pkts;
+
+ if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev_tbl[i]);
+ pkt_tbl = &pkt;
+ pkts = 1;
+ } else {
+ ODPH_ASSERT(odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR);
+ pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
+ pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
+ }
+
+ prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
+
+ pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
+
+ if (odp_unlikely(pkts) == 0) {
+ if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
+ odp_packet_vector_free(pkt_vec);
+ continue;
+ }
+
+ /* packets from the same queue are from the same interface */
+ src_idx = odp_packet_input_index(pkt_tbl[0]);
+ ODPH_ASSERT(src_idx >= 0);
+ dst_idx = gbl_args->dst_port_from_idx[src_idx];
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue[dst_idx],
+ pktout[dst_idx],
+ stats);
+
+ if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
+ odp_packet_vector_free(pkt_vec);
+ }
+ }
+
+ /*
+ * Free prefetched packets before entering the thread barrier.
+ * Such packets can block sending of later packets in other threads
+ * that then would never enter the thread barrier and we would
+ * end up in a dead-lock.
+ */
+ odp_schedule_pause();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ /* Wait until pktio devices are stopped */
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Free remaining events in queues */
+ odp_schedule_resume();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL,
+ odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ return 0;
+}
+
+/*
+ * Packet IO worker thread using scheduled queues
+ *
+ * arg thread arguments of type 'thread_args_t *'
+ */
+static int run_worker_sched_mode(void *arg)
+{
+ int pkts;
+ int thr;
+ int dst_idx;
+ int i;
+ int pktio, num_pktio;
+ uint16_t max_burst;
+ odp_thrmask_t mask;
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
+ odp_queue_t tx_queue[MAX_PKTIOS];
+ char extra_str[EXTRA_STR_LEN];
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ const appl_args_t *appl_args = &gbl_args->appl;
+ int use_event_queue = gbl_args->appl.out_mode;
+ pktin_mode_t in_mode = gbl_args->appl.in_mode;
+
+ thr = odp_thread_id();
+ max_burst = gbl_args->appl.burst_rx;
+
+ memset(extra_str, 0, EXTRA_STR_LEN);
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
+ }
+
+ if (gbl_args->appl.verbose) {
+ uint64_t tmp = (uint64_t)(uintptr_t)thr_args->group[i];
+
+ printf("[%02i] Joined group 0x%" PRIx64 "\n", thr, tmp);
+ }
+ }
+
+ if (thr_args->num_grp_join)
+ snprintf(extra_str, EXTRA_STR_LEN, ", joined %i groups", thr_args->num_grp_join);
+ else if (gbl_args->appl.num_groups == 0)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_ALL");
+ else if (gbl_args->appl.num_groups)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_WORKER");
+
+ num_pktio = thr_args->num_pktio;
+
+ if (num_pktio > MAX_PKTIOS) {
+ ODPH_ERR("Too many pktios %i\n", num_pktio);
+ return -1;
+ }
+
+ for (pktio = 0; pktio < num_pktio; pktio++) {
+ tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
+ pktout[pktio] = thr_args->pktio[pktio].pktout;
+ }
+
+ printf("[%02i] PKTIN_SCHED_%s, %s%s\n", thr,
+ (in_mode == SCHED_PARALLEL) ? "PARALLEL" :
+ ((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT", extra_str);
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ /* Loop packets */
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ odp_event_t ev_tbl[MAX_PKT_BURST];
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int src_idx;
+
+ pkts = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
+
+ if (pkts <= 0)
+ continue;
+
+ odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
+
+ prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
+
+ pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
+
+ if (odp_unlikely(pkts) == 0)
+ continue;
+
+ /* packets from the same queue are from the same interface */
+ src_idx = odp_packet_input_index(pkt_tbl[0]);
+ ODPH_ASSERT(src_idx >= 0);
+ dst_idx = gbl_args->dst_port_from_idx[src_idx];
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue[dst_idx],
+ pktout[dst_idx],
+ stats);
+ }
+
+ /*
+ * Free prefetched packets before entering the thread barrier.
+ * Such packets can block sending of later packets in other threads
+ * that then would never enter the thread barrier and we would
+ * end up in a dead-lock.
+ */
+ odp_schedule_pause();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+ odp_event_free(ev);
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ /* Wait until pktio devices are stopped */
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Free remaining events in queues */
+ odp_schedule_resume();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL,
+ odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ return 0;
+}
+
+/*
+ * Packet IO worker thread using plain queues
+ *
+ * arg thread arguments of type 'thread_args_t *'
+ */
+static int run_worker_plain_queue_mode(void *arg)
+{
+ int thr;
+ int pkts;
+ uint16_t max_burst;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int dst_idx, num_pktio;
+ odp_queue_t queue;
+ odp_pktout_queue_t pktout;
+ odp_queue_t tx_queue;
+ int pktio = 0;
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ const appl_args_t *appl_args = &gbl_args->appl;
+ int use_event_queue = gbl_args->appl.out_mode;
+ int i;
+
+ thr = odp_thread_id();
+ max_burst = gbl_args->appl.burst_rx;
+
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
+ tx_queue = thr_args->pktio[pktio].tx_queue;
+
+ printf("[%02i] num pktios %i, PKTIN_QUEUE, %s\n", thr, num_pktio,
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ /* Loop packets */
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ odp_event_t event[MAX_PKT_BURST];
+
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
+ if (odp_unlikely(use_event_queue))
+ tx_queue = thr_args->pktio[pktio].tx_queue;
+
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+
+ pkts = odp_queue_deq_multi(queue, event, max_burst);
+ if (odp_unlikely(pkts <= 0))
+ continue;
+
+ odp_packet_from_event_multi(pkt_tbl, event, pkts);
+
+ prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
+
+ pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
+
+ if (odp_unlikely(pkts) == 0)
+ continue;
+
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue,
+ pktout,
+ stats);
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ /* Wait until pktio devices are stopped */
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Free remaining events in queues */
+ for (i = 0; i < num_pktio; i++) {
+ odp_time_t recv_last = odp_time_local();
+ odp_time_t since_last;
+
+ queue = thr_args->pktio[i].rx_queue;
+ do {
+ odp_event_t ev = odp_queue_deq(queue);
+
+ if (ev != ODP_EVENT_INVALID) {
+ recv_last = odp_time_local();
+ odp_event_free(ev);
+ }
+
+ since_last = odp_time_diff(odp_time_local(), recv_last);
+ } while (odp_time_to_ns(since_last) < ODP_TIME_SEC_IN_NS);
+ }
+
+ return 0;
+}
+
+/*
+ * Packet IO worker thread accessing IO resources directly
+ *
+ * arg thread arguments of type 'thread_args_t *'
+ */
+static int run_worker_direct_mode(void *arg)
+{
+ int thr;
+ int pkts;
+ uint16_t max_burst;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int dst_idx, num_pktio;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ odp_queue_t tx_queue;
+ int pktio = 0;
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ const appl_args_t *appl_args = &gbl_args->appl;
+ int use_event_queue = gbl_args->appl.out_mode;
+
+ thr = odp_thread_id();
+ max_burst = gbl_args->appl.burst_rx;
+
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+ tx_queue = thr_args->pktio[pktio].tx_queue;
+
+ printf("[%02i] num pktios %i, PKTIN_DIRECT, %s\n", thr, num_pktio,
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ /* Loop packets */
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+ if (odp_unlikely(use_event_queue))
+ tx_queue = thr_args->pktio[pktio].tx_queue;
+
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+
+ pkts = odp_pktin_recv(pktin, pkt_tbl, max_burst);
+ if (odp_unlikely(pkts <= 0))
+ continue;
+
+ prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
+
+ pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
+
+ if (odp_unlikely(pkts) == 0)
+ continue;
+
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue,
+ pktout,
+ stats);
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ return 0;
+}
+
+static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_pool_t vec_pool,
+ odp_pktio_capability_t pktio_capa)
+{
+ uint64_t vec_tmo_ns;
+ uint32_t vec_size;
+
+ pktin_param->vector.enable = true;
+ pktin_param->vector.pool = vec_pool;
+
+ if (gbl_args->appl.vec_size == 0)
+ vec_size = DEFAULT_VEC_SIZE;
+ else
+ vec_size = gbl_args->appl.vec_size;
+
+ if (vec_size > pktio_capa.vector.max_size ||
+ vec_size < pktio_capa.vector.min_size) {
+ if (gbl_args->appl.vec_size == 0) {
+ vec_size = (vec_size > pktio_capa.vector.max_size) ?
+ pktio_capa.vector.max_size : pktio_capa.vector.min_size;
+ printf("\nWarning: Modified vector size to %u\n\n", vec_size);
+ } else {
+ ODPH_ERR("Invalid pktio vector size %u, valid range [%u, %u]\n",
+ vec_size, pktio_capa.vector.min_size, pktio_capa.vector.max_size);
+ return -1;
+ }
+ }
+ pktin_param->vector.max_size = vec_size;
+
+ if (gbl_args->appl.vec_tmo_ns == 0)
+ vec_tmo_ns = DEFAULT_VEC_TMO;
+ else
+ vec_tmo_ns = gbl_args->appl.vec_tmo_ns;
+
+ if (vec_tmo_ns > pktio_capa.vector.max_tmo_ns ||
+ vec_tmo_ns < pktio_capa.vector.min_tmo_ns) {
+ if (gbl_args->appl.vec_tmo_ns == 0) {
+ vec_tmo_ns = (vec_tmo_ns > pktio_capa.vector.max_tmo_ns) ?
+ pktio_capa.vector.max_tmo_ns : pktio_capa.vector.min_tmo_ns;
+ printf("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
+ } else {
+ ODPH_ERR("Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
+ ", %" PRIu64 "]\n", vec_tmo_ns,
+ pktio_capa.vector.min_tmo_ns, pktio_capa.vector.max_tmo_ns);
+ return -1;
+ }
+ }
+ pktin_param->vector.max_tmo_ns = vec_tmo_ns;
+
+ return 0;
+}
+
/*
 * Create a pktio handle and configure its RX/TX queues, storing the
 * resulting queue handles and counts into gbl_args->pktios[idx].
 *
 * dev       Name of device to open
 * idx       Pktio index in gbl_args->pktios[]
 * num_rx    Requested number of RX queues (may be reduced to capability)
 * num_tx    Requested number of TX queues (may be reduced to capability)
 * pool      Pool to associate with device for packet RX/TX
 * vec_pool  Vector pool (used only when vector mode is enabled)
 * group     Schedule group for scheduled input queues
 *
 * Returns 0 on success, -1 on failure
 */
static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_pool_t pool,
			odp_pool_t vec_pool, odp_schedule_group_t group)
{
	odp_pktio_t pktio;
	odp_pktio_param_t pktio_param;
	odp_schedule_sync_t sync_mode;
	odp_pktio_capability_t pktio_capa;
	odp_pktio_config_t config;
	odp_pktin_queue_param_t pktin_param;
	odp_pktout_queue_param_t pktout_param;
	odp_pktio_op_mode_t mode_rx;
	odp_pktio_op_mode_t mode_tx;
	pktin_mode_t in_mode = gbl_args->appl.in_mode;
	odp_pktio_info_t info;
	uint8_t *addr;

	/* Map application in/out modes to ODP pktio modes */
	odp_pktio_param_init(&pktio_param);

	if (in_mode == PLAIN_QUEUE)
		pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
	else if (in_mode != DIRECT_RECV) /* pktin_mode SCHED_* */
		pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;

	if (gbl_args->appl.out_mode != PKTOUT_DIRECT)
		pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;

	pktio = odp_pktio_open(dev, pool, &pktio_param);
	if (pktio == ODP_PKTIO_INVALID) {
		ODPH_ERR("Pktio open failed: %s\n", dev);
		return -1;
	}

	if (odp_pktio_info(pktio, &info)) {
		ODPH_ERR("Pktio info failed: %s\n", dev);
		return -1;
	}

	if (gbl_args->appl.verbose)
		odp_pktio_print(pktio);

	if (odp_pktio_capability(pktio, &pktio_capa)) {
		ODPH_ERR("Pktio capability query failed: %s\n", dev);
		return -1;
	}

	odp_pktio_config_init(&config);

	/* Input timestamping requires capability support */
	if (gbl_args->appl.input_ts) {
		if (!pktio_capa.config.pktin.bit.ts_all) {
			ODPH_ERR("Packet input timestamping not supported: %s\n", dev);
			return -1;
		}
		config.pktin.bit.ts_all = 1;
	}

	/* Parsing is only needed for error checking and checksum offload */
	config.parser.layer = ODP_PROTO_LAYER_NONE;
	if (gbl_args->appl.error_check || gbl_args->appl.chksum)
		config.parser.layer = ODP_PROTO_LAYER_ALL;

	if (gbl_args->appl.chksum) {
		config.pktout.bit.ipv4_chksum_ena = 1;
		config.pktout.bit.udp_chksum_ena = 1;
		config.pktout.bit.tcp_chksum_ena = 1;
	}

	/* Provide hint to pktio that packet references are not used */
	config.pktout.bit.no_packet_refs = 1;

	odp_pktio_config(pktio, &config);

	/* Enable promisc mode only if not already enabled */
	if (gbl_args->appl.promisc_mode && odp_pktio_promisc_mode(pktio) != 1) {
		if (!pktio_capa.set_op.op.promisc_mode) {
			ODPH_ERR("Promisc mode set not supported: %s\n", dev);
			return -1;
		}

		/* Enable promisc mode */
		if (odp_pktio_promisc_mode_set(pktio, true)) {
			ODPH_ERR("Promisc mode enable failed: %s\n", dev);
			return -1;
		}
	}

	/* Optional MTU override; validated per direction against capability */
	if (gbl_args->appl.mtu) {
		uint32_t maxlen_input = pktio_capa.maxlen.max_input ? gbl_args->appl.mtu : 0;
		uint32_t maxlen_output = pktio_capa.maxlen.max_output ? gbl_args->appl.mtu : 0;

		if (!pktio_capa.set_op.op.maxlen) {
			ODPH_ERR("Modifying interface MTU not supported: %s\n", dev);
			return -1;
		}

		if (maxlen_input &&
		    (maxlen_input < pktio_capa.maxlen.min_input ||
		     maxlen_input > pktio_capa.maxlen.max_input)) {
			ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
				 "(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_input, dev,
				 pktio_capa.maxlen.min_input, pktio_capa.maxlen.max_input);
			return -1;
		}
		if (maxlen_output &&
		    (maxlen_output < pktio_capa.maxlen.min_output ||
		     maxlen_output > pktio_capa.maxlen.max_output)) {
			ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
				 "(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_output, dev,
				 pktio_capa.maxlen.min_output, pktio_capa.maxlen.max_output);
			return -1;
		}

		if (odp_pktio_maxlen_set(pktio, maxlen_input, maxlen_output)) {
			ODPH_ERR("Setting MTU failed: %s\n", dev);
			return -1;
		}
	}

	odp_pktin_queue_param_init(&pktin_param);
	odp_pktout_queue_param_init(&pktout_param);

	/* By default use a queue per worker. Sched mode ignores rx side
	 * setting. */
	mode_rx = ODP_PKTIO_OP_MT_UNSAFE;
	mode_tx = ODP_PKTIO_OP_MT_UNSAFE;

	if (gbl_args->appl.sched_mode) {
		odp_schedule_prio_t prio;

		/* Per-interface priority, or the scheduler default */
		if (gbl_args->appl.num_prio) {
			prio = gbl_args->appl.prio[idx];
		} else {
			prio = odp_schedule_default_prio();
			gbl_args->appl.prio[idx] = prio;
		}

		if (gbl_args->appl.in_mode == SCHED_ATOMIC)
			sync_mode = ODP_SCHED_SYNC_ATOMIC;
		else if (gbl_args->appl.in_mode == SCHED_ORDERED)
			sync_mode = ODP_SCHED_SYNC_ORDERED;
		else
			sync_mode = ODP_SCHED_SYNC_PARALLEL;

		pktin_param.queue_param.sched.prio = prio;
		pktin_param.queue_param.sched.sync = sync_mode;
		pktin_param.queue_param.sched.group = group;
	}

	/* Reduce queue counts to capability; fall back to MT-safe queues
	 * when queues must be shared between workers */
	if (num_rx > (int)pktio_capa.max_input_queues) {
		num_rx = pktio_capa.max_input_queues;
		mode_rx = ODP_PKTIO_OP_MT;
		printf("Warning: %s: maximum number of input queues: %i\n", dev, num_rx);
	}

	if (num_rx < gbl_args->appl.num_workers)
		printf("Warning: %s: sharing %i input queues between %i workers\n",
		       dev, num_rx, gbl_args->appl.num_workers);

	if (num_tx > (int)pktio_capa.max_output_queues) {
		printf("Warning: %s: sharing %i output queues between %i workers\n",
		       dev, pktio_capa.max_output_queues, num_tx);
		num_tx = pktio_capa.max_output_queues;
		mode_tx = ODP_PKTIO_OP_MT;
	}

	/* Hash flows to queues when multiple RX queues (or flow awareness)
	 * are in use */
	pktin_param.hash_enable = (num_rx > 1 || gbl_args->appl.flow_aware) ? 1 : 0;
	pktin_param.hash_proto.proto.ipv4_udp = 1;
	pktin_param.num_queues = num_rx;
	pktin_param.op_mode = mode_rx;

	pktout_param.op_mode = mode_tx;
	pktout_param.num_queues = num_tx;

	if (gbl_args->appl.vector_mode) {
		if (!pktio_capa.vector.supported) {
			ODPH_ERR("Packet vector input not supported: %s\n", dev);
			return -1;
		}
		if (set_pktin_vector_params(&pktin_param, vec_pool, pktio_capa))
			return -1;
	}

	if (odp_pktin_queue_config(pktio, &pktin_param)) {
		ODPH_ERR("Input queue config failed: %s\n", dev);
		return -1;
	}

	if (odp_pktout_queue_config(pktio, &pktout_param)) {
		ODPH_ERR("Output queue config failed: %s\n", dev);
		return -1;
	}

	/* Query direct queues or event queues depending on the IO modes */
	if (gbl_args->appl.in_mode == DIRECT_RECV) {
		if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx) != num_rx) {
			ODPH_ERR("Pktin queue query failed: %s\n", dev);
			return -1;
		}
	} else {
		if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx) != num_rx) {
			ODPH_ERR("Pktin event queue query failed: %s\n", dev);
			return -1;
		}
	}

	if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
		if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx) != num_tx) {
			ODPH_ERR("Pktout queue query failed: %s\n", dev);
			return -1;
		}
	} else {
		if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx) != num_tx) {
			ODPH_ERR("Event queue query failed: %s\n", dev);
			return -1;
		}
	}

	if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[idx].addr,
			       ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
		ODPH_ERR("Reading interface Ethernet address failed: %s\n", dev);
		return -1;
	}
	addr = gbl_args->port_eth_addr[idx].addr;

	printf("  dev: %s, drv: %s, rx_queues: %i, tx_queues: %i, mac: "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev, info.drv_name, num_rx, num_tx,
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	gbl_args->pktios[idx].num_rx_queue = num_rx;
	gbl_args->pktios[idx].num_tx_queue = num_tx;
	gbl_args->pktios[idx].pktio = pktio;

	return 0;
}
+
+/*
+ * Print statistics
+ *
+ * num_workers Number of worker threads
+ * thr_stats Pointers to stats storage
+ * duration Number of seconds to loop in
+ * timeout Number of seconds for stats calculation
+ */
+static int print_speed_stats(int num_workers, stats_t **thr_stats,
+ int duration, int timeout)
+{
+ uint64_t pkts = 0;
+ uint64_t pkts_prev = 0;
+ uint64_t pps;
+ uint64_t rx_drops, tx_drops, copy_fails;
+ uint64_t maximum_pps = 0;
+ int i;
+ int elapsed = 0;
+ int stats_enabled = 1;
+ int loop_forever = (duration == 0);
+
+ if (timeout <= 0) {
+ stats_enabled = 0;
+ timeout = 1;
+ }
+ /* Wait for all threads to be ready*/
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ do {
+ pkts = 0;
+ rx_drops = 0;
+ tx_drops = 0;
+ copy_fails = 0;
+
+ sleep(timeout);
+
+ for (i = 0; i < num_workers; i++) {
+ pkts += thr_stats[i]->s.packets;
+ rx_drops += thr_stats[i]->s.rx_drops;
+ tx_drops += thr_stats[i]->s.tx_drops;
+ copy_fails += thr_stats[i]->s.copy_fails;
+ }
+ if (stats_enabled) {
+ pps = (pkts - pkts_prev) / timeout;
+ if (pps > maximum_pps)
+ maximum_pps = pps;
+ printf("%" PRIu64 " pps, %" PRIu64 " max pps, ", pps,
+ maximum_pps);
+
+ if (gbl_args->appl.packet_copy)
+ printf("%" PRIu64 " copy fails, ", copy_fails);
+
+ printf("%" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
+ rx_drops, tx_drops);
+
+ pkts_prev = pkts;
+ }
+ elapsed += timeout;
+ } while (!odp_atomic_load_u32(&gbl_args->exit_threads) && (loop_forever ||
+ (elapsed < duration)));
+
+ if (stats_enabled)
+ printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
+ maximum_pps);
+
+ return pkts > 100 ? 0 : -1;
+}
+
+static void print_port_mapping(void)
+{
+ int if_count;
+ int pktio;
+
+ if_count = gbl_args->appl.if_count;
+
+ printf("\nPort config\n--------------------\n");
+
+ for (pktio = 0; pktio < if_count; pktio++) {
+ const char *dev = gbl_args->appl.if_names[pktio];
+
+ printf("Port %i (%s)\n", pktio, dev);
+ printf(" rx workers %i\n",
+ gbl_args->pktios[pktio].num_rx_thr);
+ printf(" tx workers %i\n",
+ gbl_args->pktios[pktio].num_tx_thr);
+ printf(" rx queues %i\n",
+ gbl_args->pktios[pktio].num_rx_queue);
+ printf(" tx queues %i\n",
+ gbl_args->pktios[pktio].num_tx_queue);
+ }
+
+ printf("\n");
+}
+
+/*
+ * Find the destination port for a given input port
+ *
+ * port Input port index
+ */
+static int find_dest_port(int port)
+{
+ /* Even number of ports */
+ if (gbl_args->appl.if_count % 2 == 0)
+ return (port % 2 == 0) ? port + 1 : port - 1;
+
+ /* Odd number of ports */
+ if (port == gbl_args->appl.if_count - 1)
+ return 0;
+ else
+ return port + 1;
+}
+
/*
 * Bind worker threads to interfaces and calculate number of queues needed
 *
 * less workers (N) than interfaces (M)
 *  - assign each worker to process every Nth interface
 *  - workers process inequal number of interfaces, when M is not divisible by N
 *  - needs only single queue per interface
 * otherwise
 *  - assign an interface to every Mth worker
 *  - interfaces are processed by inequal number of workers, when N is not
 *    divisible by M
 *  - tries to configure a queue per worker per interface
 *  - shares queues, if interface capability does not allows a queue per worker
 */
static void bind_workers(void)
{
	int if_count, num_workers;
	int rx_idx, tx_idx, thr, pktio, i;
	thread_args_t *thr_args;

	if_count = gbl_args->appl.if_count;
	num_workers = gbl_args->appl.num_workers;

	if (gbl_args->appl.sched_mode) {
		/* all threads receive and send on all pktios */
		for (i = 0; i < if_count; i++) {
			gbl_args->pktios[i].num_rx_thr = num_workers;
			gbl_args->pktios[i].num_tx_thr = num_workers;
		}

		for (thr = 0; thr < num_workers; thr++) {
			thr_args = &gbl_args->thread_args[thr];
			thr_args->num_pktio = if_count;

			/* In sched mode, pktios are not cross connected with
			 * local pktio indexes */
			for (i = 0; i < if_count; i++) {
				thr_args->pktio[i].rx_idx = i;
				thr_args->pktio[i].tx_idx = i;
			}
		}
	} else {
		/* initialize port forwarding table */
		for (rx_idx = 0; rx_idx < if_count; rx_idx++)
			gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx);

		if (if_count > num_workers) {
			/* Less workers than pktios. Assign single worker per
			 * pktio. Iterate over interfaces, wrapping the worker
			 * index so each interface gets exactly one worker. */
			thr = 0;

			for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
				thr_args = &gbl_args->thread_args[thr];
				pktio = thr_args->num_pktio;
				/* Cross connect rx to tx */
				tx_idx = gbl_args->dst_port[rx_idx];
				thr_args->pktio[pktio].rx_idx = rx_idx;
				thr_args->pktio[pktio].tx_idx = tx_idx;
				thr_args->num_pktio++;

				gbl_args->pktios[rx_idx].num_rx_thr++;
				gbl_args->pktios[tx_idx].num_tx_thr++;

				thr++;
				if (thr >= num_workers)
					thr = 0;
			}
		} else {
			/* More workers than pktios. Assign at least one worker
			 * per pktio. Iterate over workers, wrapping the
			 * interface index so each worker gets exactly one
			 * interface. */
			rx_idx = 0;

			for (thr = 0; thr < num_workers; thr++) {
				thr_args = &gbl_args->thread_args[thr];
				pktio = thr_args->num_pktio;
				/* Cross connect rx to tx */
				tx_idx = gbl_args->dst_port[rx_idx];
				thr_args->pktio[pktio].rx_idx = rx_idx;
				thr_args->pktio[pktio].tx_idx = tx_idx;
				thr_args->num_pktio++;

				gbl_args->pktios[rx_idx].num_rx_thr++;
				gbl_args->pktios[tx_idx].num_tx_thr++;

				rx_idx++;
				if (rx_idx >= if_count)
					rx_idx = 0;
			}
		}
	}
}
+
/*
 * Bind queues to threads and fill in missing thread arguments (handles)
 *
 * For each worker/pktio pairing established by bind_workers(), pick the
 * next unassigned RX/TX queue of that interface (round-robin via the
 * per-pktio next_rx_queue/next_tx_queue cursors) and store the concrete
 * pktin/pktout/queue handles into the thread's arguments.
 */
static void bind_queues(void)
{
	int num_workers;
	int thr, i;

	num_workers = gbl_args->appl.num_workers;

	printf("\nQueue binding (indexes)\n-----------------------\n");

	for (thr = 0; thr < num_workers; thr++) {
		int rx_idx, tx_idx;
		thread_args_t *thr_args = &gbl_args->thread_args[thr];
		int num = thr_args->num_pktio;

		printf("worker %i\n", thr);

		for (i = 0; i < num; i++) {
			int rx_queue, tx_queue;

			rx_idx = thr_args->pktio[i].rx_idx;
			tx_idx = thr_args->pktio[i].tx_idx;
			/* Continue from where the previous worker left off */
			rx_queue = gbl_args->pktios[rx_idx].next_rx_queue;
			tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;

			thr_args->pktio[i].rx_queue_idx = rx_queue;
			thr_args->pktio[i].tx_queue_idx = tx_queue;
			thr_args->pktio[i].pktin =
				gbl_args->pktios[rx_idx].pktin[rx_queue];
			thr_args->pktio[i].rx_queue =
				gbl_args->pktios[rx_idx].rx_q[rx_queue];
			thr_args->pktio[i].pktout =
				gbl_args->pktios[tx_idx].pktout[tx_queue];
			thr_args->pktio[i].tx_queue =
				gbl_args->pktios[tx_idx].tx_q[tx_queue];

			if (!gbl_args->appl.sched_mode)
				printf(" rx: pktio %i, queue %i\n",
				       rx_idx, rx_queue);

			printf(" tx: pktio %i, queue %i\n",
			       tx_idx, tx_queue);

			/* Advance and wrap the round-robin cursors */
			rx_queue++;
			tx_queue++;

			if (rx_queue >= gbl_args->pktios[rx_idx].num_rx_queue)
				rx_queue = 0;
			if (tx_queue >= gbl_args->pktios[tx_idx].num_tx_queue)
				tx_queue = 0;

			gbl_args->pktios[rx_idx].next_rx_queue = rx_queue;
			gbl_args->pktios[tx_idx].next_tx_queue = tx_queue;
		}
	}

	printf("\n");
}
+
+/*
+ * Fill dst_port_from_idx[]: map the ODP pktio index of each opened interface
+ * to its destination port. Exits on failure to read a pktio index.
+ */
+static void init_port_lookup_tbl(void)
+{
+ int rx_idx, if_count;
+
+ if_count = gbl_args->appl.if_count;
+
+ for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
+ odp_pktio_t pktio = gbl_args->pktios[rx_idx].pktio;
+ int pktio_idx = odp_pktio_index(pktio);
+ int dst_port = find_dest_port(rx_idx);
+
+ if (pktio_idx < 0) {
+ ODPH_ERR("Reading pktio (%s) index failed: %i\n",
+ gbl_args->appl.if_names[rx_idx], pktio_idx);
+
+ exit(EXIT_FAILURE);
+ }
+
+ gbl_args->dst_port_from_idx[pktio_idx] = dst_port;
+ }
+}
+
+/*
+ * Print usage information
+ *
+ * progname  name of the executable (directory path is stripped for display)
+ */
+static void usage(char *progname)
+{
+ printf("\n"
+ "OpenDataPlane L2 forwarding application.\n"
+ "\n"
+ "Usage: %s [options]\n"
+ "\n"
+ " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
+ " In the above example,\n"
+ " eth0 will send pkts to eth1 and vice versa\n"
+ " eth2 will send pkts to eth3 and vice versa\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ " -i, --interface <name> Eth interfaces (comma-separated, no spaces)\n"
+ " Interface count min 1, max %i\n"
+ "\n"
+ "Optional OPTIONS:\n"
+ " -m, --mode <arg> Packet input mode\n"
+ " 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
+ " 1: Scheduler mode with parallel queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
+ " 2: Scheduler mode with atomic queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n"
+ " 3: Scheduler mode with ordered queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n"
+ " 4: Plain queue mode: PKTIN_MODE_QUEUE\n"
+ " -o, --out_mode <arg> Packet output mode\n"
+ " 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
+ " 1: Queue mode: PKTOUT_MODE_QUEUE\n"
+ " -c, --count <num> CPU count, 0=all available, default=1\n"
+ " -t, --time <sec> Time in seconds to run.\n"
+ " -a, --accuracy <sec> Time in seconds get print statistics\n"
+ " (default is 1 second).\n"
+ " -d, --dst_change <arg> 0: Don't change packets' dst eth addresses\n"
+ " 1: Change packets' dst eth addresses (default)\n"
+ " -s, --src_change <arg> 0: Don't change packets' src eth addresses\n"
+ " 1: Change packets' src eth addresses (default)\n"
+ " -r, --dst_addr <addr> Destination addresses (comma-separated, no spaces)\n"
+ " Requires also the -d flag to be set\n"
+ " -e, --error_check <arg> 0: Don't check packet errors (default)\n"
+ " 1: Check packet errors\n"
+ " -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
+ " 1: Use checksum offload\n",
+ NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS);
+
+ /* Option list continues in a second printf to keep the string literal
+  * within compiler limits */
+ printf(" -g, --groups <num> Number of new groups to create (1 ... num). Interfaces\n"
+ " are placed into the groups in round robin.\n"
+ " 0: Use SCHED_GROUP_ALL (default)\n"
+ " -1: Use SCHED_GROUP_WORKER\n"
+ " -G, --group_mode <arg> Select how threads join new groups (when -g > 0)\n"
+ " 0: All threads join all created groups (default)\n"
+ " 1: All threads join first N created groups.\n"
+ " N is number of interfaces (== active groups).\n"
+ " 2: Each thread joins a part of the first N groups\n"
+ " (in round robin).\n"
+ " -I, --prio <prio list> Schedule priority of packet input queues.\n"
+ " Comma separated list of priorities (no spaces). A value\n"
+ " per interface. All queues of an interface have the same\n"
+ " priority. Values must be between odp_schedule_min_prio\n"
+ " and odp_schedule_max_prio. odp_schedule_default_prio is\n"
+ " used by default.\n"
+ " -b, --burst_rx <num> 0: Use max burst size (default)\n"
+ " num: Max number of packets per receive call\n"
+ " -q, --rx_queues <num> Number of RX queues per interface in scheduler mode\n"
+ " 0: RX queue per worker CPU (default)\n"
+ " -p, --packet_copy 0: Don't copy packet (default)\n"
+ " 1: Create and send copy of the received packet.\n"
+ " Free the original packet.\n"
+ " -R, --data_rd <num> Number of packet data words (uint64_t) to read from\n"
+ " every received packet. Number of words is rounded down\n"
+ " to fit into the first segment of a packet. Default\n"
+ " is 0.\n"
+ " -y, --pool_per_if Create a packet (and packet vector) pool per interface.\n"
+ " 0: Share a single pool between all interfaces (default)\n"
+ " 1: Create a pool per interface\n"
+ " -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
+ " the maximum capability. Use 0 for the default.\n"
+ " -u, --vector_mode Enable vector mode.\n"
+ " Supported only with scheduler packet input modes (1-3).\n"
+ " -w, --num_vec <num> Number of vectors per pool.\n"
+ " Default is num_pkts divided by vec_size.\n"
+ " -x, --vec_size <num> Vector size (default %i).\n"
+ " -z, --vec_tmo_ns <ns> Vector timeout in ns (default %llu ns).\n"
+ " -M, --mtu <len> Interface MTU in bytes.\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
+ " -l, --packet_len <len> Maximum length of packets supported (default %d).\n"
+ " -L, --seg_len <len> Packet pool segment length\n"
+ " (default equal to packet length).\n"
+ " -F, --prefetch <num> Prefetch packet data in 64 byte multiples (default 1).\n"
+ " -f, --flow_aware Enable flow aware scheduling.\n"
+ " -T, --input_ts Enable packet input timestamping.\n"
+ " -v, --verbose Verbose output.\n"
+ " -V, --verbose_pkt Print debug information on every received packet.\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n", DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
+}
+
+/*
+ * Parse and store the command line arguments
+ *
+ * argc       argument count
+ * argv[]     argument vector
+ * appl_args  Store application arguments here
+ *
+ * Exits the process on any invalid or inconsistent option.
+ */
+static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
+{
+ int opt;
+ int long_index;
+ char *token;
+ char *tmp_str;
+ size_t str_len, len;
+ int i;
+ static const struct option longopts[] = {
+ {"count", required_argument, NULL, 'c'},
+ {"time", required_argument, NULL, 't'},
+ {"accuracy", required_argument, NULL, 'a'},
+ {"interface", required_argument, NULL, 'i'},
+ {"mode", required_argument, NULL, 'm'},
+ {"out_mode", required_argument, NULL, 'o'},
+ {"dst_addr", required_argument, NULL, 'r'},
+ {"dst_change", required_argument, NULL, 'd'},
+ {"src_change", required_argument, NULL, 's'},
+ {"error_check", required_argument, NULL, 'e'},
+ {"chksum", required_argument, NULL, 'k'},
+ {"groups", required_argument, NULL, 'g'},
+ {"group_mode", required_argument, NULL, 'G'},
+ {"prio", required_argument, NULL, 'I'},
+ {"burst_rx", required_argument, NULL, 'b'},
+ {"rx_queues", required_argument, NULL, 'q'},
+ {"packet_copy", required_argument, NULL, 'p'},
+ {"data_rd", required_argument, NULL, 'R'},
+ {"pool_per_if", required_argument, NULL, 'y'},
+ {"num_pkt", required_argument, NULL, 'n'},
+ {"num_vec", required_argument, NULL, 'w'},
+ {"vec_size", required_argument, NULL, 'x'},
+ {"vec_tmo_ns", required_argument, NULL, 'z'},
+ {"vector_mode", no_argument, NULL, 'u'},
+ {"mtu", required_argument, NULL, 'M'},
+ {"promisc_mode", no_argument, NULL, 'P'},
+ {"packet_len", required_argument, NULL, 'l'},
+ {"seg_len", required_argument, NULL, 'L'},
+ {"prefetch", required_argument, NULL, 'F'},
+ {"flow_aware", no_argument, NULL, 'f'},
+ {"input_ts", no_argument, NULL, 'T'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"verbose_pkt", no_argument, NULL, 'V'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:"
+ "b:q:p:R:y:n:l:L:w:x:z:M:F:uPfTvVh";
+
+ /* Defaults (overridden below by command line options) */
+ appl_args->time = 0; /* loop forever if time to run is 0 */
+ appl_args->accuracy = 1; /* get and print pps stats second */
+ appl_args->cpu_count = 1; /* use one worker by default */
+ appl_args->dst_change = 1; /* change eth dst address by default */
+ appl_args->src_change = 1; /* change eth src address by default */
+ appl_args->num_groups = 0; /* use default group */
+ appl_args->group_mode = 0;
+ appl_args->error_check = 0; /* don't check packet errors by default */
+ appl_args->packet_copy = 0;
+ appl_args->burst_rx = 0;
+ appl_args->rx_queues = 0;
+ appl_args->verbose = 0;
+ appl_args->verbose_pkt = 0;
+ appl_args->chksum = 0; /* don't use checksum offload by default */
+ appl_args->pool_per_if = 0;
+ appl_args->num_pkt = 0;
+ appl_args->packet_len = POOL_PKT_LEN;
+ appl_args->seg_len = UINT32_MAX;
+ appl_args->mtu = 0;
+ appl_args->promisc_mode = 0;
+ appl_args->vector_mode = 0;
+ appl_args->num_vec = 0;
+ appl_args->vec_size = 0;
+ appl_args->vec_tmo_ns = 0;
+ appl_args->flow_aware = 0;
+ appl_args->input_ts = 0;
+ appl_args->num_prio = 0;
+ appl_args->prefetch = 1;
+ appl_args->data_rd = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'c':
+ appl_args->cpu_count = atoi(optarg);
+ break;
+ case 't':
+ appl_args->time = atoi(optarg);
+ break;
+ case 'a':
+ appl_args->accuracy = atoi(optarg);
+ break;
+ case 'r':
+ len = strlen(optarg);
+ if (len == 0) {
+ ODPH_ERR("Bad dest address string\n");
+ exit(EXIT_FAILURE);
+ }
+
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Dest address malloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* store the mac addresses names */
+ memcpy(tmp_str, optarg, str_len);
+ for (token = strtok(tmp_str, ","), i = 0;
+ token != NULL; token = strtok(NULL, ","), i++) {
+ if (i >= MAX_PKTIOS) {
+ ODPH_ERR("Too many MAC addresses\n");
+ exit(EXIT_FAILURE);
+ }
+ if (odph_eth_addr_parse(&appl_args->addrs[i], token) != 0) {
+ ODPH_ERR("Invalid MAC address\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+ appl_args->addr_count = i;
+ if (appl_args->addr_count < 1) {
+ ODPH_ERR("Bad dest address count\n");
+ exit(EXIT_FAILURE);
+ }
+ free(tmp_str);
+ break;
+ case 'i':
+ len = strlen(optarg);
+ if (len == 0) {
+ ODPH_ERR("Bad pktio interface string\n");
+ exit(EXIT_FAILURE);
+ }
+
+ str_len = len + 1;
+
+ /* if_str stays allocated for the lifetime of the
+  * application: if_names[] entries point into it */
+ appl_args->if_str = malloc(str_len);
+ if (appl_args->if_str == NULL) {
+ ODPH_ERR("Pktio interface malloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* count the number of tokens separated by ',' */
+ memcpy(appl_args->if_str, optarg, str_len);
+ for (token = strtok(appl_args->if_str, ","), i = 0;
+ token != NULL;
+ token = strtok(NULL, ","), i++)
+ ;
+
+ appl_args->if_count = i;
+
+ if (appl_args->if_count < 1 || appl_args->if_count > MAX_PKTIOS) {
+ ODPH_ERR("Bad pktio interface count: %i\n", appl_args->if_count);
+ exit(EXIT_FAILURE);
+ }
+
+ /* allocate storage for the if names */
+ appl_args->if_names = calloc(appl_args->if_count, sizeof(char *));
+ if (appl_args->if_names == NULL) {
+ ODPH_ERR("Pktio interface names calloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* store the if names (reset names string) */
+ memcpy(appl_args->if_str, optarg, str_len);
+ for (token = strtok(appl_args->if_str, ","), i = 0;
+ token != NULL; token = strtok(NULL, ","), i++) {
+ appl_args->if_names[i] = token;
+ }
+ break;
+ case 'm':
+ i = atoi(optarg);
+ if (i == 1)
+ appl_args->in_mode = SCHED_PARALLEL;
+ else if (i == 2)
+ appl_args->in_mode = SCHED_ATOMIC;
+ else if (i == 3)
+ appl_args->in_mode = SCHED_ORDERED;
+ else if (i == 4)
+ appl_args->in_mode = PLAIN_QUEUE;
+ else
+ appl_args->in_mode = DIRECT_RECV;
+ break;
+ case 'o':
+ i = atoi(optarg);
+ if (i != 0)
+ appl_args->out_mode = PKTOUT_QUEUE;
+ break;
+ case 'd':
+ appl_args->dst_change = atoi(optarg);
+ break;
+ case 's':
+ appl_args->src_change = atoi(optarg);
+ break;
+ case 'e':
+ appl_args->error_check = atoi(optarg);
+ break;
+ case 'k':
+ appl_args->chksum = atoi(optarg);
+ break;
+ case 'g':
+ appl_args->num_groups = atoi(optarg);
+ break;
+ case 'G':
+ appl_args->group_mode = atoi(optarg);
+ break;
+ case 'I':
+ len = strlen(optarg);
+ if (len == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Priority list malloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memcpy(tmp_str, optarg, str_len);
+ token = strtok(tmp_str, ",");
+
+ for (i = 0; token != NULL; token = strtok(NULL, ","), i++) {
+ if (i >= MAX_PKTIOS) {
+ ODPH_ERR("Too many priorities\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->prio[i] = atoi(token);
+ appl_args->num_prio++;
+ }
+
+ if (appl_args->num_prio == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ free(tmp_str);
+ break;
+ case 'b':
+ appl_args->burst_rx = atoi(optarg);
+ break;
+ case 'q':
+ appl_args->rx_queues = atoi(optarg);
+ break;
+ case 'p':
+ appl_args->packet_copy = atoi(optarg);
+ break;
+ case 'R':
+ appl_args->data_rd = atoi(optarg);
+ break;
+ case 'y':
+ appl_args->pool_per_if = atoi(optarg);
+ break;
+ case 'n':
+ appl_args->num_pkt = atoi(optarg);
+ break;
+ case 'l':
+ appl_args->packet_len = atoi(optarg);
+ break;
+ case 'L':
+ appl_args->seg_len = atoi(optarg);
+ break;
+ case 'M':
+ appl_args->mtu = atoi(optarg);
+ break;
+ case 'P':
+ appl_args->promisc_mode = 1;
+ break;
+ case 'u':
+ appl_args->vector_mode = 1;
+ break;
+ case 'w':
+ appl_args->num_vec = atoi(optarg);
+ break;
+ case 'x':
+ appl_args->vec_size = atoi(optarg);
+ break;
+ case 'z':
+ appl_args->vec_tmo_ns = atoi(optarg);
+ break;
+ case 'F':
+ appl_args->prefetch = atoi(optarg);
+ break;
+ case 'f':
+ appl_args->flow_aware = 1;
+ break;
+ case 'T':
+ appl_args->input_ts = 1;
+ break;
+ case 'v':
+ appl_args->verbose = 1;
+ break;
+ case 'V':
+ appl_args->verbose_pkt = 1;
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Cross-option consistency checks */
+ if (appl_args->if_count == 0) {
+ ODPH_ERR("No pktio interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->num_prio && appl_args->num_prio != appl_args->if_count) {
+ ODPH_ERR("Different number of priorities and pktio interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->addr_count != 0 && appl_args->addr_count != appl_args->if_count) {
+ ODPH_ERR("Number of dest addresses differs from number of interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->burst_rx > MAX_PKT_BURST) {
+ ODPH_ERR("Burst size (%i) too large. Maximum is %i.\n",
+ appl_args->burst_rx, MAX_PKT_BURST);
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->burst_rx == 0)
+ appl_args->burst_rx = MAX_PKT_BURST;
+
+ /* extra_feat flags that the per-packet fast path needs extra work */
+ appl_args->extra_feat = 0;
+ if (appl_args->error_check || appl_args->chksum ||
+ appl_args->packet_copy || appl_args->data_rd || appl_args->verbose_pkt)
+ appl_args->extra_feat = 1;
+
+ optind = 1; /* reset 'extern optind' from the getopt lib */
+}
+
+/* Print the parsed application options and resulting configuration */
+static void print_options(void)
+{
+ int i;
+ appl_args_t *appl_args = &gbl_args->appl;
+
+ printf("\n"
+ "odp_l2fwd options\n"
+ "-----------------\n"
+ "IF-count: %i\n"
+ "Using IFs: ", appl_args->if_count);
+
+ for (i = 0; i < appl_args->if_count; ++i)
+ printf(" %s", appl_args->if_names[i]);
+ printf("\n"
+ "Mode: ");
+ if (appl_args->in_mode == DIRECT_RECV)
+ printf("PKTIN_DIRECT, ");
+ else if (appl_args->in_mode == PLAIN_QUEUE)
+ printf("PKTIN_QUEUE, ");
+ else if (appl_args->in_mode == SCHED_PARALLEL)
+ printf("PKTIN_SCHED_PARALLEL, ");
+ else if (appl_args->in_mode == SCHED_ATOMIC)
+ printf("PKTIN_SCHED_ATOMIC, ");
+ else if (appl_args->in_mode == SCHED_ORDERED)
+ printf("PKTIN_SCHED_ORDERED, ");
+
+ if (appl_args->out_mode)
+ printf("PKTOUT_QUEUE\n");
+ else
+ printf("PKTOUT_DIRECT\n");
+
+ printf("MTU: ");
+ if (appl_args->mtu)
+ printf("%i bytes\n", appl_args->mtu);
+ else
+ printf("interface default\n");
+ printf("Promisc mode: %s\n", appl_args->promisc_mode ?
+ "enabled" : "disabled");
+ printf("Flow aware: %s\n", appl_args->flow_aware ?
+ "yes" : "no");
+ printf("Input TS: %s\n", appl_args->input_ts ? "yes" : "no");
+ printf("Burst size: %i\n", appl_args->burst_rx);
+ printf("RX queues per IF: %i\n", appl_args->rx_queues);
+ printf("Number of pools: %i\n", appl_args->pool_per_if ?
+ appl_args->if_count : 1);
+
+ if (appl_args->extra_feat) {
+ /* Trailing spaces keep the enabled feature names separated
+  * when several are printed */
+ printf("Extra features: %s%s%s%s%s\n",
+ appl_args->error_check ? "error_check " : "",
+ appl_args->chksum ? "chksum " : "",
+ appl_args->packet_copy ? "packet_copy " : "",
+ appl_args->data_rd ? "data_rd " : "",
+ appl_args->verbose_pkt ? "verbose_pkt" : "");
+ }
+
+ printf("Num worker threads: %i\n", appl_args->num_workers);
+ printf("CPU mask: %s\n", gbl_args->cpumaskstr);
+
+ if (appl_args->num_groups > 0)
+ printf("num groups: %i\n", appl_args->num_groups);
+ else if (appl_args->num_groups == 0)
+ printf("group: ODP_SCHED_GROUP_ALL\n");
+ else
+ printf("group: ODP_SCHED_GROUP_WORKER\n");
+
+ printf("Packets per pool: %u\n", appl_args->num_pkt);
+ printf("Packet length: %u\n", appl_args->packet_len);
+ printf("Segment length: %u\n", appl_args->seg_len == UINT32_MAX ? 0 :
+ appl_args->seg_len);
+ printf("Read data: %u bytes\n", appl_args->data_rd * 8);
+ printf("Prefetch data %u bytes\n", appl_args->prefetch * 64);
+ printf("Vectors per pool: %u\n", appl_args->num_vec);
+ printf("Vector size: %u\n", appl_args->vec_size);
+ printf("Priority per IF: ");
+
+ for (i = 0; i < appl_args->if_count; i++)
+ printf(" %i", appl_args->prio[i]);
+
+ printf("\n\n");
+}
+
+/* Initialize global application state: zero everything and mark all pktio
+ * and queue handles invalid */
+static void gbl_args_init(args_t *args)
+{
+ int pktio, queue;
+
+ memset(args, 0, sizeof(args_t));
+ odp_atomic_init_u32(&args->exit_threads, 0);
+
+ for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
+ args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
+
+ for (queue = 0; queue < MAX_QUEUES; queue++)
+ args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
+ }
+}
+
+/*
+ * Create 'num' scheduler groups with an empty thread mask (worker threads
+ * join the groups later). Exits on failure.
+ */
+static void create_groups(int num, odp_schedule_group_t *group)
+{
+ int i;
+ odp_thrmask_t zero;
+
+ odp_thrmask_zero(&zero);
+
+ /* Create groups */
+ for (i = 0; i < num; i++) {
+ group[i] = odp_schedule_group_create(NULL, &zero);
+
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Group create failed\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+/*
+ * Fill in vector pool parameters from command line options, clamped to pool
+ * capabilities. User-requested values that exceed capabilities are an error;
+ * default values are silently reduced with a warning.
+ *
+ * Returns 0 on success, -1 when a requested value exceeds capabilities.
+ */
+static int set_vector_pool_params(odp_pool_param_t *params, const odp_pool_capability_t *pool_capa)
+{
+ uint32_t num_vec, vec_size;
+
+ if (gbl_args->appl.vec_size == 0)
+ vec_size = DEFAULT_VEC_SIZE;
+ else
+ vec_size = gbl_args->appl.vec_size;
+
+ ODPH_ASSERT(pool_capa->vector.max_size > 0);
+ if (vec_size > pool_capa->vector.max_size) {
+ if (gbl_args->appl.vec_size == 0) {
+ vec_size = pool_capa->vector.max_size;
+ printf("\nWarning: Vector size reduced to %u\n\n", vec_size);
+ } else {
+ ODPH_ERR("Vector size too big %u. Maximum is %u.\n",
+ vec_size, pool_capa->vector.max_size);
+ return -1;
+ }
+ }
+
+ if (gbl_args->appl.num_vec == 0) {
+ /* Default: enough vectors to hold all packets (round up) */
+ uint32_t num_pkt = gbl_args->appl.num_pkt ?
+ gbl_args->appl.num_pkt : DEFAULT_NUM_PKT;
+
+ num_vec = (num_pkt + vec_size - 1) / vec_size;
+ } else {
+ num_vec = gbl_args->appl.num_vec;
+ }
+
+ /* max_num == 0 means the implementation sets no limit */
+ if (pool_capa->vector.max_num && num_vec > pool_capa->vector.max_num) {
+ if (gbl_args->appl.num_vec == 0) {
+ num_vec = pool_capa->vector.max_num;
+ printf("\nWarning: number of vectors reduced to %u\n\n", num_vec);
+ } else {
+ ODPH_ERR("Too many vectors (%u) per pool. Maximum is %u.\n",
+ num_vec, pool_capa->vector.max_num);
+ return -1;
+ }
+ }
+
+ params->vector.num = num_vec;
+ params->vector.max_size = vec_size;
+ params->type = ODP_POOL_VECTOR;
+
+ return 0;
+}
+
+/*
+ * L2 forwarding main function
+ */
+int main(int argc, char *argv[])
+{
+ odph_helper_options_t helper_options;
+ odph_thread_param_t thr_param[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ int i;
+ int num_workers, num_thr;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask;
+ odph_ethaddr_t new_addr;
+ odp_pool_param_t params;
+ int ret;
+ stats_t *stats[MAX_WORKERS];
+ int if_count, num_pools, num_vec_pools;
+ int (*thr_run_func)(void *);
+ odp_instance_t instance;
+ int num_groups, max_groups;
+ odp_schedule_group_t group[MAX_GROUPS];
+ odp_pool_t pool_tbl[MAX_PKTIOS], vec_pool_tbl[MAX_PKTIOS];
+ odp_pool_t pool, vec_pool;
+ odp_init_t init;
+ odp_pool_capability_t pool_capa;
+ odp_schedule_config_t sched_config;
+ odp_schedule_capability_t sched_capa;
+ uint32_t pkt_len, num_pkt, seg_len;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init);
+
+ /* List features not to be used (may optimize performance) */
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ /* Signal handler has to be registered before global init in case ODP
+ * implementation creates internal threads/processes. */
+ signal(SIGINT, sig_handler);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared mem reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ gbl_args = odp_shm_addr(shm);
+
+ if (gbl_args == NULL) {
+ ODPH_ERR("Shared mem addr failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ gbl_args_init(gbl_args);
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &gbl_args->appl);
+
+ odp_sys_info_print();
+
+ if (sched_mode(gbl_args->appl.in_mode))
+ gbl_args->appl.sched_mode = 1;
+
+ num_workers = MAX_WORKERS;
+ if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
+ num_workers = gbl_args->appl.cpu_count;
+
+ /* Get default worker cpumask */
+ num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+ (void)odp_cpumask_to_str(&cpumask, gbl_args->cpumaskstr, sizeof(gbl_args->cpumaskstr));
+
+ gbl_args->appl.num_workers = num_workers;
+
+ print_options();
+
+ for (i = 0; i < num_workers; i++)
+ gbl_args->thread_args[i].thr_idx = i;
+
+ if_count = gbl_args->appl.if_count;
+
+ num_pools = 1;
+ if (gbl_args->appl.pool_per_if)
+ num_pools = if_count;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capability failed\n");
+ return -1;
+ }
+
+ if (num_pools > (int)pool_capa.pkt.max_pools) {
+ ODPH_ERR("Too many pools %i\n", num_pools);
+ return -1;
+ }
+
+ pkt_len = gbl_args->appl.packet_len;
+
+ if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len) {
+ pkt_len = pool_capa.pkt.max_len;
+ printf("\nWarning: packet length reduced to %u\n\n", pkt_len);
+ }
+
+ if (gbl_args->appl.seg_len == UINT32_MAX)
+ seg_len = gbl_args->appl.packet_len;
+ else
+ seg_len = gbl_args->appl.seg_len;
+
+ /* Check whether we have sufficient segments to support requested packet
+ * length, if not adjust to bigger segment size */
+ if (seg_len < (pkt_len / pool_capa.pkt.max_segs_per_pkt))
+ seg_len = pkt_len / pool_capa.pkt.max_segs_per_pkt;
+
+ if (pool_capa.pkt.min_seg_len && seg_len < pool_capa.pkt.min_seg_len)
+ seg_len = pool_capa.pkt.min_seg_len;
+
+ if (pool_capa.pkt.max_seg_len && seg_len > pool_capa.pkt.max_seg_len)
+ seg_len = pool_capa.pkt.max_seg_len;
+
+ if ((gbl_args->appl.seg_len != UINT32_MAX) && (seg_len != gbl_args->appl.seg_len))
+ printf("\nWarning: Segment length requested %d configured %d\n",
+ gbl_args->appl.seg_len, seg_len);
+
+ if (seg_len < gbl_args->appl.data_rd * 8) {
+ ODPH_ERR("Requested data read length %u exceeds maximum segment length %u\n",
+ gbl_args->appl.data_rd * 8, seg_len);
+ return -1;
+ }
+
+ /* zero means default number of packets */
+ if (gbl_args->appl.num_pkt == 0)
+ num_pkt = DEFAULT_NUM_PKT;
+ else
+ num_pkt = gbl_args->appl.num_pkt;
+
+ if (pool_capa.pkt.max_num && num_pkt > pool_capa.pkt.max_num) {
+ if (gbl_args->appl.num_pkt == 0) {
+ num_pkt = pool_capa.pkt.max_num;
+ printf("\nWarning: number of packets reduced to %u\n\n",
+ num_pkt);
+ } else {
+ ODPH_ERR("Too many packets %u. Maximum is %u.\n",
+ num_pkt, pool_capa.pkt.max_num);
+ return -1;
+ }
+ }
+
+ gbl_args->num_pkt = num_pkt;
+ gbl_args->pkt_len = pkt_len;
+ gbl_args->seg_len = seg_len;
+
+ printf("Resulting pool parameter values:\n");
+ printf("Packets per pool: %u\n", num_pkt);
+ printf("Packet length: %u\n", pkt_len);
+ printf("Segment length: %u\n", seg_len);
+
+ /* Create packet pool */
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = seg_len;
+ params.pkt.len = pkt_len;
+ params.pkt.num = num_pkt;
+ params.type = ODP_POOL_PACKET;
+
+ for (i = 0; i < num_pools; i++) {
+ pool_tbl[i] = odp_pool_create("packet pool", &params);
+
+ if (pool_tbl[i] == ODP_POOL_INVALID) {
+ ODPH_ERR("Pool create failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.verbose)
+ odp_pool_print(pool_tbl[i]);
+ }
+
+ /* Create vector pool */
+ num_vec_pools = 0;
+ if (gbl_args->appl.vector_mode) {
+ if (!sched_mode(gbl_args->appl.in_mode)) {
+ ODPH_ERR("Vector mode only supports scheduler pktin modes (1-3)\n");
+ return -1;
+ }
+
+ num_vec_pools = gbl_args->appl.pool_per_if ? if_count : 1;
+ if (num_vec_pools > (int)pool_capa.vector.max_pools) {
+ ODPH_ERR("Too many vector pools %i\n", num_vec_pools);
+ return -1;
+ }
+
+ odp_pool_param_init(&params);
+ if (set_vector_pool_params(&params, &pool_capa))
+ return -1;
+
+ gbl_args->vector_num = params.vector.num;
+ gbl_args->vector_max_size = params.vector.max_size;
+
+ /* Print resulting values */
+ printf("Vectors per pool: %u\n", gbl_args->vector_num);
+ printf("Vector size: %u\n", gbl_args->vector_max_size);
+
+ for (i = 0; i < num_vec_pools; i++) {
+ vec_pool_tbl[i] = odp_pool_create("vector pool", &params);
+
+ if (vec_pool_tbl[i] == ODP_POOL_INVALID) {
+ ODPH_ERR("Vector pool create failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.verbose)
+ odp_pool_print(vec_pool_tbl[i]);
+ }
+ }
+
+ printf("\n");
+
+ bind_workers();
+
+ odp_schedule_config_init(&sched_config);
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.flow_aware) {
+ if (sched_capa.max_flow_id) {
+ sched_config.max_flow_id = sched_capa.max_flow_id;
+ } else {
+ ODPH_ERR("Flow aware mode not supported\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ num_groups = gbl_args->appl.num_groups;
+ /* Predefined groups are enabled by default */
+ max_groups = sched_capa.max_groups - 3;
+ if (max_groups > MAX_GROUPS)
+ max_groups = MAX_GROUPS;
+
+ if (num_groups > max_groups) {
+ ODPH_ERR("Too many groups. Maximum is %i.\n", max_groups);
+ exit(EXIT_FAILURE);
+ }
+
+ odp_schedule_config(&sched_config);
+
+ /* Default */
+ if (num_groups == 0) {
+ group[0] = ODP_SCHED_GROUP_ALL;
+ num_groups = 1;
+ } else if (num_groups == -1) {
+ group[0] = ODP_SCHED_GROUP_WORKER;
+ num_groups = 1;
+ } else {
+ create_groups(num_groups, group);
+ }
+
+ pool = pool_tbl[0];
+ /* vec_pool_tbl[] entries are initialized only in vector mode: avoid
+ * reading an uninitialized handle otherwise */
+ vec_pool = ODP_POOL_INVALID;
+ if (gbl_args->appl.vector_mode)
+ vec_pool = vec_pool_tbl[0];
+
+ printf("\nInterfaces\n----------\n");
+
+ for (i = 0; i < if_count; ++i) {
+ const char *dev = gbl_args->appl.if_names[i];
+ int num_rx, num_tx;
+ odp_schedule_group_t grp;
+
+ /* A queue per worker in scheduled mode */
+ num_rx = gbl_args->appl.rx_queues > 0 ? gbl_args->appl.rx_queues : num_workers;
+ num_tx = num_workers;
+
+ if (!gbl_args->appl.sched_mode) {
+ /* A queue per assigned worker */
+ num_rx = gbl_args->pktios[i].num_rx_thr;
+ num_tx = gbl_args->pktios[i].num_tx_thr;
+ }
+
+ /* Round robin pktios to groups */
+ grp = group[i % num_groups];
+
+ if (gbl_args->appl.pool_per_if) {
+ pool = pool_tbl[i];
+ if (gbl_args->appl.vector_mode)
+ vec_pool = vec_pool_tbl[i];
+ }
+
+ if (create_pktio(dev, i, num_rx, num_tx, pool, vec_pool, grp))
+ exit(EXIT_FAILURE);
+
+ /* Save destination eth address */
+ if (gbl_args->appl.dst_change) {
+ /* 02:00:00:00:00:XX */
+ memset(&new_addr, 0, sizeof(odph_ethaddr_t));
+ if (gbl_args->appl.addr_count) {
+ memcpy(&new_addr, &gbl_args->appl.addrs[i],
+ sizeof(odph_ethaddr_t));
+ } else {
+ new_addr.addr[0] = 0x02;
+ new_addr.addr[5] = i;
+ }
+ gbl_args->dst_eth_addr[i] = new_addr;
+ }
+
+ ret = odp_pktio_start(gbl_args->pktios[i].pktio);
+ if (ret) {
+ ODPH_ERR("Pktio start failed: %s\n", gbl_args->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* Terminate the pktio table with an invalid handle */
+ gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
+
+ bind_queues();
+
+ init_port_lookup_tbl();
+
+ if (!gbl_args->appl.sched_mode)
+ print_port_mapping();
+
+ odp_barrier_init(&gbl_args->init_barrier, num_workers + 1);
+ odp_barrier_init(&gbl_args->term_barrier, num_workers + 1);
+
+ if (gbl_args->appl.in_mode == DIRECT_RECV)
+ thr_run_func = run_worker_direct_mode;
+ else if (gbl_args->appl.in_mode == PLAIN_QUEUE)
+ thr_run_func = run_worker_plain_queue_mode;
+ else /* SCHED_PARALLEL / SCHED_ATOMIC / SCHED_ORDERED */
+ thr_run_func = gbl_args->appl.vector_mode ?
+ run_worker_sched_mode_vector : run_worker_sched_mode;
+
+ /* Create worker threads */
+ odph_thread_common_param_init(&thr_common);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ /* Synchronize thread start up. Test runs are more repeatable when
+ * thread / thread ID / CPU ID mapping stays constant. */
+ thr_common.sync = 1;
+
+ for (i = 0; i < num_workers; ++i) {
+ int j;
+ int num_join;
+ int mode = gbl_args->appl.group_mode;
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = thr_run_func;
+ thr_param[i].arg = &gbl_args->thread_args[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+
+ gbl_args->thread_args[i].num_grp_join = 0;
+
+ /* Fill in list of groups to join */
+ if (gbl_args->appl.num_groups > 0) {
+ num_join = if_count < num_groups ? if_count : num_groups;
+
+ if (mode == 0 || mode == 1) {
+ /* All threads join all groups */
+ if (mode == 0)
+ num_join = num_groups;
+
+ gbl_args->thread_args[i].num_grp_join = num_join;
+
+ for (j = 0; j < num_join; j++)
+ gbl_args->thread_args[i].group[j] = group[j];
+ } else {
+ /* Thread joins first groups in round robin */
+ if (num_workers >= num_join) {
+ gbl_args->thread_args[i].num_grp_join = 1;
+ gbl_args->thread_args[i].group[0] = group[i % num_join];
+ } else {
+ int cnt = 0;
+
+ for (j = 0; i + j < num_join; j += num_workers) {
+ gbl_args->thread_args[i].group[cnt] = group[i + j];
+ cnt++;
+ }
+
+ gbl_args->thread_args[i].num_grp_join = cnt;
+ }
+ }
+ }
+
+ stats[i] = &gbl_args->thread_args[i].stats;
+ }
+
+ num_thr = odph_thread_create(gbl_args->thread_tbl, &thr_common,
+ thr_param, num_workers);
+
+ if (num_thr != num_workers) {
+ ODPH_ERR("Worker create failed: %i\n", num_thr);
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.verbose)
+ odp_shm_print_all();
+
+ ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
+ gbl_args->appl.accuracy);
+
+ for (i = 0; i < if_count; ++i) {
+ if (odp_pktio_stop(gbl_args->pktios[i].pktio)) {
+ ODPH_ERR("Pktio stop failed: %s\n", gbl_args->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ odp_atomic_store_u32(&gbl_args->exit_threads, 1);
+ if (gbl_args->appl.in_mode != DIRECT_RECV)
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Master thread waits for other threads to exit */
+ num_thr = odph_thread_join(gbl_args->thread_tbl, num_workers);
+ if (num_thr != num_workers) {
+ ODPH_ERR("Worker join failed: %i\n", num_thr);
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < if_count; ++i) {
+ odp_pktio_t pktio = gbl_args->pktios[i].pktio;
+
+ if (gbl_args->appl.verbose && odp_pktio_extra_stat_info(pktio, NULL, 0) > 0) {
+ printf("Pktio %s extra statistics:\n", gbl_args->appl.if_names[i]);
+ odp_pktio_extra_stats_print(pktio);
+ }
+
+ if (odp_pktio_close(pktio)) {
+ ODPH_ERR("Pktio close failed: %s\n", gbl_args->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ free(gbl_args->appl.if_names);
+ free(gbl_args->appl.if_str);
+ gbl_args = NULL;
+ odp_mb_full();
+
+ for (i = 0; i < num_pools; i++) {
+ if (odp_pool_destroy(pool_tbl[i])) {
+ ODPH_ERR("Pool destroy failed: %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ for (i = 0; i < num_vec_pools; i++) {
+ if (odp_pool_destroy(vec_pool_tbl[i])) {
+ ODPH_ERR("Vector pool destroy failed: %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shm free failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Term local failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Term global failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/test/common_plat/performance/odp_l2fwd_run.sh b/test/performance/odp_l2fwd_run.sh
index dd42ede97..cd750ca35 100755
--- a/test/common_plat/performance/odp_l2fwd_run.sh
+++ b/test/performance/odp_l2fwd_run.sh
@@ -1,6 +1,6 @@
-#!/bin/sh
+#!/bin/bash
#
-# Copyright (c) 2015, Linaro Limited
+# Copyright (c) 2015-2018, Linaro Limited
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
@@ -25,12 +25,13 @@ TEST_DIR="${TEST_DIR:-$PWD}"
# directory where test sources are, including scripts
TEST_SRC_DIR=$(dirname $0)
-PATH=$TEST_DIR:$TEST_DIR/../../../example/generator:$PATH
+PATH=$TEST_DIR:$TEST_DIR/../../example/generator:$PATH
# exit codes expected by automake for skipped tests
TEST_SKIPPED=77
-PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$ODP_PLATFORM/validation
+VALIDATION_TESTDIR=platform/$ODP_PLATFORM/test/validation
+PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$VALIDATION_TESTDIR
FLOOD_MODE=0
@@ -66,12 +67,14 @@ run_l2fwd()
exit 1
fi
- # Max 4 workers
- # @todo: ensure that generator and l2fwd workers are not allocated to
- # the same CPUs
- (odp_generator${EXEEXT} --interval $FLOOD_MODE -I $IF0 \
+ export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="gen" \
+--proc-type auto --no-pci \
+--vdev net_pcap0,iface=$IF0"
+
+ # Run generator with one worker
+ (odp_generator${EXEEXT} --interval $FLOOD_MODE -I 0 \
--srcip 192.168.0.1 --dstip 192.168.0.2 \
- -m u -w 4 2>&1 > /dev/null) \
+ -m u -w 1 2>&1 > /dev/null) \
2>&1 > /dev/null &
GEN_PID=$!
@@ -85,17 +88,24 @@ run_l2fwd()
fi
LOG=odp_l2fwd_tmp.log
+ export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="l2fwd" \
+--proc-type auto --no-pci --vdev net_pcap1,iface=$IF1 \
+--vdev net_pcap2,iface=$IF2"
+
# Max 2 workers
- $STDBUF odp_l2fwd${EXEEXT} -i $IF1,$IF2 -m 0 -t 30 -c 2 | tee $LOG
- ret=$?
+ $STDBUF odp_l2fwd${EXEEXT} -i 0,1 -m 0 -t 5 -c 2 | tee $LOG
+ ret=${PIPESTATUS[0]}
- kill ${GEN_PID}
+ kill -2 ${GEN_PID}
if [ ! -f $LOG ]; then
echo "FAIL: $LOG not found"
ret=1
elif [ $ret -eq 0 ]; then
PASS_PPS=5000
+ if [ "${TEST}" = "coverage" ]; then
+ PASS_PPS=10
+ fi
MAX_PPS=$(awk '/TEST RESULT/ {print $3}' $LOG)
if [ "$MAX_PPS" -lt "$PASS_PPS" ]; then
echo "FAIL: pps below threshold $MAX_PPS < $PASS_PPS"
diff --git a/test/performance/odp_lock_perf.c b/test/performance/odp_lock_perf.c
new file mode 100644
index 000000000..0f78db3b8
--- /dev/null
+++ b/test/performance/odp_lock_perf.c
@@ -0,0 +1,699 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_lock_perf.c
+ *
+ * Performance test application for lock APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Max number of workers if num_cpu=0 */
+#define DEFAULT_MAX_WORKERS 10
+
+/* Max number of counters */
+#define MAX_COUNTERS 8
+
+#define TEST_INFO(name, test, validate) { name, test, validate }
+
/* Test repetition behaviour, selected with the --repeat option */
typedef enum repeat_t {
	REPEAT_NO,         /* run the test suite once */
	REPEAT_UNTIL_FAIL, /* rerun the suite until a validation failure */
	REPEAT_FOREVER,    /* rerun the suite forever, even after failures */
} repeat_t;

/* Counter placement relative to the lock, selected with --place */
typedef enum place_t {
	PLACE_PACK,         /* counters share the lock's cache line */
	PLACE_SEPARATE,     /* counters packed on one separate cache line */
	PLACE_ALL_SEPARATE, /* each counter on its own cache line */
} place_t;

/* Command line options */
typedef struct test_options_t {
	uint32_t num_cpu;     /* worker thread count, 0 = all available */
	uint32_t type;        /* lock type to test, 0 = all types */
	uint64_t num_round;   /* lock/unlock rounds per worker */
	repeat_t repeat;      /* repetition mode */
	uint32_t num_counter; /* counters incremented under the lock */
	place_t place;        /* counter memory placement */
} test_options_t;
+
+/* command line options default values */
+static test_options_t test_options_def = {
+ .num_cpu = 0,
+ .type = 0,
+ .num_round = 100000,
+ .repeat = REPEAT_NO,
+ .num_counter = 2,
+ .place = 2,
+};
+
typedef struct test_global_t test_global_t;

/* Test function template */
typedef void (*test_fn_t)(test_global_t *g, uint64_t **counter,
			  uint32_t num_counter);
/* Test result validation function template */
typedef int (*validate_fn_t)(test_global_t *g, uint64_t **counter,
			     uint32_t num_counter);

/* Worker thread context */
typedef struct test_thread_ctx_t {
	test_global_t *global; /* back pointer to shared test state */
	test_fn_t func;        /* test function this worker runs */
	uint64_t nsec;         /* measured duration, written by the worker */
	uint32_t idx;          /* worker index (0 .. num_cpu - 1) */
} test_thread_ctx_t;

/* Global data */
struct test_global_t {
	test_options_t test_options;
	/* Index into test_suite[] of the test case being run */
	uint32_t cur_type;
	/* Synchronizes worker start so all threads measure together */
	odp_barrier_t barrier;
	odp_cpumask_t cpumask;
	odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
	test_thread_ctx_t thread_ctx[ODP_THREAD_COUNT_MAX];
	/* One lock + counter group per lock type. Cache line alignment
	 * implements the PLACE_PACK layout (lock and counters together);
	 * 'separate' and 'all_separate' implement the other placements. */
	struct {
		struct ODP_ALIGNED_CACHE {
			odp_spinlock_t lock;
			uint64_t counter[MAX_COUNTERS];
		} spinlock;
		struct ODP_ALIGNED_CACHE {
			odp_spinlock_recursive_t lock;
			uint64_t counter[MAX_COUNTERS];
		} spinlock_recursive;
		struct ODP_ALIGNED_CACHE {
			odp_rwlock_t lock;
			uint64_t counter[MAX_COUNTERS];
		} rwlock;
		struct ODP_ALIGNED_CACHE {
			odp_rwlock_recursive_t lock;
			uint64_t counter[MAX_COUNTERS];
		} rwlock_recursive;
		struct ODP_ALIGNED_CACHE {
			odp_ticketlock_t lock;
			uint64_t counter[MAX_COUNTERS];
		} ticketlock;
		/* Counters packed on one cache line, away from the locks */
		struct ODP_ALIGNED_CACHE {
			uint64_t counter[MAX_COUNTERS];
		} separate;
		/* One counter per cache line */
		struct {
			uint64_t ODP_ALIGNED_CACHE counter;
		} all_separate[MAX_COUNTERS];
	} item;
};

/* One entry of the test suite table */
typedef struct {
	const char *name;
	test_fn_t test_fn;
	validate_fn_t validate_fn;
} test_case_t;

/* Shared test state, lives in ODP shared memory (see main()) */
static test_global_t *test_global;
+
+static inline void test_spinlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_spinlock_t *lock = &g->item.spinlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_spinlock_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_spinlock_unlock(lock);
+ }
+}
+
+static inline void test_spinlock_recursive(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_spinlock_recursive_t *lock = &g->item.spinlock_recursive.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_spinlock_recursive_lock(lock);
+ odp_spinlock_recursive_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_spinlock_recursive_unlock(lock);
+ odp_spinlock_recursive_unlock(lock);
+ }
+}
+
+static inline void test_rwlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_rwlock_t *lock = &g->item.rwlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_rwlock_write_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_rwlock_write_unlock(lock);
+ odp_rwlock_read_lock(lock);
+ for (uint32_t j = 1; j < num_counter; j++)
+ if (*counter[0] != *counter[j]) {
+ odp_rwlock_read_unlock(lock);
+ ODPH_ERR("Error: Counter mismatch\n");
+ return;
+ }
+ odp_rwlock_read_unlock(lock);
+ }
+}
+
/* Recursive rwlock: write-lock twice and increment all counters, then
 * read-lock twice and verify the counters stayed equal. Unlock order and
 * count must mirror the lock calls exactly. Returns early on mismatch;
 * the final validation then reports the short totals. */
static inline void test_rwlock_recursive(test_global_t *g, uint64_t **counter,
					 uint32_t num_counter)
{
	odp_rwlock_recursive_t *lock = &g->item.rwlock_recursive.lock;

	for (uint64_t i = 0; i < g->test_options.num_round; i++) {
		odp_rwlock_recursive_write_lock(lock);
		odp_rwlock_recursive_write_lock(lock);
		for (uint32_t j = 0; j < num_counter; j++)
			(*counter[j])++;
		odp_rwlock_recursive_write_unlock(lock);
		odp_rwlock_recursive_write_unlock(lock);
		odp_rwlock_recursive_read_lock(lock);
		odp_rwlock_recursive_read_lock(lock);
		for (uint32_t j = 1; j < num_counter; j++)
			if (*counter[0] != *counter[j]) {
				/* Release both read acquisitions before
				 * bailing out */
				odp_rwlock_recursive_read_unlock(lock);
				odp_rwlock_recursive_read_unlock(lock);
				ODPH_ERR("Error: Counter mismatch\n");
				return;
			}
		odp_rwlock_recursive_read_unlock(lock);
		odp_rwlock_recursive_read_unlock(lock);
	}
}
+
+static inline void test_ticketlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_ticketlock_t *lock = &g->item.ticketlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_ticketlock_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_ticketlock_unlock(lock);
+ }
+}
+
+static inline int validate_generic(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ int status = 0;
+ uint64_t total = (uint64_t)g->test_options.num_cpu * g->test_options.num_round;
+
+ for (uint32_t i = 0; i < num_counter; i++) {
+ if (*counter[i] != total) {
+ status = 1;
+ ODPH_ERR("Error: Counter %d value %" PRIu64 " expected %" PRIu64 "\n",
+ i, *counter[i], total);
+ }
+ }
+
+ return status;
+}
+
/* Print command line help. Defaults are taken from test_options_def so the
 * help text stays in sync with the actual defaults. */
static void print_usage(void)
{
	printf("\n"
	       "Lock performance test\n"
	       "\n"
	       "Usage: odp_lock_perf [options]\n"
	       "\n"
	       "  -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs (or max %d) (default)\n"
	       "  -t, --type Lock type to test. 0: all (default %u)\n"
	       "                 1: odp_spinlock_t\n"
	       "                 2: odp_spinlock_recursive_t\n"
	       "                 3: odp_rwlock_t\n"
	       "                 4: odp_rwlock_recursive_t\n"
	       "                 5: odp_ticketlock_t\n"
	       "  -r, --num_round Number of rounds (default %" PRIu64 ")\n"
	       "  -e, --repeat Repeat the tests (default %u)\n"
	       "                 0: no repeat, run the tests once\n"
	       "                 1: repeat until failure\n"
	       "                 2: repeat forever\n"
	       "  -o, --num_counter Number of counters (default %u)\n"
	       "  -p, --place Counter placement (default %d)\n"
	       "                 0: pack to same cache line with lock\n"
	       "                 1: pack to separate cache line\n"
	       "                 2: place each counter to separate cache line\n"
	       "  -h, --help This help\n"
	       "\n",
	       DEFAULT_MAX_WORKERS, test_options_def.type,
	       test_options_def.num_round, test_options_def.repeat,
	       test_options_def.num_counter, test_options_def.place);
}
+
/* Print the effective test configuration before the tests start */
static void print_info(test_options_t *test_options)
{
	printf("\nLock performance test configuration:\n");
	printf("  num cpu %u\n", test_options->num_cpu);
	printf("  type %u\n", test_options->type);
	printf("  num rounds %" PRIu64 "\n", test_options->num_round);
	printf("  repeat %u\n", test_options->repeat);
	printf("  num counters %u\n", test_options->num_counter);
	printf("  place %u\n", test_options->place);
	printf("\n\n");
}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ { "num_cpu", required_argument, NULL, 'c' },
+ { "type", required_argument, NULL, 't' },
+ { "num_round", required_argument, NULL, 'r' },
+ { "repeat", required_argument, NULL, 'e' },
+ { "num_counter", required_argument, NULL, 'o' },
+ { "place", required_argument, NULL, 'p' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "+c:t:r:e:o:p:h";
+
+ *test_options = test_options_def;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 't':
+ test_options->type = atoi(optarg);
+ break;
+ case 'r':
+ test_options->num_round = atoll(optarg);
+ break;
+ case 'e':
+ test_options->repeat = atoi(optarg);
+ break;
+ case 'o':
+ test_options->num_counter = atoi(optarg);
+ break;
+ case 'p':
+ test_options->place = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->num_round < 1) {
+ ODPH_ERR("Invalid number of test rounds: %" PRIu64 "\n",
+ test_options->num_round);
+ return -1;
+ }
+
+ if (test_options->num_counter < 1 ||
+ test_options->num_counter > MAX_COUNTERS) {
+ ODPH_ERR("Invalid number of counters: %" PRIu32 "\n",
+ test_options->num_counter);
+ return -1;
+ }
+
+ return ret;
+}
+
/* Resolve the worker count and CPU mask.
 *
 * num_cpu == 0 means "use all available workers", capped at
 * DEFAULT_MAX_WORKERS. The resolved count is written back to
 * test_options->num_cpu and used to size the start barrier.
 * Returns 0 on success, -1 on error.
 */
static int set_num_cpu(test_global_t *global)
{
	int ret, max_num;
	test_options_t *test_options = &global->test_options;
	int num_cpu = test_options->num_cpu;

	/* One thread used for the main thread */
	if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
		ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
		return -1;
	}

	max_num = num_cpu;
	if (num_cpu == 0) {
		max_num = ODP_THREAD_COUNT_MAX - 1;
		if (max_num > DEFAULT_MAX_WORKERS)
			max_num = DEFAULT_MAX_WORKERS;
	}

	ret = odp_cpumask_default_worker(&global->cpumask, max_num);

	/* An explicit request must be satisfied exactly */
	if (num_cpu && ret != num_cpu) {
		ODPH_ERR("Too many workers. Max supported %i.\n", ret);
		return -1;
	}

	/* Zero: all available workers */
	if (num_cpu == 0) {
		if (ret > max_num) {
			ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
			return -1;
		}

		num_cpu = ret;
		test_options->num_cpu = num_cpu;
	}

	/* Workers only: the main thread does not wait on this barrier */
	odp_barrier_init(&global->barrier, num_cpu);

	return 0;
}
+
+static int init_test(test_global_t *g, const char *name)
+{
+ printf("TEST: %s\n", name);
+
+ memset(&g->item, 0, sizeof(g->item));
+ odp_spinlock_init(&g->item.spinlock.lock);
+ odp_spinlock_recursive_init(&g->item.spinlock_recursive.lock);
+ odp_rwlock_init(&g->item.rwlock.lock);
+ odp_rwlock_recursive_init(&g->item.rwlock_recursive.lock);
+ odp_ticketlock_init(&g->item.ticketlock.lock);
+
+ return 0;
+}
+
/* Fill counter_out[0..num_counter-1] with pointers to the counters the
 * current test case should use, according to the --place option. Unused
 * slots are left NULL. For PLACE_PACK, g->cur_type selects the counter
 * group that sits on the same cache line as the lock under test; the case
 * indices 0..4 must match the order of test_suite[]. */
static void fill_counter_ptrs(test_global_t *g, uint64_t **counter_out)
{
	test_options_t *test_options = &g->test_options;

	memset(counter_out, 0, sizeof(uint64_t *) * MAX_COUNTERS);

	switch (test_options->place) {
	case PLACE_PACK:
		for (uint32_t i = 0; i < test_options->num_counter; i++) {
			switch (g->cur_type) {
			case 0:
				counter_out[i] = &g->item.spinlock.counter[i];
				break;
			case 1:
				counter_out[i] = &g->item.spinlock_recursive.counter[i];
				break;
			case 2:
				counter_out[i] = &g->item.rwlock.counter[i];
				break;
			case 3:
				counter_out[i] = &g->item.rwlock_recursive.counter[i];
				break;
			case 4:
				counter_out[i] = &g->item.ticketlock.counter[i];
				break;
			}
		}
		break;
	case PLACE_SEPARATE:
		for (uint32_t i = 0; i < test_options->num_counter; i++)
			counter_out[i] = &g->item.separate.counter[i];
		break;
	case PLACE_ALL_SEPARATE:
		for (uint32_t i = 0; i < test_options->num_counter; i++)
			counter_out[i] = &g->item.all_separate[i].counter;
		break;
	}
}
+
/* Worker thread entry point: wait on the start barrier, run the test
 * function for all rounds and record the elapsed time into the thread
 * context. arg is a test_thread_ctx_t pointer. Always returns 0. */
static int run_test(void *arg)
{
	uint64_t nsec;
	odp_time_t t1, t2;
	test_thread_ctx_t *thread_ctx = arg;
	test_global_t *global = thread_ctx->global;
	test_options_t *test_options = &global->test_options;
	test_fn_t test_func = thread_ctx->func;
	uint64_t *counter[MAX_COUNTERS];

	fill_counter_ptrs(global, counter);

	/* Start all workers at the same time */
	odp_barrier_wait(&global->barrier);

	t1 = odp_time_local();
	test_func(global, counter, test_options->num_counter);
	t2 = odp_time_local();
	nsec = odp_time_diff_ns(t2, t1);

	/* Update stats */
	thread_ctx->nsec = nsec;

	return 0;
}
+
+static int start_workers(test_global_t *global, odp_instance_t instance,
+ test_fn_t func)
+{
+ odph_thread_common_param_t param;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ odph_thread_common_param_init(&param);
+ param.instance = instance;
+ param.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ test_thread_ctx_t *thread_ctx = &global->thread_ctx[i];
+
+ thread_ctx->global = global;
+ thread_ctx->idx = i;
+ thread_ctx->func = func;
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ thr_param[i].start = run_test;
+ thr_param[i].arg = thread_ctx;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &param, thr_param,
+ num_cpu);
+ if (ret != num_cpu) {
+ ODPH_ERR("Failed to create all threads %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int validate_results(test_global_t *global, validate_fn_t validate)
+{
+ test_options_t *test_options = &global->test_options;
+ uint64_t *counter[MAX_COUNTERS];
+
+ fill_counter_ptrs(global, counter);
+
+ if (validate(global, counter, test_options->num_counter))
+ return -1;
+
+ return 0;
+}
+
+static void print_stat(test_global_t *global)
+{
+ int i, num;
+ double nsec_ave;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ uint64_t num_round = test_options->num_round;
+ uint64_t nsec_sum = 0;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ nsec_sum += global->thread_ctx[i].nsec;
+
+ if (nsec_sum == 0) {
+ printf("No results.\n");
+ return;
+ }
+
+ nsec_ave = nsec_sum / num_cpu;
+ num = 0;
+
+ printf("------------------------------------------------\n");
+ printf("Per thread results (Millions of rounds per sec):\n");
+ printf("------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->thread_ctx[i].nsec) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%8.3f ", num_round / (global->thread_ctx[i].nsec / 1000.0));
+ num++;
+ }
+ }
+ printf("\n\n");
+
+ printf("Average results over %i threads:\n", num_cpu);
+ printf("------------------------------------------\n");
+ printf(" duration: %8.3f sec\n",
+ nsec_ave / ODP_TIME_SEC_IN_NS);
+ printf(" rounds per cpu: %8.3fM rounds/sec\n",
+ num_round / (nsec_ave / 1000.0));
+ printf(" total rounds: %8.3fM rounds/sec\n",
+ ((uint64_t)num_cpu * num_round) / (nsec_ave / 1000.0));
+ printf("\n\n");
+}
+
/**
 * Test functions
 *
 * Order matters: --type N selects entry N - 1, and fill_counter_ptrs()
 * maps cur_type values 0..4 to the lock types in this same order.
 */
static test_case_t test_suite[] = {
	TEST_INFO("odp_spinlock", test_spinlock, validate_generic),
	TEST_INFO("odp_spinlock_recursive", test_spinlock_recursive, validate_generic),
	TEST_INFO("odp_rwlock", test_rwlock, validate_generic),
	TEST_INFO("odp_rwlock_recursive", test_rwlock_recursive, validate_generic),
	TEST_INFO("odp_ticketlock", test_ticketlock, validate_generic),
};
+
/* Program entry point: initialize ODP, run the selected lock test cases on
 * worker threads (repeating per --repeat), validate and print results,
 * then tear everything down. Exits non-zero on any failure. */
int main(int argc, char **argv)
{
	odph_helper_options_t helper_options;
	odp_instance_t instance;
	odp_init_t init;
	odp_shm_t shm;
	test_options_t test_options;
	int num_tests, i;

	/* Let helper collect its own arguments (e.g. --odph_proc) */
	argc = odph_parse_options(argc, argv);
	if (odph_options(&helper_options)) {
		ODPH_ERR("Error: reading ODP helper options failed.\n");
		exit(EXIT_FAILURE);
	}

	if (parse_options(argc, argv, &test_options))
		exit(EXIT_FAILURE);

	/* List features not to be used */
	odp_init_param_init(&init);
	init.not_used.feat.cls = 1;
	init.not_used.feat.compress = 1;
	init.not_used.feat.crypto = 1;
	init.not_used.feat.ipsec = 1;
	init.not_used.feat.schedule = 1;
	init.not_used.feat.stash = 1;
	init.not_used.feat.timer = 1;
	init.not_used.feat.tm = 1;

	init.mem_model = helper_options.mem_model;

	/* Init ODP before calling anything else */
	if (odp_init_global(&instance, &init, NULL)) {
		ODPH_ERR("Global init failed.\n");
		exit(EXIT_FAILURE);
	}

	/* Init this thread */
	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
		ODPH_ERR("Local init failed.\n");
		exit(EXIT_FAILURE);
	}

	/* Reserve memory for global data from shared mem */
	shm = odp_shm_reserve("test_global", sizeof(test_global_t),
			      ODP_CACHE_LINE_SIZE, 0);

	if (shm == ODP_SHM_INVALID) {
		ODPH_ERR("Shared memory reserve failed.\n");
		exit(EXIT_FAILURE);
	}

	test_global = odp_shm_addr(shm);
	if (test_global == NULL) {
		ODPH_ERR("Shared memory alloc failed.\n");
		exit(EXIT_FAILURE);
	}
	memset(test_global, 0, sizeof(test_global_t));
	test_global->test_options = test_options;

	odp_sys_info_print();

	if (set_num_cpu(test_global))
		exit(EXIT_FAILURE);

	print_info(&test_global->test_options);

	/* Loop all test cases */
	num_tests = ODPH_ARRAY_SIZE(test_suite);

	while (1) {
		for (i = 0; i < num_tests; i++) {
			/* --type N selects test_suite[N - 1]; 0 runs all */
			if (test_options.type && test_options.type != (uint32_t)i + 1)
				continue;

			/* fill_counter_ptrs() uses cur_type to pick the
			 * counter group of the lock under test */
			test_global->cur_type = i;

			/* Initialize test variables */
			if (init_test(test_global, test_suite[i].name)) {
				ODPH_ERR("Failed to initialize test.\n");
				exit(EXIT_FAILURE);
			}

			/* Start workers */
			if (start_workers(test_global, instance, test_suite[i].test_fn))
				exit(EXIT_FAILURE);

			/* Wait workers to exit */
			odph_thread_join(test_global->thread_tbl,
					 test_global->test_options.num_cpu);

			print_stat(test_global);

			/* Validate test results */
			if (validate_results(test_global, test_suite[i].validate_fn)) {
				ODPH_ERR("Test %s result validation failed.\n",
					 test_suite[i].name);
				if (test_options.repeat != REPEAT_FOREVER)
					exit(EXIT_FAILURE);
			}
		}

		if (test_options.repeat == REPEAT_NO)
			break;
	}

	if (odp_shm_free(shm)) {
		ODPH_ERR("Shm free failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_local()) {
		ODPH_ERR("Local terminate failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_global(instance)) {
		ODPH_ERR("Global terminate failed.\n");
		exit(EXIT_FAILURE);
	}

	return 0;
}
diff --git a/test/performance/odp_mem_perf.c b/test/performance/odp_mem_perf.c
new file mode 100644
index 000000000..241128b1f
--- /dev/null
+++ b/test/performance/odp_mem_perf.c
@@ -0,0 +1,487 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_mem_perf.c
+ *
+ * Test application for measuring memory system bandwidth
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
/* Command line options */
typedef struct test_options_t {
	uint32_t num_cpu;   /* worker thread count, 0 = all available */
	uint32_t num_round; /* memset/memcpy rounds per worker */
	uint64_t data_len;  /* size of each memory area in bytes */
	uint32_t shm_flags; /* flags passed to odp_shm_reserve() */
	int private;        /* 1: one private area per thread, 0: shared */
	int mode;           /* 0: memset, 1: memcpy between area halves */

} test_options_t;

typedef struct test_global_t test_global_t;

/* Worker thread context */
typedef struct test_thread_ctx_t {
	test_global_t *global; /* back pointer to shared test state */
	void *shm_addr;        /* memory area this worker operates on */
	uint64_t nsec;         /* measured duration, written by the worker */

} test_thread_ctx_t;

/* Global data, lives in ODP shared memory */
struct test_global_t {
	test_options_t test_options;

	/* Synchronizes worker start so all threads measure together */
	odp_barrier_t barrier;
	uint32_t num_shm; /* number of reserved data areas */
	odp_shm_t shm[ODP_THREAD_COUNT_MAX];
	void *shm_addr[ODP_THREAD_COUNT_MAX];
	odp_cpumask_t cpumask;
	odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
	test_thread_ctx_t thread_ctx[ODP_THREAD_COUNT_MAX];

};
+
/* Print command line help */
static void print_usage(void)
{
	printf("\n"
	       "Memory performance test\n"
	       "\n"
	       "Usage: odp_mem_perf [options]\n"
	       "\n"
	       "  -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default 1.\n"
	       "  -r, --num_round Number of rounds\n"
	       "  -l, --data_len Data length in bytes\n"
	       "  -f, --flags SHM flags parameter. Default 0.\n"
	       "  -p, --private 0: The same memory area is shared between threads (default)\n"
	       "                 1: Memory areas are private to each thread. This increases\n"
	       "                 memory consumption to num_cpu * data_len.\n"
	       "  -m, --mode 0: Memset data (default)\n"
	       "                 1: Memcpy data. On each round, reads data from one half of the memory area\n"
	       "                 and writes it to the other half.\n"
	       "  -h, --help This help\n"
	       "\n");
}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_round", required_argument, NULL, 'r'},
+ {"data_len", required_argument, NULL, 'l'},
+ {"flags", required_argument, NULL, 'f'},
+ {"private", required_argument, NULL, 'p'},
+ {"mode", required_argument, NULL, 'm'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:r:l:f:p:m:h";
+
+ test_options->num_cpu = 1;
+ test_options->num_round = 1000;
+ test_options->data_len = 10 * 1024 * 1024;
+ test_options->shm_flags = 0;
+ test_options->private = 0;
+ test_options->mode = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'r':
+ test_options->num_round = atoi(optarg);
+ break;
+ case 'l':
+ test_options->data_len = strtoull(optarg, NULL, 0);
+ break;
+ case 'f':
+ test_options->shm_flags = strtoul(optarg, NULL, 0);
+ break;
+ case 'p':
+ test_options->private = atoi(optarg);
+ break;
+ case 'm':
+ test_options->mode = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret, max_num;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+ /* One thread used for the main thread */
+ if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ max_num = num_cpu;
+ if (num_cpu == 0)
+ max_num = ODP_THREAD_COUNT_MAX - 1;
+
+ ret = odp_cpumask_default_worker(&global->cpumask, max_num);
+
+ if (num_cpu && ret != num_cpu) {
+ ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ if (ret > max_num) {
+ ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
+ return -1;
+ }
+
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ odp_barrier_init(&global->barrier, num_cpu);
+
+ return 0;
+}
+
/* Reserve the data areas used by the workers: one shared area, or one per
 * worker when --private is set. Prints the effective configuration and
 * checks it against SHM capabilities. Returns 0 on success, -1 on error. */
static int create_shm(test_global_t *global)
{
	odp_shm_capability_t shm_capa;
	odp_shm_t shm;
	void *addr;
	uint32_t i, num_shm;
	test_options_t *test_options = &global->test_options;
	uint32_t num_round = test_options->num_round;
	uint32_t num_cpu = test_options->num_cpu;
	uint64_t data_len = test_options->data_len;
	uint32_t shm_flags = test_options->shm_flags;
	int private = test_options->private;
	/* Two trailing digits are patched per block below, so unique names
	 * cover indices 00..99. NOTE(review): num_shm can reach
	 * ODP_THREAD_COUNT_MAX - 1, which may exceed 99 on some builds —
	 * names would then repeat non-digit characters; confirm the limit. */
	char name[] = "mem_perf_00";

	num_shm = 1;
	if (private)
		num_shm = num_cpu;

	printf("\nMemory performance test\n");
	printf(" num cpu %u\n", num_cpu);
	printf(" num rounds %u\n", num_round);
	printf(" data len %" PRIu64 "\n", data_len);
	printf(" memory footprint %" PRIu64 "\n", num_shm * data_len);
	printf(" shm flags 0x%x\n", shm_flags);
	printf(" num shm %u\n", num_shm);
	printf(" private %i\n", private);
	printf(" mode %i\n", test_options->mode);

	if (odp_shm_capability(&shm_capa)) {
		ODPH_ERR("SHM capa failed.\n");
		return -1;
	}

	if (shm_capa.max_size && data_len > shm_capa.max_size) {
		ODPH_ERR("Data len too large. Maximum len is %" PRIu64 "\n", shm_capa.max_size);
		return -1;
	}

	if (num_shm > shm_capa.max_blocks) {
		ODPH_ERR("Too many SHM blocks. Maximum is %u\n", shm_capa.max_blocks);
		return -1;
	}

	for (i = 0; i < num_shm; i++) {
		/* Patch the two digit suffix for a unique block name */
		name[9] = '0' + i / 10;
		name[10] = '0' + i % 10;

		shm = odp_shm_reserve(name, data_len, ODP_CACHE_LINE_SIZE, shm_flags);

		if (shm == ODP_SHM_INVALID) {
			ODPH_ERR("SHM[%u] reserve failed.\n", i);
			return -1;
		}

		global->shm[i] = shm;

		addr = odp_shm_addr(shm);
		if (addr == NULL) {
			ODPH_ERR("SHM[%u] addr failed.\n", i);
			return -1;
		}

		global->shm_addr[i] = addr;

		printf(" shm addr[%u] %p\n", i, addr);
	}

	printf("\n");
	global->num_shm = num_shm;

	odp_shm_print_all();

	return 0;
}
+
+static int free_shm(test_global_t *global)
+{
+ uint32_t i;
+
+ for (i = 0; i < global->num_shm; i++) {
+ if (odp_shm_free(global->shm[i])) {
+ ODPH_ERR("SHM[%u] free failed.\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* Worker thread entry point: wait on the start barrier, touch the memory
 * area for all rounds (memset in mode 0, alternating half-to-half memcpy
 * in mode 1) and record the elapsed time. arg is a test_thread_ctx_t
 * pointer. Always returns 0. */
static int run_test(void *arg)
{
	int thr;
	uint32_t i;
	uint64_t nsec;
	odp_time_t t1, t2;
	test_thread_ctx_t *thread_ctx = arg;
	test_global_t *global = thread_ctx->global;
	test_options_t *test_options = &global->test_options;
	uint32_t num_round = test_options->num_round;
	uint64_t data_len = test_options->data_len;
	uint64_t half_len = data_len / 2;
	int mode = test_options->mode;
	uint8_t *addr = thread_ctx->shm_addr;

	thr = odp_thread_id();

	/* Start all workers at the same time */
	odp_barrier_wait(&global->barrier);

	t1 = odp_time_local();

	if (mode == 0) {
		/* Fill value varies per thread and round (wraps mod 256) */
		for (i = 0; i < num_round; i++)
			memset(addr, thr + i, data_len);
	} else {
		/* Alternate copy direction between the two halves */
		for (i = 0; i < num_round; i++) {
			if ((i & 0x1) == 0)
				memcpy(&addr[half_len], addr, half_len);
			else
				memcpy(addr, &addr[half_len], half_len);
		}
	}

	t2 = odp_time_local();

	nsec = odp_time_diff_ns(t2, t1);

	/* Update stats */
	thread_ctx->nsec = nsec;

	return 0;
}
+
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t param;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ odph_thread_common_param_init(&param);
+ param.instance = instance;
+ param.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ test_thread_ctx_t *thread_ctx = &global->thread_ctx[i];
+
+ thread_ctx->global = global;
+ thread_ctx->shm_addr = global->shm_addr[0];
+ if (global->test_options.private)
+ thread_ctx->shm_addr = global->shm_addr[i];
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ thr_param[i].start = run_test;
+ thr_param[i].arg = thread_ctx;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &param, thr_param, num_cpu);
+ if (ret != num_cpu) {
+ ODPH_ERR("Failed to create all threads %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void print_stat(test_global_t *global)
+{
+ int i, num;
+ double nsec_ave;
+ uint64_t data_touch;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ uint32_t num_round = test_options->num_round;
+ uint64_t data_len = test_options->data_len;
+ uint64_t nsec_sum = 0;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ nsec_sum += global->thread_ctx[i].nsec;
+
+ if (nsec_sum == 0) {
+ printf("No results.\n");
+ return;
+ }
+
+ data_touch = num_round * data_len;
+ nsec_ave = nsec_sum / num_cpu;
+ num = 0;
+
+ printf("RESULTS - per thread (MB per sec):\n");
+ printf("----------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->thread_ctx[i].nsec) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%8.1f ", data_touch / (global->thread_ctx[i].nsec / 1000.0));
+ num++;
+ }
+ }
+ printf("\n\n");
+
+ printf("RESULTS - average over %i threads:\n", num_cpu);
+ printf("----------------------------------\n");
+ printf(" duration: %.6f sec\n", nsec_ave / 1000000000);
+ printf(" bandwidth per cpu: %.3f MB/s\n", data_touch / (nsec_ave / 1000.0));
+ printf(" total bandwidth: %.3f MB/s\n", (num_cpu * data_touch) / (nsec_ave / 1000.0));
+ printf("\n");
+}
+
/* Program entry point: initialize ODP, reserve the data areas, run the
 * memory bandwidth measurement on worker threads, print results and tear
 * down. NOTE(review): several early error paths return -1 without calling
 * odp_term_local()/odp_term_global() — presumably acceptable for a test
 * tool that exits anyway; confirm if cleanup-on-error matters here. */
int main(int argc, char **argv)
{
	odph_helper_options_t helper_options;
	odp_instance_t instance;
	odp_init_t init;
	odp_shm_t shm;
	test_global_t *global;

	/* Let helper collect its own arguments (e.g. --odph_proc) */
	argc = odph_parse_options(argc, argv);
	if (odph_options(&helper_options)) {
		ODPH_ERR("Reading ODP helper options failed.\n");
		exit(EXIT_FAILURE);
	}

	/* List features not to be used */
	odp_init_param_init(&init);
	init.not_used.feat.cls = 1;
	init.not_used.feat.compress = 1;
	init.not_used.feat.crypto = 1;
	init.not_used.feat.ipsec = 1;
	init.not_used.feat.schedule = 1;
	init.not_used.feat.timer = 1;
	init.not_used.feat.tm = 1;

	init.mem_model = helper_options.mem_model;

	/* Init ODP before calling anything else */
	if (odp_init_global(&instance, &init, NULL)) {
		ODPH_ERR("Global init failed.\n");
		return -1;
	}

	/* Init this thread */
	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
		ODPH_ERR("Local init failed.\n");
		return -1;
	}

	/* Global state lives in shared memory so workers can reach it */
	shm = odp_shm_reserve("mem_perf_global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
	if (shm == ODP_SHM_INVALID) {
		ODPH_ERR("Shared mem reserve failed.\n");
		exit(EXIT_FAILURE);
	}

	global = odp_shm_addr(shm);
	if (global == NULL) {
		ODPH_ERR("Shared mem alloc failed\n");
		exit(EXIT_FAILURE);
	}

	memset(global, 0, sizeof(test_global_t));

	if (parse_options(argc, argv, &global->test_options))
		return -1;

	odp_sys_info_print();

	if (set_num_cpu(global))
		return -1;

	if (create_shm(global))
		return -1;

	/* Start workers */
	if (start_workers(global, instance))
		return -1;

	/* Wait workers to exit */
	odph_thread_join(global->thread_tbl, global->test_options.num_cpu);

	print_stat(global);

	if (free_shm(global))
		return -1;

	if (odp_shm_free(shm)) {
		ODPH_ERR("Shared mem free failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_local()) {
		ODPH_ERR("term local failed.\n");
		return -1;
	}

	if (odp_term_global(instance)) {
		ODPH_ERR("term global failed.\n");
		return -1;
	}

	return 0;
}
diff --git a/test/performance/odp_packet_gen.c b/test/performance/odp_packet_gen.c
new file mode 100644
index 000000000..c88535791
--- /dev/null
+++ b/test/performance/odp_packet_gen.c
@@ -0,0 +1,2242 @@
+/* Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_packet_gen.c
+ *
+ * Performance optimized packet generator application
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+/* enable usleep */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#if ODP_THREAD_COUNT_MAX > 33
+/* One control thread, even number of workers */
+#define MAX_THREADS 33
+#else
+#define MAX_THREADS ODP_THREAD_COUNT_MAX
+#endif
+
+#define MAX_WORKERS (MAX_THREADS - 1)
+
+/* At least one control and two worker threads */
+ODP_STATIC_ASSERT(MAX_WORKERS >= 2, "Too few threads");
+
+/* Maximum number of packet IO interfaces */
+#define MAX_PKTIOS 16
+/* Maximum number of packets to be allocated for
+ * one transmit round: bursts * burst_size * bins */
+#define MAX_ALLOC_PACKETS (64 * 1024)
+/* Maximum number of packet length bins */
+#define MAX_BINS 1024
+#define MAX_PKTIO_NAME 255
+#define RX_THREAD 1
+#define TX_THREAD 2
+#define MAX_VLANS 4
+/* Number of random 16-bit words used to generate random length packets */
+#define RAND_16BIT_WORDS 128
+/* Max retries to generate random data */
+#define MAX_RAND_RETRIES 1000
+
+/* Used don't free */
+#define TX_MODE_DF 0
+/* Use static references */
+#define TX_MODE_REF 1
+
+/* Minimum number of packets to receive in CI test */
+#define MIN_RX_PACKETS_CI 800
+
+/* Identifier for payload-timestamped packets */
+#define TS_MAGIC 0xff88ee99ddaaccbb
+
+ODP_STATIC_ASSERT(MAX_PKTIOS <= UINT8_MAX, "Interface index must fit into uint8_t\n");
+
+/* All command line options plus values derived from them in parse_options().
+ * Written once during startup, read-only while workers run. */
+typedef struct test_options_t {
+ uint64_t gap_nsec;
+ uint64_t quit;
+ uint64_t update_msec;
+ uint32_t num_rx;
+ uint32_t num_tx;
+ uint32_t num_cpu;
+ uint32_t num_pktio;
+ uint32_t num_pkt;
+ uint32_t pkt_len;
+ uint8_t use_rand_pkt_len;
+ uint8_t direct_rx;
+ uint32_t rand_pkt_len_min;
+ uint32_t rand_pkt_len_max;
+ uint32_t rand_pkt_len_bins;
+ /* Total ETH + VLAN + IPv4 + UDP header length, computed in parse_options() */
+ uint32_t hdr_len;
+ uint32_t burst_size;
+ uint32_t bursts;
+ uint32_t num_vlan;
+ uint32_t ipv4_src;
+ uint32_t ipv4_dst;
+ uint16_t udp_src;
+ uint16_t udp_dst;
+ uint32_t wait_sec;
+ uint32_t wait_start_sec;
+ uint32_t mtu;
+ /* TX_MODE_DF / TX_MODE_REF / copy mode (see -m option) */
+ int tx_mode;
+ odp_bool_t promisc_mode;
+ odp_bool_t calc_latency;
+ odp_bool_t calc_cs;
+ odp_bool_t fill_pl;
+
+ /* VLAN tags, outermost first (-v option) */
+ struct vlan_hdr {
+ uint16_t tpid;
+ uint16_t tci;
+ } vlan[MAX_VLANS];
+
+ /* Counter mode: number of UDP port values to cycle through (-c option) */
+ struct {
+ uint32_t udp_src;
+ uint32_t udp_dst;
+ } c_mode;
+
+ char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
+ /* Printable copies of the IPv4 addresses for status output */
+ char ipv4_src_s[24];
+ char ipv4_dst_s[24];
+
+} test_options_t;
+
+/* Per-thread argument passed to RX/TX worker threads */
+typedef struct thread_arg_t {
+ /* Back pointer to test_global_t */
+ void *global;
+ /* Non-zero when this thread transmits */
+ int tx_thr;
+
+ /* pktout queue per pktio interface (per thread) */
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
+
+ /* In direct_rx mode, pktin queue per pktio interface (per thread) */
+ odp_pktin_queue_t pktin[MAX_PKTIOS];
+
+ /* Pre-built packets for TX thread */
+ odp_packet_t packet[MAX_PKTIOS][MAX_ALLOC_PACKETS];
+
+} thread_arg_t;
+
+/* Per-thread statistics. Cache-line aligned so each worker owns its own line. */
+typedef struct ODP_ALIGNED_CACHE thread_stat_t {
+ uint64_t time_nsec;
+ uint64_t rx_timeouts;
+ uint64_t rx_packets;
+ uint64_t rx_bytes;
+ /* Cumulative, min and max RX latency (nsec) from payload timestamps */
+ uint64_t rx_lat_nsec;
+ uint64_t rx_lat_min_nsec;
+ uint64_t rx_lat_max_nsec;
+ uint64_t rx_lat_packets;
+
+ uint64_t tx_timeouts;
+ uint64_t tx_packets;
+ uint64_t tx_bytes;
+ uint64_t tx_drops;
+
+ /* RX_THREAD or TX_THREAD */
+ int thread_type;
+
+ /* Per-interface packet counts */
+ struct {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+
+ } pktio[MAX_PKTIOS];
+
+} thread_stat_t;
+
+/* Global application state shared by all threads */
+typedef struct test_global_t {
+ test_options_t test_options;
+ /* Set non-zero to request all workers to exit */
+ odp_atomic_u32_t exit_test;
+ odp_barrier_t barrier;
+ odp_cpumask_t cpumask;
+ odp_pool_t pool;
+ uint64_t drained;
+ odph_thread_t thread_tbl[MAX_THREADS];
+ thread_stat_t stat[MAX_THREADS];
+ thread_arg_t thread_arg[MAX_THREADS];
+
+ /* Per-interface state */
+ struct {
+ odph_ethaddr_t eth_src;
+ odph_ethaddr_t eth_dst;
+ odp_pktio_t pktio;
+ odp_pktout_queue_t pktout[MAX_THREADS];
+ odp_pktin_queue_t pktin[MAX_THREADS];
+ int started;
+
+ } pktio[MAX_PKTIOS];
+
+ /* Interface lookup table. Table index is pktio_index of the API. */
+ uint8_t if_from_pktio_idx[ODP_PKTIO_MAX_INDEX + 1];
+
+ /* Packets sent per TX round (burst_size * bursts) */
+ uint32_t num_tx_pkt;
+ /* Number of packet length bins (0 when fixed length is used) */
+ uint32_t num_bins;
+ uint32_t len_bin[MAX_BINS];
+
+} test_global_t;
+
+/* Timestamp record embedded in packet payload for latency measurement.
+ * Packed: it is copied to/from an arbitrary payload offset. */
+typedef struct ODP_PACKED {
+ uint64_t magic;
+ uint64_t tx_ts;
+} ts_data_t;
+
+/* Accumulated RX latency data collected within one receive loop */
+typedef struct {
+ uint64_t nsec;
+ uint64_t min;
+ uint64_t max;
+ uint64_t packets;
+} rx_lat_data_t;
+
+static test_global_t *test_global;
+
+/* Print command line help text */
+static void print_usage(void)
+{
+ printf("\n"
+ "ODP packet generator\n"
+ "\n"
+ "Usage: odp_packet_gen [options]\n"
+ "\n"
+ " Mandatory:\n"
+ " -i, --interface <name> Packet IO interfaces. Comma-separated list of\n"
+ " interface names (no spaces) e.g. eth0,eth1.\n"
+ " At least one interface is required.\n"
+ "\n");
+ printf(" Optional:\n"
+ " -e, --eth_dst <mac> Destination MAC address. Comma-separated list of\n"
+ " addresses (no spaces), one address per packet IO\n"
+ " interface e.g. AA:BB:CC:DD:EE:FF,11:22:33:44:55:66\n"
+ " Default per interface: 02:00:00:A0:B0:CX, where X = 0,1,...\n"
+ " -v, --vlan <tpid:tci> VLAN configuration. Comma-separated list of VLAN TPID:TCI\n"
+ " values in hexadecimal, starting from the outer most VLAN.\n"
+ " For example:\n"
+ " VLAN 200 (decimal): 8100:c8\n"
+ " Double tagged VLANs 1 and 2: 88a8:1,8100:2\n"
+ " -r, --num_rx Number of receive threads. Default: 1\n"
+ " -t, --num_tx Number of transmit threads. Default: 1\n"
+ " -n, --num_pkt Number of packets in the pool. Default: 1000\n"
+ " -l, --len Packet length. Default: 512\n"
+ " -L, --len_range <min,max,bins>\n"
+ " Random packet length. Specify the minimum and maximum\n"
+ " packet lengths and the number of bins. To reduce pool size\n"
+ " requirement the length range can be divided into even sized\n"
+ " bins (max %u). Min and max size packets are always used and included\n"
+ " into the number of bins (bins >= 2). Bin value of 0 means\n"
+ " that each packet length is used. Comma-separated (no spaces).\n"
+ " Overrides standard packet length option.\n"
+ " -D, --direct_rx Direct input mode (default: 0)\n"
+ " 0: Use scheduler for packet input\n"
+ " 1: Poll packet input in direct mode\n", MAX_BINS);
+ printf(" -m, --tx_mode Transmit mode (default 1):\n"
+ " 0: Re-send packets with don't free option\n"
+ " 1: Send static packet references. Some features may\n"
+ " not be available with references.\n"
+ " 2: Send copies of packets\n"
+ " -M, --mtu <len> Interface MTU in bytes.\n"
+ " -b, --burst_size Transmit burst size. Default: 8\n"
+ " -x, --bursts Number of bursts per one transmit round. Default: 1\n"
+ " -g, --gap Gap between transmit rounds in nsec. Default: 1000000\n"
+ " Transmit packet rate per interface:\n"
+ " num_tx * burst_size * bursts * (10^9 / gap)\n"
+ " -s, --ipv4_src IPv4 source address. Default: 192.168.0.1\n"
+ " -d, --ipv4_dst IPv4 destination address. Default: 192.168.0.2\n"
+ " -o, --udp_src UDP source port. Default: 10000\n"
+ " -p, --udp_dst UDP destination port. Default: 20000\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
+ " -a, --latency Calculate latency. Cannot be used with packet\n"
+ " references (see \"--tx_mode\").\n"
+ " -c, --c_mode <counts> Counter mode for incrementing UDP port numbers.\n"
+ " Specify the number of port numbers used starting from\n"
+ " udp_src/udp_dst. Comma-separated (no spaces) list of\n"
+ " count values: <udp_src count>,<udp_dst count>\n"
+ " Default value: 0,0\n"
+ " -C, --no_udp_checksum Do not calculate UDP checksum. Instead, set it to\n"
+ " zero in every packet.\n"
+ " -A, --no_payload_fill Do not fill payload. By default, payload is filled\n"
+ " with a pattern until the end of first packet\n"
+ " segment.\n"
+ " -q, --quit Quit after this many transmit rounds.\n"
+ " Default: 0 (don't quit)\n"
+ " -u, --update_stat <msec> Update and print statistics every <msec> milliseconds.\n"
+ " 0: Don't print statistics periodically (default)\n"
+ " -h, --help This help\n"
+ " -w, --wait <sec> Wait up to <sec> seconds for network links to be up.\n"
+ " Default: 0 (don't check link status)\n"
+ " -W, --wait_start <sec> Wait <sec> seconds before starting traffic. Default: 0\n"
+ "\n");
+}
+
+/* Parse comma-separated "tpid:tci,tpid:tci,..." hex VLAN list into
+ * global->test_options.vlan[]. Returns the number of VLANs parsed (0 on
+ * no valid input). At most MAX_VLANS entries are read. */
+static int parse_vlan(const char *str, test_global_t *global)
+{
+ struct vlan_hdr *vlan;
+ const char *start = str;
+ char *end;
+ int num_vlan = 0;
+ intptr_t str_len = strlen(str);
+
+ while (num_vlan < MAX_VLANS) {
+ vlan = &global->test_options.vlan[num_vlan];
+
+ /* TPID in hexadecimal */
+ end = NULL;
+ vlan->tpid = strtoul(start, &end, 16);
+ /* NOTE(review): on conversion failure strtoul() sets end == start,
+ * so "end < start" never triggers; a malformed field parses as 0
+ * instead of terminating the loop here. */
+ if (end < start)
+ break;
+
+ /* Skip ':' */
+ start = end + 1;
+ if (start - str >= str_len)
+ break;
+
+ /* TCI in hexadecimal */
+ end = NULL;
+ vlan->tci = strtoul(start, &end, 16);
+ if (end < start)
+ break;
+
+ num_vlan++;
+
+ /* Skip ',' or stop at the string end */
+ start = end + 1;
+ if (start - str >= str_len)
+ break;
+ }
+
+ return num_vlan;
+}
+
+/* Build the packet length bin table (global->len_bin[]) from the random
+ * length options. Bin 0 is always the minimum length and the last bin the
+ * maximum length; intermediate bins are evenly spaced. A bin count of 0
+ * means one bin per byte of the length range.
+ * Returns 0 on success, -1 on bad min/max/bin values. */
+static int init_bins(test_global_t *global)
+{
+ uint32_t i, bin_size;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_bins = test_options->rand_pkt_len_bins;
+ uint32_t len_min = test_options->rand_pkt_len_min;
+ uint32_t len_max = test_options->rand_pkt_len_max;
+ uint32_t num_bytes = len_max - len_min + 1;
+
+ if (len_max <= len_min) {
+ ODPH_ERR("Error: Bad max packet length\n");
+ return -1;
+ }
+
+ if (num_bins == 0)
+ num_bins = num_bytes;
+
+ /* At least two bins (min and max) are always required */
+ if (num_bins == 1 || num_bins > MAX_BINS || num_bins > num_bytes) {
+ ODPH_ERR("Error: Bad number of packet length bins: %u\n", num_bins);
+ return -1;
+ }
+
+ bin_size = (len_max - len_min + 1) / (num_bins - 1);
+
+ /* Min length is the first bin */
+ for (i = 0; i < num_bins - 1; i++)
+ global->len_bin[i] = len_min + (i * bin_size);
+
+ /* Max length is the last bin */
+ global->len_bin[i] = len_max;
+ global->num_bins = num_bins;
+
+ return 0;
+}
+
+/* Parse command line arguments into global->test_options, set defaults,
+ * and validate the resulting configuration (thread counts, pool size,
+ * packet lengths, mode combinations).
+ * Returns 0 on success, -1 on any parse or validation error. */
+static int parse_options(int argc, char *argv[], test_global_t *global)
+{
+ int opt, i, len, str_len, long_index, udp_port;
+ unsigned long int count;
+ uint32_t min_packets, num_tx_pkt, num_tx_alloc, pkt_len, val, bins;
+ char *name, *str, *end;
+ test_options_t *test_options = &global->test_options;
+ int ret = 0;
+ uint8_t default_eth_dst[6] = {0x02, 0x00, 0x00, 0xa0, 0xb0, 0xc0};
+
+ static const struct option longopts[] = {
+ {"interface", required_argument, NULL, 'i'},
+ {"eth_dst", required_argument, NULL, 'e'},
+ {"num_rx", required_argument, NULL, 'r'},
+ {"num_tx", required_argument, NULL, 't'},
+ {"num_pkt", required_argument, NULL, 'n'},
+ {"len", required_argument, NULL, 'l'},
+ {"len_range", required_argument, NULL, 'L'},
+ {"direct_rx", required_argument, NULL, 'D'},
+ {"tx_mode", required_argument, NULL, 'm'},
+ {"burst_size", required_argument, NULL, 'b'},
+ {"bursts", required_argument, NULL, 'x'},
+ {"gap", required_argument, NULL, 'g'},
+ {"vlan", required_argument, NULL, 'v'},
+ {"ipv4_src", required_argument, NULL, 's'},
+ {"ipv4_dst", required_argument, NULL, 'd'},
+ {"udp_src", required_argument, NULL, 'o'},
+ {"udp_dst", required_argument, NULL, 'p'},
+ {"promisc_mode", no_argument, NULL, 'P'},
+ {"latency", no_argument, NULL, 'a'},
+ {"c_mode", required_argument, NULL, 'c'},
+ {"no_udp_checksum", no_argument, NULL, 'C'},
+ {"no_payload_fill", no_argument, NULL, 'A'},
+ {"mtu", required_argument, NULL, 'M'},
+ {"quit", required_argument, NULL, 'q'},
+ {"wait", required_argument, NULL, 'w'},
+ {"wait_start", required_argument, NULL, 'W'},
+ {"update_stat", required_argument, NULL, 'u'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+i:e:r:t:n:l:L:D:m:M:b:x:g:v:s:d:o:p:c:CAq:u:w:W:Pah";
+
+ /* Defaults (see print_usage() for documentation) */
+ test_options->num_pktio = 0;
+ test_options->num_rx = 1;
+ test_options->num_tx = 1;
+ test_options->num_pkt = 1000;
+ test_options->pkt_len = 512;
+ test_options->use_rand_pkt_len = 0;
+ test_options->direct_rx = 0;
+ test_options->tx_mode = TX_MODE_REF;
+ test_options->burst_size = 8;
+ test_options->bursts = 1;
+ test_options->gap_nsec = 1000000;
+ test_options->num_vlan = 0;
+ test_options->promisc_mode = 0;
+ test_options->calc_latency = 0;
+ test_options->calc_cs = 1;
+ test_options->fill_pl = 1;
+ strncpy(test_options->ipv4_src_s, "192.168.0.1",
+ sizeof(test_options->ipv4_src_s) - 1);
+ strncpy(test_options->ipv4_dst_s, "192.168.0.2",
+ sizeof(test_options->ipv4_dst_s) - 1);
+ if (odph_ipv4_addr_parse(&test_options->ipv4_src, test_options->ipv4_src_s)) {
+ ODPH_ERR("Address parse failed\n");
+ return -1;
+ }
+ if (odph_ipv4_addr_parse(&test_options->ipv4_dst, test_options->ipv4_dst_s)) {
+ ODPH_ERR("Address parse failed\n");
+ return -1;
+ }
+ test_options->udp_src = 10000;
+ test_options->udp_dst = 20000;
+ test_options->c_mode.udp_src = 0;
+ test_options->c_mode.udp_dst = 0;
+ test_options->quit = 0;
+ test_options->update_msec = 0;
+ test_options->wait_sec = 0;
+ test_options->wait_start_sec = 0;
+ test_options->mtu = 0;
+
+ /* Default destination MACs: 02:00:00:A0:B0:C0 + interface index */
+ for (i = 0; i < MAX_PKTIOS; i++) {
+ memcpy(global->pktio[i].eth_dst.addr, default_eth_dst, 6);
+ global->pktio[i].eth_dst.addr[5] += i;
+ }
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'i':
+ /* Comma-separated interface name list */
+ i = 0;
+ str = optarg;
+ str_len = strlen(str);
+
+ while (str_len > 0) {
+ len = strcspn(str, ",");
+ str_len -= len + 1;
+
+ if (i == MAX_PKTIOS) {
+ ODPH_ERR("Error: Too many interfaces\n");
+ ret = -1;
+ break;
+ }
+
+ if (len > MAX_PKTIO_NAME) {
+ ODPH_ERR("Error: Too long interface name %s\n", str);
+ ret = -1;
+ break;
+ }
+
+ name = test_options->pktio_name[i];
+ memcpy(name, str, len);
+ str += len + 1;
+ i++;
+ }
+
+ test_options->num_pktio = i;
+
+ break;
+ case 'e':
+ /* Comma-separated destination MAC list, one per interface */
+ i = 0;
+ str = optarg;
+ str_len = strlen(str);
+
+ while (str_len > 0) {
+ odph_ethaddr_t *dst = &global->pktio[i].eth_dst;
+
+ len = strcspn(str, ",");
+ str_len -= len + 1;
+
+ if (i == MAX_PKTIOS) {
+ ODPH_ERR("Error: Too many MAC addresses\n");
+ ret = -1;
+ break;
+ }
+
+ if (odph_eth_addr_parse(dst, str)) {
+ ODPH_ERR("Error: Bad MAC address: %s\n", str);
+ ret = -1;
+ break;
+ }
+
+ str += len + 1;
+ i++;
+ }
+ break;
+ case 'o':
+ udp_port = atoi(optarg);
+ if (udp_port < 0 || udp_port > UINT16_MAX) {
+ ODPH_ERR("Error: Bad UDP source port: %d\n", udp_port);
+ ret = -1;
+ break;
+ }
+ test_options->udp_src = udp_port;
+ break;
+ case 'p':
+ udp_port = atoi(optarg);
+ if (udp_port < 0 || udp_port > UINT16_MAX) {
+ ODPH_ERR("Error: Bad UDP destination port: %d\n", udp_port);
+ ret = -1;
+ break;
+ }
+ test_options->udp_dst = udp_port;
+ break;
+ case 'P':
+ test_options->promisc_mode = 1;
+ break;
+ case 'a':
+ test_options->calc_latency = 1;
+ break;
+ case 'r':
+ test_options->num_rx = atoi(optarg);
+ break;
+ case 't':
+ test_options->num_tx = atoi(optarg);
+ break;
+ case 'n':
+ test_options->num_pkt = atoi(optarg);
+ break;
+ case 'l':
+ test_options->pkt_len = atoi(optarg);
+ break;
+ case 'L':
+ /* "min,max,bins" triple.
+ * NOTE(review): the ',' separators are skipped with end++/str++
+ * without checking that strtoul() actually stopped at a comma;
+ * malformed input is not diagnosed here. */
+ pkt_len = strtoul(optarg, &end, 0);
+ test_options->rand_pkt_len_min = pkt_len;
+ end++;
+ pkt_len = strtoul(end, &str, 0);
+ test_options->rand_pkt_len_max = pkt_len;
+ str++;
+ val = strtoul(str, NULL, 0);
+ test_options->rand_pkt_len_bins = val;
+ test_options->use_rand_pkt_len = 1;
+ break;
+ case 'D':
+ test_options->direct_rx = atoi(optarg);
+ break;
+ case 'm':
+ test_options->tx_mode = atoi(optarg);
+ break;
+ case 'M':
+ test_options->mtu = atoi(optarg);
+ break;
+ case 'b':
+ test_options->burst_size = atoi(optarg);
+ break;
+ case 'x':
+ test_options->bursts = atoi(optarg);
+ break;
+ case 'g':
+ test_options->gap_nsec = atoll(optarg);
+ break;
+ case 'v':
+ test_options->num_vlan = parse_vlan(optarg, global);
+ if (test_options->num_vlan == 0) {
+ ODPH_ERR("Error: Did not find any VLANs\n");
+ ret = -1;
+ }
+ break;
+ case 's':
+ if (odph_ipv4_addr_parse(&test_options->ipv4_src,
+ optarg)) {
+ ODPH_ERR("Error: Bad IPv4 source address: %s\n", optarg);
+ ret = -1;
+ }
+ strncpy(test_options->ipv4_src_s, optarg,
+ sizeof(test_options->ipv4_src_s) - 1);
+ break;
+ case 'd':
+ if (odph_ipv4_addr_parse(&test_options->ipv4_dst,
+ optarg)) {
+ ODPH_ERR("Error: Bad IPv4 destination address: %s\n", optarg);
+ ret = -1;
+ }
+ strncpy(test_options->ipv4_dst_s, optarg,
+ sizeof(test_options->ipv4_dst_s) - 1);
+ break;
+ case 'c':
+ /* "<udp_src count>,<udp_dst count>" pair */
+ count = strtoul(optarg, &end, 0);
+ test_options->c_mode.udp_src = count;
+
+ end++;
+ count = strtoul(end, NULL, 0);
+ test_options->c_mode.udp_dst = count;
+ break;
+ case 'C':
+ test_options->calc_cs = 0;
+ break;
+ case 'A':
+ test_options->fill_pl = 0;
+ break;
+ case 'q':
+ test_options->quit = atoll(optarg);
+ break;
+ case 'u':
+ test_options->update_msec = atoll(optarg);
+ break;
+ case 'w':
+ test_options->wait_sec = atoi(optarg);
+ break;
+ case 'W':
+ test_options->wait_start_sec = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (ret)
+ return -1;
+
+ if (test_options->num_pktio == 0) {
+ ODPH_ERR("Error: At least one packet IO interface is needed.\n");
+ ODPH_ERR(" Use -i <name> to specify interfaces.\n");
+ return -1;
+ }
+
+ if (test_options->num_rx < 1 || test_options->num_tx < 1) {
+ ODPH_ERR("Error: At least one rx and tx thread needed.\n");
+ return -1;
+ }
+
+ test_options->num_cpu = test_options->num_rx + test_options->num_tx;
+
+ if (test_options->num_cpu > MAX_WORKERS) {
+ ODPH_ERR("Error: Too many worker threads\n");
+ return -1;
+ }
+
+ num_tx_pkt = test_options->burst_size * test_options->bursts;
+ global->num_tx_pkt = num_tx_pkt;
+
+ if (num_tx_pkt == 0) {
+ ODPH_ERR("Error: Bad number of tx packets: %u\n", num_tx_pkt);
+ return -1;
+ }
+
+ if (test_options->use_rand_pkt_len) {
+ if (init_bins(global))
+ return -1;
+ }
+
+ /* TX allocation count: one packet per bin per tx round packet */
+ bins = global->num_bins ? global->num_bins : 1;
+ num_tx_alloc = num_tx_pkt * bins;
+ if (num_tx_alloc > MAX_ALLOC_PACKETS) {
+ ODPH_ERR("Error: Too many tx packets: %u\n", num_tx_alloc);
+ return -1;
+ }
+
+ /* Pool needs to have enough packets for all TX side pre-allocated packets and
+ * a burst per thread (for packet copies). RX side needs one burst per thread per pktio. */
+ min_packets = test_options->num_pktio * test_options->num_tx * num_tx_alloc;
+ min_packets += test_options->num_tx * test_options->burst_size;
+ min_packets += test_options->num_pktio * test_options->num_rx * test_options->burst_size;
+
+ if (test_options->num_pkt < min_packets) {
+ ODPH_ERR("Error: Pool needs to have at least %u packets\n", min_packets);
+ return -1;
+ }
+
+ if (test_options->calc_latency && test_options->tx_mode == TX_MODE_REF) {
+ ODPH_ERR("Error: Latency test is not supported with packet references (--tx_mode 1)\n");
+ return -1;
+ }
+
+ if (test_options->gap_nsec) {
+ double gap_hz = 1000000000.0 / test_options->gap_nsec;
+
+ if (gap_hz > (double)odp_time_local_res()) {
+ ODPH_ERR("\nWARNING: Burst gap exceeds time counter resolution "
+ "%" PRIu64 "\n\n", odp_time_local_res());
+ }
+ }
+
+ if (global->num_bins) {
+ if (num_tx_pkt > global->num_bins && num_tx_pkt % global->num_bins)
+ ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible into packet length bins.\n\n");
+
+ if (num_tx_pkt < global->num_bins)
+ ODPH_ERR("\nWARNING: Not enough packets for every packet length bin.\n\n");
+ }
+
+ if (test_options->c_mode.udp_dst &&
+ num_tx_pkt % test_options->c_mode.udp_dst)
+ ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by UDP destination port count.\n\n");
+
+ if (test_options->c_mode.udp_src &&
+ num_tx_pkt % test_options->c_mode.udp_src)
+ ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by UDP source port count.\n\n");
+
+ test_options->hdr_len = ODPH_ETHHDR_LEN +
+ (test_options->num_vlan * ODPH_VLANHDR_LEN) +
+ ODPH_IPV4HDR_LEN + ODPH_UDPHDR_LEN;
+
+ /* Headers must fit into the smallest generated packet */
+ pkt_len = test_options->use_rand_pkt_len ?
+ test_options->rand_pkt_len_min : test_options->pkt_len;
+ if (test_options->hdr_len >= pkt_len) {
+ ODPH_ERR("Error: Headers do not fit into packet length %" PRIu32 "\n", pkt_len);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Resolve the worker CPU mask for num_cpu threads and initialize the
+ * start barrier (workers + one control thread). Falls back to using any
+ * available CPUs when dedicated worker CPUs are scarce (e.g. in CI).
+ * Returns 0 on success, -1 when enough CPUs are not available. */
+static int set_num_cpu(test_global_t *global)
+{
+ int ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+ ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+
+ if (ret != num_cpu) {
+ int cpu;
+
+ /* Normally we want to use only worker threads */
+ if (ret > 1) {
+ ODPH_ERR("Error: Too many workers. Maximum supported %i.\n", ret);
+ return -1;
+ }
+
+ /* When number of workers is very limited (e.g. ODP project CI),
+ * we try to use any CPUs available. */
+ ret = odp_cpumask_all_available(&global->cpumask);
+ if (ret < num_cpu) {
+ ODPH_ERR("Error: Not enough CPUs. Maximum supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Remove extra CPUs from the mask */
+ cpu = odp_cpumask_first(&global->cpumask);
+ while (ret > num_cpu) {
+ odp_cpumask_clr(&global->cpumask, cpu);
+ cpu = odp_cpumask_first(&global->cpumask);
+ ret--;
+ }
+ }
+
+ odp_barrier_init(&global->barrier, num_cpu + 1);
+
+ return 0;
+}
+
+/* Print the test configuration, create the packet pool, then open and
+ * configure every packet IO interface: capability checks, optional MTU and
+ * promiscuous mode, parser level, and pktin/pktout queue setup (hashed
+ * input queues when multiple RX threads are used).
+ * Returns 0 on success, -1 on any failure (partially opened interfaces
+ * are left for stop_pktios()/close_pktios() to clean up). */
+static int open_pktios(test_global_t *global)
+{
+ odp_pool_capability_t pool_capa;
+ odp_pktio_capability_t pktio_capa;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ odp_pktio_param_t pktio_param;
+ odp_pktio_t pktio;
+ odp_pktio_config_t pktio_config;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktout_queue_param_t pktout_param;
+ char *name;
+ uint32_t i, seg_len;
+ int j, pktio_idx;
+ test_options_t *test_options = &global->test_options;
+ int num_rx = test_options->num_rx;
+ int num_tx = test_options->num_tx;
+ uint32_t num_pktio = test_options->num_pktio;
+ uint32_t num_pkt = test_options->num_pkt;
+ /* With random lengths the pool must fit the largest packet */
+ uint32_t pkt_len = test_options->use_rand_pkt_len ?
+ test_options->rand_pkt_len_max : test_options->pkt_len;
+ odp_pktout_queue_t pktout[num_tx];
+ odp_pktin_queue_t pktin[num_rx];
+
+ printf("\nODP packet generator\n");
+ printf(" quit test after %" PRIu64 " rounds\n",
+ test_options->quit);
+ printf(" num rx threads %i\n", num_rx);
+ printf(" num tx threads %i\n", num_tx);
+ printf(" num packets %u\n", num_pkt);
+ if (test_options->use_rand_pkt_len)
+ printf(" packet length %u-%u bytes, %u bins\n",
+ test_options->rand_pkt_len_min,
+ test_options->rand_pkt_len_max,
+ test_options->rand_pkt_len_bins);
+ else
+ printf(" packet length %u bytes\n", pkt_len);
+ printf(" MTU: ");
+ if (test_options->mtu)
+ printf("%u bytes\n", test_options->mtu);
+ else
+ printf("interface default\n");
+ printf(" packet input mode: %s\n", test_options->direct_rx ? "direct" : "scheduler");
+ printf(" promisc mode: %s\n", test_options->promisc_mode ? "enabled" : "disabled");
+ printf(" transmit mode: %i\n", test_options->tx_mode);
+ printf(" measure latency: %s\n", test_options->calc_latency ? "enabled" : "disabled");
+ printf(" UDP checksum: %s\n", test_options->calc_cs ? "enabled" : "disabled");
+ printf(" payload filling: %s\n", test_options->fill_pl ? "enabled" : "disabled");
+ printf(" tx burst size %u\n", test_options->burst_size);
+ printf(" tx bursts %u\n", test_options->bursts);
+ printf(" tx burst gap %" PRIu64 " nsec\n",
+ test_options->gap_nsec);
+ printf(" clock resolution %" PRIu64 " Hz\n", odp_time_local_res());
+ for (i = 0; i < test_options->num_vlan; i++) {
+ printf(" VLAN[%i] %x:%x\n", i,
+ test_options->vlan[i].tpid, test_options->vlan[i].tci);
+ }
+ printf(" IPv4 source %s\n", test_options->ipv4_src_s);
+ printf(" IPv4 destination %s\n", test_options->ipv4_dst_s);
+ printf(" UDP source %u\n", test_options->udp_src);
+ printf(" UDP destination %u\n", test_options->udp_dst);
+ printf(" UDP src count %u\n", test_options->c_mode.udp_src);
+ printf(" UDP dst count %u\n", test_options->c_mode.udp_dst);
+ printf(" num pktio %u\n", num_pktio);
+
+ printf(" interfaces names: ");
+ for (i = 0; i < num_pktio; i++) {
+ if (i > 0)
+ printf(" ");
+ printf("%s\n", test_options->pktio_name[i]);
+ }
+
+ printf(" destination MACs: ");
+ for (i = 0; i < num_pktio; i++) {
+ uint8_t *eth_dst = global->pktio[i].eth_dst.addr;
+
+ if (i > 0)
+ printf(" ");
+ printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dst[0], eth_dst[1], eth_dst[2],
+ eth_dst[3], eth_dst[4], eth_dst[5]);
+ }
+ printf("\n");
+
+ global->pool = ODP_POOL_INVALID;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Error: Pool capability failed.\n");
+ return -1;
+ }
+
+ /* Capability value 0 means "no limit" */
+ if (pool_capa.pkt.max_num &&
+ num_pkt > pool_capa.pkt.max_num) {
+ ODPH_ERR("Error: Too many packets. Max %u supported.\n", pool_capa.pkt.max_num);
+ return -1;
+ }
+
+ if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len) {
+ ODPH_ERR("Error: Too large packets. Max %u supported length.\n",
+ pool_capa.pkt.max_len);
+ return -1;
+ }
+
+ /* All protocol headers must fit into the first segment */
+ seg_len = test_options->hdr_len;
+ if (pool_capa.pkt.max_seg_len &&
+ seg_len > pool_capa.pkt.max_seg_len) {
+ ODPH_ERR("Error: Max segment length is too small %u\n", pool_capa.pkt.max_seg_len);
+ return -1;
+ }
+
+ /* Create pool */
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = num_pkt;
+ pool_param.pkt.len = pkt_len;
+ pool_param.pkt.seg_len = seg_len;
+
+ pool = odp_pool_create("packet gen pool", &pool_param);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error: Pool create failed.\n");
+ return -1;
+ }
+
+ global->pool = pool;
+
+ odp_pktio_param_init(&pktio_param);
+
+ if (test_options->direct_rx)
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ else
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ /* Mark all handles invalid so close_pktios() can clean up safely */
+ for (i = 0; i < num_pktio; i++)
+ global->pktio[i].pktio = ODP_PKTIO_INVALID;
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_pktio; i++) {
+ name = test_options->pktio_name[i];
+ pktio = odp_pktio_open(name, pool, &pktio_param);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Error (%s): Pktio open failed.\n", name);
+ return -1;
+ }
+
+ global->pktio[i].pktio = pktio;
+
+ odp_pktio_print(pktio);
+
+ pktio_idx = odp_pktio_index(pktio);
+ if (pktio_idx < 0) {
+ ODPH_ERR("Error (%s): Reading pktio index failed: %i\n", name, pktio_idx);
+ return -1;
+ }
+ global->if_from_pktio_idx[pktio_idx] = i;
+
+ if (odp_pktio_capability(pktio, &pktio_capa)) {
+ ODPH_ERR("Error (%s): Pktio capability failed.\n", name);
+ return -1;
+ }
+
+ if (num_rx > (int)pktio_capa.max_input_queues) {
+ ODPH_ERR("Error (%s): Too many RX threads. Interface supports max %u input queues.\n",
+ name, pktio_capa.max_input_queues);
+ return -1;
+ }
+
+ if (num_tx > (int)pktio_capa.max_output_queues) {
+ ODPH_ERR("Error (%s): Too many TX threads. Interface supports max %u output queues.\n",
+ name, pktio_capa.max_output_queues);
+ return -1;
+ }
+
+ if (odp_pktio_mac_addr(pktio,
+ &global->pktio[i].eth_src.addr,
+ ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
+ ODPH_ERR("Error (%s): MAC address read failed.\n", name);
+ return -1;
+ }
+
+ if (test_options->mtu) {
+ /* Set a direction's MTU only when the interface
+ * supports changing it in that direction */
+ uint32_t maxlen_input = pktio_capa.maxlen.max_input ? test_options->mtu : 0;
+ uint32_t maxlen_output = pktio_capa.maxlen.max_output ?
+ test_options->mtu : 0;
+
+ if (!pktio_capa.set_op.op.maxlen) {
+ ODPH_ERR("Error (%s): modifying interface MTU not supported.\n",
+ name);
+ return -1;
+ }
+
+ if (maxlen_input &&
+ (maxlen_input < pktio_capa.maxlen.min_input ||
+ maxlen_input > pktio_capa.maxlen.max_input)) {
+ ODPH_ERR("Error (%s): unsupported MTU value %" PRIu32 " "
+ "(min %" PRIu32 ", max %" PRIu32 ")\n", name, maxlen_input,
+ pktio_capa.maxlen.min_input, pktio_capa.maxlen.max_input);
+ return -1;
+ }
+ if (maxlen_output &&
+ (maxlen_output < pktio_capa.maxlen.min_output ||
+ maxlen_output > pktio_capa.maxlen.max_output)) {
+ ODPH_ERR("Error (%s): unsupported MTU value %" PRIu32 " "
+ "(min %" PRIu32 ", max %" PRIu32 ")\n", name,
+ maxlen_output, pktio_capa.maxlen.min_output,
+ pktio_capa.maxlen.max_output);
+ return -1;
+ }
+
+ if (odp_pktio_maxlen_set(pktio, maxlen_input, maxlen_output)) {
+ ODPH_ERR("Error (%s): setting MTU failed\n", name);
+ return -1;
+ }
+ }
+
+ /* Don't-free TX mode requires interface support */
+ if (test_options->tx_mode == TX_MODE_DF && pktio_capa.free_ctrl.dont_free == 0) {
+ ODPH_ERR("Error (%s): Don't free mode not supported\n", name);
+ return -1;
+ }
+
+ odp_pktio_config_init(&pktio_config);
+ pktio_config.parser.layer = ODP_PROTO_LAYER_ALL;
+
+ odp_pktio_config(pktio, &pktio_config);
+
+ if (test_options->promisc_mode && odp_pktio_promisc_mode(pktio) != 1) {
+ if (!pktio_capa.set_op.op.promisc_mode) {
+ ODPH_ERR("Error (%s): promisc mode set not supported\n", name);
+ return -1;
+ }
+
+ if (odp_pktio_promisc_mode_set(pktio, true)) {
+ ODPH_ERR("Error (%s): promisc mode enable failed\n", name);
+ return -1;
+ }
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ if (test_options->direct_rx) {
+ pktin_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ } else {
+ pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+
+ pktin_param.num_queues = num_rx;
+
+ /* Spread flows over RX queues by IPv4/UDP hash */
+ if (num_rx > 1) {
+ pktin_param.hash_enable = 1;
+ pktin_param.hash_proto.proto.ipv4_udp = 1;
+ }
+
+ if (odp_pktin_queue_config(pktio, &pktin_param)) {
+ ODPH_ERR("Error (%s): Pktin config failed.\n", name);
+ return -1;
+ }
+
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ pktout_param.num_queues = num_tx;
+
+ if (odp_pktout_queue_config(pktio, &pktout_param)) {
+ ODPH_ERR("Error (%s): Pktout config failed.\n", name);
+ return -1;
+ }
+
+ if (odp_pktout_queue(pktio, pktout, num_tx) != num_tx) {
+ ODPH_ERR("Error (%s): Pktout queue request failed.\n", name);
+ return -1;
+ }
+
+ for (j = 0; j < num_tx; j++)
+ global->pktio[i].pktout[j] = pktout[j];
+
+ if (test_options->direct_rx) {
+ if (odp_pktin_queue(pktio, pktin, num_rx) != num_rx) {
+ ODPH_ERR("Error (%s): Pktin queue request failed.\n", name);
+ return -1;
+ }
+
+ for (j = 0; j < num_rx; j++)
+ global->pktio[i].pktin[j] = pktin[j];
+ }
+ }
+
+ return 0;
+}
+
+/* Query and print link status details (autoneg, duplex, media, pause,
+ * speed) for one interface. Returns 0 on success, -1 on query failure. */
+static int print_link_info(odp_pktio_t pktio)
+{
+ odp_pktio_link_info_t info;
+
+ if (odp_pktio_link_info(pktio, &info)) {
+ ODPH_ERR("Error: Pktio link info failed.\n");
+ return -1;
+ }
+
+ printf(" autoneg %s\n",
+ (info.autoneg == ODP_PKTIO_LINK_AUTONEG_ON ? "on" :
+ (info.autoneg == ODP_PKTIO_LINK_AUTONEG_OFF ? "off" : "unknown")));
+ printf(" duplex %s\n",
+ (info.duplex == ODP_PKTIO_LINK_DUPLEX_HALF ? "half" :
+ (info.duplex == ODP_PKTIO_LINK_DUPLEX_FULL ? "full" : "unknown")));
+ printf(" media %s\n", info.media);
+ printf(" pause_rx %s\n",
+ (info.pause_rx == ODP_PKTIO_LINK_PAUSE_ON ? "on" :
+ (info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF ? "off" : "unknown")));
+ printf(" pause_tx %s\n",
+ (info.pause_tx == ODP_PKTIO_LINK_PAUSE_ON ? "on" :
+ (info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF ? "off" : "unknown")));
+ printf(" speed(Mbit/s) %" PRIu32 "\n\n", info.speed);
+
+ return 0;
+}
+/* Start all opened interfaces, optionally wait (up to wait_sec seconds,
+ * polled once per second) for every link to come up, then optionally delay
+ * wait_start_sec seconds before traffic starts.
+ * Returns 0 on success, -1 on start failure or link timeout.
+ * NOTE(review): link_wait is shared across interfaces, so wait_sec bounds
+ * the total wait time over all interfaces, not per interface. */
+static int start_pktios(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_pktio = test_options->num_pktio;
+ uint32_t link_wait = 0;
+
+ for (i = 0; i < num_pktio; i++) {
+ if (odp_pktio_start(global->pktio[i].pktio)) {
+ ODPH_ERR("Error (%s): Pktio start failed.\n", test_options->pktio_name[i]);
+
+ return -1;
+ }
+
+ global->pktio[i].started = 1;
+ }
+
+ /* Wait until all links are up */
+ for (i = 0; test_options->wait_sec && i < num_pktio; i++) {
+ while (1) {
+ odp_pktio_t pktio = global->pktio[i].pktio;
+
+ if (odp_pktio_link_status(pktio) == ODP_PKTIO_LINK_STATUS_UP) {
+ printf("pktio:%s\n", test_options->pktio_name[i]);
+ if (print_link_info(pktio)) {
+ ODPH_ERR("Error (%s): Printing link info failed.\n",
+ test_options->pktio_name[i]);
+ return -1;
+ }
+ break;
+ }
+ link_wait++;
+ if (link_wait > test_options->wait_sec) {
+ ODPH_ERR("Error (%s): Pktio link down.\n",
+ test_options->pktio_name[i]);
+ return -1;
+ }
+ odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
+ }
+ }
+
+ if (test_options->wait_start_sec)
+ odp_time_wait_ns(test_options->wait_start_sec * ODP_TIME_SEC_IN_NS);
+
+ return 0;
+}
+
+/* Stop every pktio interface that was successfully started.
+ *
+ * global: test state holding pktio handles and started flags
+ *
+ * Skips invalid or never-started handles. Continues through all
+ * interfaces even after a failure so each one gets a stop attempt.
+ *
+ * Returns 0 on success, -1 if any stop call failed. */
+static int stop_pktios(test_global_t *global)
+{
+ uint32_t i;
+ odp_pktio_t pktio;
+ int ret = 0;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_pktio = test_options->num_pktio;
+
+ for (i = 0; i < num_pktio; i++) {
+ pktio = global->pktio[i].pktio;
+
+ if (pktio == ODP_PKTIO_INVALID || global->pktio[i].started == 0)
+ continue;
+
+ if (odp_pktio_stop(pktio)) {
+ ODPH_ERR("Error (%s): Pktio stop failed.\n", test_options->pktio_name[i]);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+/* Close all opened pktio interfaces and destroy the packet pool.
+ *
+ * global: test state holding pktio handles and the pool handle
+ *
+ * Skips invalid handles; attempts every close even after a failure.
+ * The pool is destroyed last since packets must have been freed first.
+ *
+ * Returns 0 on success, -1 if any close or the pool destroy failed. */
+static int close_pktios(test_global_t *global)
+{
+ uint32_t i;
+ odp_pktio_t pktio;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_pktio = test_options->num_pktio;
+ int ret = 0;
+
+ for (i = 0; i < num_pktio; i++) {
+ pktio = global->pktio[i].pktio;
+
+ if (pktio == ODP_PKTIO_INVALID)
+ continue;
+
+ if (odp_pktio_close(pktio)) {
+ ODPH_ERR("Error (%s): Pktio close failed.\n", test_options->pktio_name[i]);
+ ret = -1;
+ }
+ }
+
+ if (global->pool != ODP_POOL_INVALID &&
+ odp_pool_destroy(global->pool)) {
+ ODPH_ERR("Error: Pool destroy failed.\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Extract a TX timestamp from a received packet and fold the resulting
+ * one-way latency into the per-thread latency accumulator.
+ *
+ * pkt:      received packet possibly carrying a ts_data_t at ts_off
+ * ts_off:   byte offset of the timestamp within the packet
+ * lat_data: running min/max/sum/count of latencies to update
+ * rx_ts:    receive-side global timestamp (nanoseconds)
+ *
+ * Packets whose copy fails or whose magic does not match TS_MAGIC are
+ * silently ignored, so non-test traffic does not skew the statistics. */
+static inline void get_timestamp(odp_packet_t pkt, uint32_t ts_off, rx_lat_data_t *lat_data,
+ uint64_t rx_ts)
+{
+ ts_data_t ts_data;
+ uint64_t nsec;
+
+ if (odp_unlikely(odp_packet_copy_to_mem(pkt, ts_off, sizeof(ts_data), &ts_data) < 0 ||
+ ts_data.magic != TS_MAGIC))
+ return;
+
+ nsec = rx_ts - ts_data.tx_ts;
+
+ if (nsec < lat_data->min)
+ lat_data->min = nsec;
+
+ if (nsec > lat_data->max)
+ lat_data->max = nsec;
+
+ lat_data->nsec += nsec;
+ lat_data->packets++;
+}
+
+/* Receive worker thread entry point.
+ *
+ * arg: thread_arg_t with the global state and (in direct mode) the
+ *      per-thread pktin queues.
+ *
+ * Receives packets either directly from pktin queues (round-robin over
+ * interfaces) or via the scheduler, accumulates byte/packet counters and
+ * optional per-packet latency (when a timestamp offset is configured),
+ * then publishes the totals into global->stat[thr] on exit.
+ *
+ * Exit protocol: after exit_test is raised, keeps receiving for one more
+ * second to catch in-flight packets, then (scheduled mode) pauses the
+ * scheduler and drains until an empty poll.
+ *
+ * Returns 0 on success, -1 on a pktin receive error. */
+static int rx_thread(void *arg)
+{
+ int i, thr, num;
+ uint32_t exit_test;
+ uint64_t bytes;
+ odp_time_t t1, t2, exit_time;
+ thread_arg_t *thread_arg = arg;
+ test_global_t *global = thread_arg->global;
+ int direct_rx = global->test_options.direct_rx;
+ int periodic_stat = global->test_options.update_msec ? 1 : 0;
+ uint64_t rx_timeouts = 0;
+ uint64_t rx_packets = 0;
+ uint64_t rx_bytes = 0;
+ uint64_t nsec = 0;
+ int ret = 0;
+ int clock_started = 0;
+ int exit_timer_started = 0;
+ int paused = 0;
+ const int max_num = 32;
+ int pktin = 0;
+ int num_pktio = global->test_options.num_pktio;
+ odp_pktin_queue_t pktin_queue[num_pktio];
+ odp_packet_t pkt[max_num];
+ uint32_t ts_off = global->test_options.calc_latency ? global->test_options.hdr_len : 0;
+ uint64_t rx_ts = 0;
+ rx_lat_data_t rx_lat_data = { .nsec = 0, .min = UINT64_MAX, .max = 0, .packets = 0 };
+
+ thr = odp_thread_id();
+ global->stat[thr].thread_type = RX_THREAD;
+
+ if (direct_rx) {
+ for (i = 0; i < num_pktio; i++)
+ pktin_queue[i] = thread_arg->pktin[i];
+ }
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ while (1) {
+ if (direct_rx) {
+ num = odp_pktin_recv(pktin_queue[pktin], pkt, max_num);
+
+ if (odp_unlikely(num < 0)) {
+ /* NOTE(review): breaking here with clock_started set
+ * leaves t2 unset, so the nsec calculation below would
+ * read an indeterminate value on this error path —
+ * confirm whether that matters for stats. */
+ ODPH_ERR("pktin (%i) recv failed: %i\n", pktin, num);
+ ret = -1;
+ num = 0;
+ break;
+ }
+
+ /* Round-robin over the interfaces' pktin queues */
+ pktin++;
+ if (pktin >= num_pktio)
+ pktin = 0;
+ } else {
+ odp_event_t ev[max_num];
+
+ num = odp_schedule_multi_no_wait(NULL, ev, max_num);
+
+ if (num)
+ odp_packet_from_event_multi(pkt, ev, num);
+ }
+
+ /* One RX timestamp per burst is shared by all packets in it */
+ if (ts_off && num)
+ rx_ts = odp_time_global_ns();
+
+ exit_test = odp_atomic_load_u32(&global->exit_test);
+ if (exit_test) {
+ /* Wait 1 second for possible in flight packets sent by the tx threads */
+ if (exit_timer_started == 0) {
+ exit_time = odp_time_local();
+ t2 = exit_time;
+ exit_timer_started = 1;
+ } else if (odp_time_diff_ns(odp_time_local(), exit_time) >
+ ODP_TIME_SEC_IN_NS) {
+ if (direct_rx == 0 && paused == 0) {
+ odp_schedule_pause();
+ paused = 1;
+ } else if (num == 0) {
+ /* Exit main loop after (schedule paused and) no more
+ * packets received */
+ break;
+ }
+ }
+ /* Use last received packet as stop time and don't increase rx_timeouts
+ * counter since tx threads have already been stopped */
+ if (num)
+ t2 = odp_time_local();
+ else
+ continue;
+ }
+
+ if (num == 0) {
+ if (direct_rx == 0)
+ rx_timeouts++;
+
+ continue;
+ }
+
+ /* Measurement clock starts at the first received packet */
+ if (!clock_started) {
+ t1 = odp_time_local();
+ clock_started = 1;
+ }
+
+ bytes = 0;
+ for (i = 0; i < num; i++) {
+ bytes += odp_packet_len(pkt[i]);
+
+ if (ts_off)
+ get_timestamp(pkt[i], ts_off, &rx_lat_data, rx_ts);
+ }
+
+ rx_packets += num;
+ rx_bytes += bytes;
+
+ if (odp_unlikely(periodic_stat)) {
+ /* All packets from the same queue are from the same pktio interface */
+ int index = odp_packet_input_index(pkt[0]);
+
+ if (index >= 0) {
+ int if_idx = global->if_from_pktio_idx[index];
+
+ global->stat[thr].pktio[if_idx].rx_packets += num;
+ }
+ }
+
+ odp_packet_free_multi(pkt, num);
+ }
+
+ if (clock_started)
+ nsec = odp_time_diff_ns(t2, t1);
+
+ /* Update stats*/
+ global->stat[thr].time_nsec = nsec;
+ global->stat[thr].rx_timeouts = rx_timeouts;
+ global->stat[thr].rx_packets = rx_packets;
+ global->stat[thr].rx_bytes = rx_bytes;
+ global->stat[thr].rx_lat_nsec = rx_lat_data.nsec;
+ global->stat[thr].rx_lat_min_nsec = rx_lat_data.min;
+ global->stat[thr].rx_lat_max_nsec = rx_lat_data.max;
+ global->stat[thr].rx_lat_packets = rx_lat_data.packets;
+
+ return ret;
+}
+
+/* Drain and free all events left in the scheduler after the test ends.
+ *
+ * global: test state; the drained counter records how many events
+ *         (packets) were flushed so the final report can show them.
+ *
+ * Polls with a 100 ms wait so the loop terminates once the scheduler
+ * has been empty for that long. */
+static void drain_scheduler(test_global_t *global)
+{
+ odp_event_t ev;
+ uint64_t wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+
+ while ((ev = odp_schedule(NULL, wait_time)) != ODP_EVENT_INVALID) {
+ global->drained++;
+ odp_event_free(ev);
+ }
+}
+
+/* Drain and free packets left in every direct-mode pktin queue.
+ *
+ * global: test state; increments the drained counter per flushed packet.
+ *
+ * Iterates all (interface, RX queue) pairs and polls each queue one
+ * packet at a time until it reports empty. Used instead of
+ * drain_scheduler() when the scheduler is not in use. */
+static void drain_direct_input(test_global_t *global)
+{
+ odp_pktin_queue_t pktin;
+ odp_packet_t pkt;
+ int i, j;
+ int num_pktio = global->test_options.num_pktio;
+ int num_rx = global->test_options.num_rx;
+
+ for (i = 0; i < num_pktio; i++) {
+ for (j = 0; j < num_rx; j++) {
+ pktin = global->pktio[i].pktin[j];
+
+ while (odp_pktin_recv(pktin, &pkt, 1) == 1) {
+ global->drained++;
+ odp_packet_free(pkt);
+ }
+ }
+ }
+}
+
+/* Write Ethernet/VLAN/IPv4/UDP headers and optional payload fill into a
+ * table of pre-allocated packets.
+ *
+ * global: test state (per-interface MAC addresses, options)
+ * pktio:  index of the interface whose src/dst MACs are used
+ * packet: packets to initialize
+ * num:    number of packets in the table
+ * seq:    base sequence number; packet i gets IPv4 id seq + i
+ *
+ * UDP source/destination ports cycle through the configured ranges
+ * (c_mode.udp_src / c_mode.udp_dst) across consecutive packets. The UDP
+ * checksum is computed only when checksums are enabled and latency
+ * measurement is off (the timestamp would invalidate a precomputed sum).
+ *
+ * Returns 0 on success, -1 if a packet's first segment cannot hold the
+ * full header chain. */
+static int init_packets(test_global_t *global, int pktio,
+ odp_packet_t packet[], uint32_t num, uint16_t seq)
+{
+ odp_packet_t pkt;
+ uint32_t i, j, pkt_len, seg_len, payload_len, l2_len;
+ void *data;
+ uint8_t *u8;
+ odph_ethhdr_t *eth;
+ odph_ipv4hdr_t *ip;
+ odph_udphdr_t *udp;
+ uint16_t tpid;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_vlan = test_options->num_vlan;
+ uint32_t hdr_len = test_options->hdr_len;
+ uint16_t udp_src = test_options->udp_src;
+ uint16_t udp_dst = test_options->udp_dst;
+ uint32_t udp_src_cnt = 0;
+ uint32_t udp_dst_cnt = 0;
+ odph_vlanhdr_t *vlan = NULL; /* Fixes bogus compiler warning */
+
+ if (num_vlan > MAX_VLANS)
+ num_vlan = MAX_VLANS;
+
+ for (i = 0; i < num; i++) {
+ pkt = packet[i];
+ pkt_len = odp_packet_len(pkt);
+ seg_len = odp_packet_seg_len(pkt);
+ data = odp_packet_data(pkt);
+ payload_len = pkt_len - hdr_len;
+
+ if (seg_len < hdr_len) {
+ ODPH_ERR("Error: First segment too short %u\n", seg_len);
+ return -1;
+ }
+
+ /* Ethernet */
+ eth = data;
+ memcpy(eth->dst.addr, global->pktio[pktio].eth_dst.addr, 6);
+ memcpy(eth->src.addr, global->pktio[pktio].eth_src.addr, 6);
+ eth->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+ l2_len = ODPH_ETHHDR_LEN;
+
+ /* VLAN(s): outer TPID goes into the Ethernet type field */
+ if (num_vlan) {
+ tpid = test_options->vlan[0].tpid;
+ eth->type = odp_cpu_to_be_16(tpid);
+ }
+
+ /* Each VLAN tag carries the TPID of the next inner tag */
+ for (j = 0; j < num_vlan; j++) {
+ vlan = (odph_vlanhdr_t *)((uint8_t *)data + l2_len);
+ vlan->tci = odp_cpu_to_be_16(test_options->vlan[j].tci);
+ if (j < num_vlan - 1) {
+ tpid = test_options->vlan[j + 1].tpid;
+ vlan->type = odp_cpu_to_be_16(tpid);
+ }
+
+ l2_len += ODPH_VLANHDR_LEN;
+ }
+
+ /* Innermost tag points at the IPv4 header */
+ if (num_vlan)
+ vlan->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+ /* IPv4 */
+ ip = (odph_ipv4hdr_t *)((uint8_t *)data + l2_len);
+ memset(ip, 0, ODPH_IPV4HDR_LEN);
+ ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
+ ip->tot_len = odp_cpu_to_be_16(pkt_len - l2_len);
+ ip->id = odp_cpu_to_be_16(seq + i);
+ ip->ttl = 64;
+ ip->proto = ODPH_IPPROTO_UDP;
+ ip->src_addr = odp_cpu_to_be_32(test_options->ipv4_src);
+ ip->dst_addr = odp_cpu_to_be_32(test_options->ipv4_dst);
+ ip->chksum = ~odp_chksum_ones_comp16(ip, ODPH_IPV4HDR_LEN);
+
+ /* UDP */
+ udp = (odph_udphdr_t *)((uint8_t *)data + l2_len +
+ ODPH_IPV4HDR_LEN);
+ memset(udp, 0, ODPH_UDPHDR_LEN);
+ udp->src_port = odp_cpu_to_be_16(udp_src);
+ udp->dst_port = odp_cpu_to_be_16(udp_dst);
+ udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
+ udp->chksum = 0;
+
+ u8 = data;
+ u8 += hdr_len;
+
+ if (test_options->fill_pl) {
+ /* Init UDP payload until the end of the first segment */
+ for (j = 0; j < seg_len - hdr_len; j++)
+ u8[j] = j;
+ }
+
+ /* Insert UDP checksum */
+ odp_packet_l3_offset_set(pkt, l2_len);
+ odp_packet_l4_offset_set(pkt, l2_len + ODPH_IPV4HDR_LEN);
+ odp_packet_has_eth_set(pkt, 1);
+ odp_packet_has_ipv4_set(pkt, 1);
+ odp_packet_has_udp_set(pkt, 1);
+
+ /* Skip the checksum when latency mode will overwrite payload
+ * with timestamps (set_timestamp() recomputes it per send) */
+ udp->chksum = !test_options->calc_latency && test_options->calc_cs ?
+ odph_ipv4_udp_chksum(pkt) : 0;
+
+ /* Increment port numbers */
+ if (test_options->c_mode.udp_src) {
+ udp_src_cnt++;
+ if (udp_src_cnt < test_options->c_mode.udp_src) {
+ udp_src++;
+ } else {
+ udp_src = test_options->udp_src;
+ udp_src_cnt = 0;
+ }
+ }
+ if (test_options->c_mode.udp_dst) {
+ udp_dst_cnt++;
+ if (udp_dst_cnt < test_options->c_mode.udp_dst) {
+ udp_dst++;
+ } else {
+ udp_dst = test_options->udp_dst;
+ udp_dst_cnt = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Fill a buffer with random bytes from odp_random_data().
+ *
+ * data:     destination buffer
+ * data_len: number of bytes requested
+ *
+ * odp_random_data() may return fewer bytes than asked, so loop until the
+ * buffer is full. Zero-byte returns are retried up to MAX_RAND_RETRIES
+ * times before giving up.
+ *
+ * Returns 0 on success, -1 on a random API error or retry exhaustion. */
+static inline int update_rand_data(uint8_t *data, uint32_t data_len)
+{
+ uint32_t generated = 0;
+ uint32_t retries = 0;
+
+ while (generated < data_len) {
+ int32_t ret = odp_random_data(data, data_len - generated, ODP_RANDOM_BASIC);
+
+ if (odp_unlikely(ret < 0)) {
+ ODPH_ERR("Error: odp_random_data() failed: %" PRId32 "\n", ret);
+ return -1;
+ } else if (odp_unlikely(ret == 0)) {
+ retries++;
+ if (odp_unlikely(retries > MAX_RAND_RETRIES)) {
+ ODPH_ERR("Error: Failed to create random data\n");
+ return -1;
+ }
+ continue;
+ }
+ data += ret;
+ generated += ret;
+ }
+ return 0;
+}
+
+/* Stamp a packet with the current global time for latency measurement.
+ *
+ * pkt:     packet to stamp (payload at ts_off receives a ts_data_t)
+ * ts_off:  byte offset where the timestamp record is written
+ * calc_cs: when true, recompute the UDP checksum to cover the new data
+ *
+ * The copy return value is intentionally ignored: ts_off is derived from
+ * the configured header length, so the write is within packet bounds. */
+static inline void set_timestamp(odp_packet_t pkt, uint32_t ts_off, odp_bool_t calc_cs)
+{
+ const ts_data_t ts_data = { .magic = TS_MAGIC, .tx_ts = odp_time_global_ns() };
+ odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
+
+ (void)odp_packet_copy_from_mem(pkt, ts_off, sizeof(ts_data), &ts_data);
+ udp->chksum = calc_cs ? odph_ipv4_udp_chksum(pkt) : 0;
+}
+
+/* Allocate a table of packets from the pool.
+ *
+ * pool:    source packet pool
+ * pkt_tbl: output table of num handles
+ * num:     number of packets to allocate
+ * global:  test state; when length bins are configured, packet i gets
+ *          the length of bin i % num_bins instead of the fixed pkt_len
+ *
+ * All-or-nothing: on a partial allocation every already-allocated packet
+ * is freed.
+ *
+ * Returns 0 on success, -1 on any allocation failure. */
+static int alloc_packets(odp_pool_t pool, odp_packet_t *pkt_tbl, uint32_t num,
+ test_global_t *global)
+{
+ uint32_t i, pkt_len;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_bins = global->num_bins;
+
+ pkt_len = test_options->pkt_len;
+
+ for (i = 0; i < num; i++) {
+ if (num_bins)
+ pkt_len = global->len_bin[i % num_bins];
+
+ pkt_tbl[i] = odp_packet_alloc(pool, pkt_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID) {
+ ODPH_ERR("Error: Alloc of %uB packet failed\n", pkt_len);
+ break;
+ }
+ }
+
+ if (i == 0)
+ return -1;
+
+ if (i != num) {
+ odp_packet_free_multi(pkt_tbl, i);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Build one TX burst from the pre-initialized packet table.
+ *
+ * out_pkt:     output burst (burst_size entries on full success)
+ * burst_size:  packets per burst
+ * num_bins:    number of packet-length bins; non-zero enables random
+ *              length selection, consuming entries from a thread-local
+ *              cache of random 16-bit words that is refilled on demand
+ * burst:       burst index within the table (selects the table slice)
+ * pkt_tbl:     pre-built packets (num_bins variants per slot when binned)
+ * pool:        pool used for copies in TX_MODE_COPY
+ * tx_mode:     TX_MODE_DF sends originals (don't-free), TX_MODE_REF sends
+ *              static references, otherwise full copies are made
+ * ts_off:      when non-zero (copy mode only), timestamp each copy
+ * calc_cs:     recompute UDP checksum after timestamping
+ * total_bytes: out: sum of packet lengths placed into out_pkt
+ *
+ * Returns the number of packets placed in out_pkt; may be short of
+ * burst_size when a reference/copy allocation or random refill fails. */
+static inline uint32_t form_burst(odp_packet_t out_pkt[], uint32_t burst_size, uint32_t num_bins,
+ uint32_t burst, odp_packet_t *pkt_tbl, odp_pool_t pool,
+ int tx_mode, uint32_t ts_off, odp_bool_t calc_cs,
+ uint64_t *total_bytes)
+{
+ uint32_t i, idx;
+ odp_packet_t pkt;
+ static __thread int rand_idx = RAND_16BIT_WORDS;
+ static __thread uint16_t rand_data[RAND_16BIT_WORDS];
+ uint64_t bytes = 0;
+
+ idx = burst * burst_size;
+ if (num_bins)
+ idx = burst * burst_size * num_bins;
+
+ for (i = 0; i < burst_size; i++) {
+ if (num_bins) {
+ uint32_t bin;
+
+ if (rand_idx >= RAND_16BIT_WORDS) {
+ if (odp_unlikely(update_rand_data((uint8_t *)rand_data,
+ RAND_16BIT_WORDS * 2)))
+ break;
+ rand_idx = 0;
+ }
+ /* Select random length bin */
+ bin = rand_data[rand_idx++] % num_bins;
+ pkt = pkt_tbl[idx + bin];
+ idx += num_bins;
+ } else {
+ pkt = pkt_tbl[idx];
+ idx++;
+ }
+
+ if (tx_mode == TX_MODE_DF) {
+ out_pkt[i] = pkt;
+ } else if (tx_mode == TX_MODE_REF) {
+ out_pkt[i] = odp_packet_ref_static(pkt);
+
+ if (odp_unlikely(out_pkt[i] == ODP_PACKET_INVALID))
+ break;
+ } else {
+ out_pkt[i] = odp_packet_copy(pkt, pool);
+
+ if (odp_unlikely(out_pkt[i] == ODP_PACKET_INVALID))
+ break;
+
+ if (ts_off)
+ set_timestamp(out_pkt[i], ts_off, calc_cs);
+ }
+
+ bytes += odp_packet_len(out_pkt[i]);
+ }
+
+ *total_bytes = bytes;
+
+ return i;
+}
+
+/* Send one burst and free whatever the driver did not accept.
+ *
+ * pktout:     destination TX queue
+ * pkt:        burst to transmit
+ * num:        number of packets in the burst
+ * tx_mode:    in TX_MODE_DF unsent packets are kept (don't-free packets
+ *             are reused), otherwise they are freed here
+ * drop_bytes: out: total length of packets not accepted by the driver
+ *
+ * Returns the number of packets actually sent (0 on send error). */
+static inline uint32_t send_burst(odp_pktout_queue_t pktout, odp_packet_t pkt[],
+ uint32_t num, int tx_mode, uint64_t *drop_bytes)
+{
+ int ret;
+ uint32_t sent;
+ uint64_t bytes = 0;
+
+ ret = odp_pktout_send(pktout, pkt, num);
+
+ sent = ret;
+ if (odp_unlikely(ret < 0))
+ sent = 0;
+
+ if (odp_unlikely(sent != num)) {
+ uint32_t i;
+ uint32_t num_drop = num - sent;
+
+ for (i = sent; i < num; i++)
+ bytes += odp_packet_len(pkt[i]);
+
+ if (tx_mode != TX_MODE_DF)
+ odp_packet_free_multi(&pkt[sent], num_drop);
+ }
+
+ *drop_bytes = bytes;
+
+ return sent;
+}
+
+/* Transmit worker thread entry point.
+ *
+ * arg: thread_arg_t with the global state, per-interface pktout queues,
+ *      per-interface packet tables and this thread's TX index.
+ *
+ * Pre-allocates and initializes a packet table per interface, then on a
+ * configurable time grid (gap_nsec between bursts, staggered across TX
+ * threads) sends `bursts` bursts of `burst_size` packets to every
+ * interface until exit_test is raised or the quit timeout count is
+ * reached. Publishes totals into global->stat[thr] on exit.
+ *
+ * Returns 0 on success, -1 on allocation/init/send failure. */
+static int tx_thread(void *arg)
+{
+ int i, thr, tx_thr;
+ uint32_t exit_test, num_alloc, j;
+ odp_time_t t1, t2, next_tmo;
+ uint64_t diff_ns, t1_nsec;
+ odp_packet_t *pkt_tbl;
+ thread_arg_t *thread_arg = arg;
+ test_global_t *global = thread_arg->global;
+ test_options_t *test_options = &global->test_options;
+ int periodic_stat = test_options->update_msec ? 1 : 0;
+ odp_pool_t pool = global->pool;
+ uint64_t gap_nsec = test_options->gap_nsec;
+ uint64_t quit = test_options->quit;
+ uint64_t tx_timeouts = 0;
+ uint64_t tx_bytes = 0;
+ uint64_t tx_packets = 0;
+ uint64_t tx_drops = 0;
+ int ret = 0;
+ const uint32_t burst_size = test_options->burst_size;
+ const uint32_t bursts = test_options->bursts;
+ const uint32_t num_tx = test_options->num_tx;
+ const int tx_mode = test_options->tx_mode;
+ odp_bool_t calc_cs = test_options->calc_cs;
+ int num_pktio = test_options->num_pktio;
+ odp_pktout_queue_t pktout[num_pktio];
+ uint32_t ts_off = test_options->calc_latency ? test_options->hdr_len : 0;
+ uint32_t tot_packets = 0;
+ uint32_t num_bins = global->num_bins;
+ thr = odp_thread_id();
+ tx_thr = thread_arg->tx_thr;
+ global->stat[thr].thread_type = TX_THREAD;
+
+ /* With length bins, the table holds num_bins length variants per
+ * logical packet slot */
+ num_alloc = global->num_tx_pkt;
+ if (num_bins)
+ num_alloc = global->num_tx_pkt * num_bins;
+
+ for (i = 0; i < num_pktio; i++) {
+ int seq = i * num_alloc;
+
+ pktout[i] = thread_arg->pktout[i];
+ pkt_tbl = thread_arg->packet[i];
+
+ if (alloc_packets(pool, pkt_tbl, num_alloc, global)) {
+ ret = -1;
+ break;
+ }
+
+ tot_packets += num_alloc;
+
+ if (init_packets(global, i, pkt_tbl, num_alloc, seq)) {
+ ret = -1;
+ break;
+ }
+
+ /* Don't-free mode: the same packets are sent repeatedly, so
+ * mark them to survive transmit completion */
+ if (tx_mode == TX_MODE_DF) {
+ for (j = 0; j < num_alloc; j++)
+ odp_packet_free_ctrl_set(pkt_tbl[j],
+ ODP_PACKET_FREE_CTRL_DONT_FREE);
+ }
+ }
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+
+ /* Start TX burst at different per thread offset */
+ t1_nsec = odp_time_to_ns(t1) + gap_nsec + (tx_thr * gap_nsec / num_tx);
+
+ while (ret == 0) {
+ exit_test = odp_atomic_load_u32(&global->exit_test);
+ if (exit_test)
+ break;
+
+ /* -q option: stop the whole test after `quit` send rounds */
+ if (quit && tx_timeouts >= quit) {
+ odp_atomic_inc_u32(&global->exit_test);
+ break;
+ }
+
+ if (gap_nsec) {
+ uint64_t nsec = t1_nsec + tx_timeouts * gap_nsec;
+
+ next_tmo = odp_time_local_from_ns(nsec);
+ odp_time_wait_until(next_tmo);
+ }
+ tx_timeouts++;
+
+ /* Send bursts to each pktio */
+ for (i = 0; i < num_pktio; i++) {
+ uint32_t num, sent;
+ uint64_t total_bytes, drop_bytes;
+ odp_packet_t pkt[burst_size];
+
+ pkt_tbl = thread_arg->packet[i];
+
+ for (j = 0; j < bursts; j++) {
+ num = form_burst(pkt, burst_size, num_bins, j, pkt_tbl, pool,
+ tx_mode, ts_off, calc_cs, &total_bytes);
+
+ if (odp_unlikely(num == 0)) {
+ ret = -1;
+ tx_drops += burst_size;
+ break;
+ }
+
+ sent = send_burst(pktout[i], pkt, num, tx_mode, &drop_bytes);
+
+ if (odp_unlikely(sent == 0)) {
+ ret = -1;
+ tx_drops += burst_size;
+ break;
+ }
+
+ tx_bytes += total_bytes - drop_bytes;
+ tx_packets += sent;
+ if (odp_unlikely(sent < burst_size))
+ tx_drops += burst_size - sent;
+
+ if (odp_unlikely(periodic_stat))
+ global->stat[thr].pktio[i].tx_packets += sent;
+
+ }
+ }
+ }
+
+ t2 = odp_time_local();
+ diff_ns = odp_time_diff_ns(t2, t1);
+
+ /* Free only the tables that were fully allocated (tot_packets tracks
+ * how many packets are actually outstanding) */
+ for (i = 0; i < num_pktio; i++) {
+ pkt_tbl = thread_arg->packet[i];
+
+ if (tot_packets == 0)
+ break;
+
+ odp_packet_free_multi(pkt_tbl, num_alloc);
+ tot_packets -= num_alloc;
+ }
+
+ /* Update stats */
+ global->stat[thr].time_nsec = diff_ns;
+ global->stat[thr].tx_timeouts = tx_timeouts;
+ global->stat[thr].tx_bytes = tx_bytes;
+ global->stat[thr].tx_packets = tx_packets;
+ global->stat[thr].tx_drops = tx_drops;
+
+ return ret;
+}
+
+/* Create all worker threads: num_rx receive threads followed by
+ * (num_cpu - num_rx) transmit threads.
+ *
+ * global:   test state with thread args, pktio queues and CPU mask
+ * instance: ODP instance the helper threads join
+ *
+ * Each RX thread gets one pktin queue per interface (direct mode only);
+ * each TX thread gets one pktout queue per interface plus its TX index
+ * used for burst-time staggering.
+ *
+ * Returns 0 on success, -1 if fewer than num_cpu threads were created. */
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t thr_common;
+ int i, j, ret, tx_thr;
+ test_options_t *test_options = &global->test_options;
+ int num_pktio = test_options->num_pktio;
+ int num_rx = test_options->num_rx;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+ odph_thread_common_param_init(&thr_common);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &global->cpumask;
+
+ /* Receive threads */
+ for (i = 0; i < num_rx; i++) {
+ /* In direct mode, dedicate a pktin queue per pktio interface (per RX thread) */
+ for (j = 0; test_options->direct_rx && j < num_pktio; j++)
+ global->thread_arg[i].pktin[j] = global->pktio[j].pktin[i];
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = rx_thread;
+ thr_param[i].arg = &global->thread_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ }
+
+ /* Transmit threads */
+ tx_thr = 0;
+ for (i = num_rx; i < num_cpu; i++) {
+ for (j = 0; j < num_pktio; j++) {
+ odp_pktout_queue_t pktout;
+
+ global->thread_arg[i].tx_thr = tx_thr;
+
+ /* Dedicate a pktout queue per pktio interface
+ * (per TX thread) */
+ pktout = global->pktio[j].pktout[tx_thr];
+ global->thread_arg[i].pktout[j] = pktout;
+ }
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = tx_thread;
+ thr_param[i].arg = &global->thread_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ tx_thr++;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param,
+ num_cpu);
+
+ if (ret != num_cpu) {
+ ODPH_ERR("Error: thread create failed %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Print cumulative per-interface TX and RX packet counts.
+ *
+ * global: test state; per-thread per-interface counters are summed
+ *         across all worker threads
+ * nsec:   elapsed time since the test started, printed in seconds
+ *
+ * Counters are read without synchronization, so a line is a snapshot
+ * of monotonically growing totals, not an exact instant. */
+static void print_periodic_stat(test_global_t *global, uint64_t nsec)
+{
+ int i, j;
+ int num_pktio = global->test_options.num_pktio;
+ double sec = nsec / 1000000000.0;
+ uint64_t num_tx[num_pktio];
+ uint64_t num_rx[num_pktio];
+
+ for (i = 0; i < num_pktio; i++) {
+ num_tx[i] = 0;
+ num_rx[i] = 0;
+
+ for (j = 0; j < MAX_THREADS; j++) {
+ if (global->stat[j].thread_type == RX_THREAD)
+ num_rx[i] += global->stat[j].pktio[i].rx_packets;
+ else if (global->stat[j].thread_type == TX_THREAD)
+ num_tx[i] += global->stat[j].pktio[i].tx_packets;
+ }
+ }
+
+ printf(" TX: %12.6fs", sec);
+ for (i = 0; i < num_pktio; i++)
+ printf(" %10" PRIu64 "", num_tx[i]);
+
+ printf("\n RX: %12.6fs", sec);
+ for (i = 0; i < num_pktio; i++)
+ printf(" %10" PRIu64 "", num_rx[i]);
+
+ printf("\n");
+}
+
+/* Print the per-interface statistics table header, then emit one stat
+ * line every update_msec milliseconds until exit_test is raised.
+ *
+ * global: test state with the update interval and exit flag
+ *
+ * Runs on the main (control) thread while workers do the traffic. */
+static void periodic_print_loop(test_global_t *global)
+{
+ odp_time_t t1, t2;
+ uint64_t nsec;
+ int i;
+ int num_pktio = global->test_options.num_pktio;
+
+ printf("\n\nPackets per interface\n");
+ printf(" Dir Time");
+ for (i = 0; i < num_pktio; i++)
+ printf(" %10i", i);
+
+ printf("\n -----------------");
+ for (i = 0; i < num_pktio; i++)
+ printf("-----------");
+
+ printf("\n");
+
+ t1 = odp_time_local();
+ while (odp_atomic_load_u32(&global->exit_test) == 0) {
+ usleep(1000 * global->test_options.update_msec);
+ t2 = odp_time_local();
+ nsec = odp_time_diff_ns(t2, t1);
+ print_periodic_stat(global, nsec);
+ }
+}
+
+/* Print a nanosecond duration scaled to the largest unit (s, ms, us, ns)
+ * that keeps the value above 1, followed by a newline.
+ *
+ * time_nsec: duration in nanoseconds */
+static void print_humanised_time(double time_nsec)
+{
+ if (time_nsec > ODP_TIME_SEC_IN_NS)
+ printf("%.2f s\n", time_nsec / ODP_TIME_SEC_IN_NS);
+ else if (time_nsec > ODP_TIME_MSEC_IN_NS)
+ printf("%.2f ms\n", time_nsec / ODP_TIME_MSEC_IN_NS);
+ else if (time_nsec > ODP_TIME_USEC_IN_NS)
+ printf("%.2f us\n", time_nsec / ODP_TIME_USEC_IN_NS);
+ else
+ printf("%.0f ns\n", time_nsec);
+}
+
+/* Print average, minimum and maximum RX packet latency, each in a
+ * human-readable time unit.
+ *
+ * lat_nsec:     average latency in nanoseconds
+ * lat_min_nsec: minimum latency in nanoseconds
+ * lat_max_nsec: maximum latency in nanoseconds */
+static void print_humanised_latency(double lat_nsec, double lat_min_nsec, double lat_max_nsec)
+{
+ printf(" rx ave packet latency: ");
+ print_humanised_time(lat_nsec);
+ printf(" rx min packet latency: ");
+ print_humanised_time(lat_min_nsec);
+ printf(" rx max packet latency: ");
+ print_humanised_time(lat_max_nsec);
+}
+
+/* Print the final per-thread and aggregated test results.
+ *
+ * global: test state with per-thread statistics filled in by the
+ *         (already joined) worker threads
+ *
+ * Aggregates RX/TX packet, byte, timeout and drop counters over all
+ * threads, derives per-second rates from per-thread elapsed times, and
+ * computes line rate in Mbit/s including 24 bytes per packet of Ethernet
+ * framing overhead (preamble + IFG + FCS accounting).
+ *
+ * Returns 0 on success, -1 when fewer than MIN_RX_PACKETS_CI packets
+ * were received (used as a CI pass/fail criterion). */
+static int print_final_stat(test_global_t *global)
+{
+ int i, num_thr;
+ double rx_pkt_ave, rx_mbit_per_sec, tx_mbit_per_sec;
+ test_options_t *test_options = &global->test_options;
+ int num_rx = test_options->num_rx;
+ int num_tx = test_options->num_tx;
+ uint64_t rx_nsec_sum = 0;
+ uint64_t rx_pkt_sum = 0;
+ uint64_t rx_byte_sum = 0;
+ uint64_t rx_tmo_sum = 0;
+ uint64_t rx_lat_nsec_sum = 0;
+ uint64_t rx_lat_min_nsec = UINT64_MAX;
+ uint64_t rx_lat_max_nsec = 0;
+ uint64_t rx_lat_pkt_sum = 0;
+ uint64_t tx_nsec_sum = 0;
+ uint64_t tx_pkt_sum = 0;
+ uint64_t tx_byte_sum = 0;
+ uint64_t tx_drop_sum = 0;
+ uint64_t tx_tmo_sum = 0;
+ double rx_pkt_per_sec = 0.0;
+ double rx_byte_per_sec = 0.0;
+ double rx_pkt_len = 0.0;
+ double rx_sec = 0.0;
+ double rx_ave_lat_nsec = 0.0;
+ double tx_pkt_per_sec = 0.0;
+ double tx_byte_per_sec = 0.0;
+ double tx_sec = 0.0;
+
+ printf("\nRESULTS PER THREAD\n");
+ printf(" rx thread:\n");
+ printf(" 1 2 3 4 5 6 7 8\n");
+ printf(" ---------------------------------------------------------------------------------------\n");
+ printf(" ");
+
+ /* Per-thread RX packet counts, eight columns per row */
+ num_thr = 0;
+ for (i = 0; i < MAX_THREADS; i++) {
+ if (global->stat[i].thread_type != RX_THREAD)
+ continue;
+
+ if (num_thr && (num_thr % 8) == 0)
+ printf("\n ");
+
+ printf("%10" PRIu64 " ", global->stat[i].rx_packets);
+ num_thr++;
+ }
+
+ printf("\n\n");
+
+ printf(" tx thread:\n");
+ printf(" 1 2 3 4 5 6 7 8\n");
+ printf(" ---------------------------------------------------------------------------------------\n");
+ printf(" ");
+
+ /* Per-thread TX packet counts, eight columns per row */
+ num_thr = 0;
+ for (i = 0; i < MAX_THREADS; i++) {
+ if (global->stat[i].thread_type != TX_THREAD)
+ continue;
+
+ if (num_thr && (num_thr % 8) == 0)
+ printf("\n ");
+
+ printf("%10" PRIu64 " ", global->stat[i].tx_packets);
+ num_thr++;
+ }
+
+ printf("\n\n");
+
+ /* Aggregate counters over all worker threads */
+ for (i = 0; i < MAX_THREADS; i++) {
+ if (global->stat[i].thread_type == RX_THREAD) {
+ rx_tmo_sum += global->stat[i].rx_timeouts;
+ rx_pkt_sum += global->stat[i].rx_packets;
+ rx_byte_sum += global->stat[i].rx_bytes;
+ rx_nsec_sum += global->stat[i].time_nsec;
+ rx_lat_nsec_sum += global->stat[i].rx_lat_nsec;
+ rx_lat_pkt_sum += global->stat[i].rx_lat_packets;
+
+ if (global->stat[i].rx_lat_min_nsec < rx_lat_min_nsec)
+ rx_lat_min_nsec = global->stat[i].rx_lat_min_nsec;
+
+ if (global->stat[i].rx_lat_max_nsec > rx_lat_max_nsec)
+ rx_lat_max_nsec = global->stat[i].rx_lat_max_nsec;
+ } else if (global->stat[i].thread_type == TX_THREAD) {
+ tx_tmo_sum += global->stat[i].tx_timeouts;
+ tx_pkt_sum += global->stat[i].tx_packets;
+ tx_byte_sum += global->stat[i].tx_bytes;
+ tx_drop_sum += global->stat[i].tx_drops;
+ tx_nsec_sum += global->stat[i].time_nsec;
+ }
+ }
+
+ rx_pkt_ave = (double)rx_pkt_sum / num_rx;
+ rx_sec = rx_nsec_sum / 1000000000.0;
+ tx_sec = tx_nsec_sum / 1000000000.0;
+
+ /* Packets and bytes per thread per sec */
+ if (rx_nsec_sum) {
+ rx_pkt_per_sec = (1000000000.0 * (double)rx_pkt_sum) /
+ (double)rx_nsec_sum;
+
+ /* 24 bytes per packet of framing overhead on the wire */
+ rx_byte_per_sec = 1000000000.0;
+ rx_byte_per_sec *= (rx_byte_sum + 24 * rx_pkt_sum);
+ rx_byte_per_sec /= (double)rx_nsec_sum;
+ }
+
+ if (tx_nsec_sum) {
+ tx_pkt_per_sec = (1000000000.0 * (double)tx_pkt_sum) /
+ (double)tx_nsec_sum;
+
+ tx_byte_per_sec = 1000000000.0;
+ tx_byte_per_sec *= (tx_byte_sum + 24 * tx_pkt_sum);
+ tx_byte_per_sec /= (double)tx_nsec_sum;
+ }
+
+ /* Total Mbit/s */
+ rx_mbit_per_sec = (num_rx * 8 * rx_byte_per_sec) / 1000000.0;
+ tx_mbit_per_sec = (num_tx * 8 * tx_byte_per_sec) / 1000000.0;
+
+ if (rx_pkt_sum)
+ rx_pkt_len = (double)rx_byte_sum / rx_pkt_sum;
+
+ if (rx_lat_pkt_sum)
+ rx_ave_lat_nsec = (double)rx_lat_nsec_sum / rx_lat_pkt_sum;
+
+ printf("TOTAL (%i rx and %i tx threads)\n", num_rx, num_tx);
+ printf(" rx timeouts: %" PRIu64 "\n", rx_tmo_sum);
+ printf(" rx time spent (sec): %.3f\n", rx_sec);
+ printf(" rx packets: %" PRIu64 "\n", rx_pkt_sum);
+ printf(" rx packets drained: %" PRIu64 "\n", global->drained);
+ printf(" rx packets per thr: %.1f\n", rx_pkt_ave);
+ printf(" rx packets per thr per sec: %.1f\n", rx_pkt_per_sec);
+ printf(" rx packets per sec: %.1f\n", num_rx * rx_pkt_per_sec);
+ printf(" rx ave packet len: %.1f\n", rx_pkt_len);
+
+ if (rx_lat_pkt_sum)
+ print_humanised_latency(rx_ave_lat_nsec, rx_lat_min_nsec, rx_lat_max_nsec);
+
+ printf(" rx Mbit/s: %.1f\n", rx_mbit_per_sec);
+ printf("\n");
+ printf(" tx timeouts: %" PRIu64 "\n", tx_tmo_sum);
+ printf(" tx time spent (sec): %.3f\n", tx_sec);
+ printf(" tx packets: %" PRIu64 "\n", tx_pkt_sum);
+ printf(" tx dropped packets: %" PRIu64 "\n", tx_drop_sum);
+ printf(" tx packets per thr per sec: %.1f\n", tx_pkt_per_sec);
+ printf(" tx packets per sec: %.1f\n", num_tx * tx_pkt_per_sec);
+ printf(" tx Mbit/s: %.1f\n", tx_mbit_per_sec);
+ printf("\n");
+
+ if (rx_pkt_sum < MIN_RX_PACKETS_CI)
+ return -1;
+
+ return 0;
+}
+
+/* SIGINT handler: raise the exit flag so all worker loops terminate.
+ *
+ * signo: signal number (unused)
+ *
+ * Guards against test_global not yet being set up (signal arriving
+ * before SHM reservation completes). */
+static void sig_handler(int signo)
+{
+ (void)signo;
+
+ if (test_global == NULL)
+ return;
+
+ odp_atomic_add_u32(&test_global->exit_test, 1);
+}
+
+/* Application entry point.
+ *
+ * Initializes ODP (with unused features disabled), reserves shared
+ * memory for the global test state, parses options, opens and starts
+ * the pktio interfaces, launches RX/TX worker threads, optionally runs
+ * the periodic statistics printer, then joins workers, drains leftover
+ * packets, tears everything down and prints the final report.
+ *
+ * Returns 0 on success, 1 on a setup/teardown error, 2 when the test
+ * ran but received too few packets (see print_final_stat()). */
+int main(int argc, char **argv)
+{
+ odph_helper_options_t helper_options;
+ odp_instance_t instance;
+ odp_init_t init;
+ test_global_t *global;
+ odp_shm_t shm;
+ int i;
+ int ret = 0;
+
+ signal(SIGINT, sig_handler);
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Error: Global init failed.\n");
+ return 1;
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Error: Local init failed.\n");
+ return 1;
+ }
+
+ shm = odp_shm_reserve("packet_gen_global", sizeof(test_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Error: SHM reserve failed.\n");
+ return 1;
+ }
+
+ global = odp_shm_addr(shm);
+ test_global = global;
+
+ memset(global, 0, sizeof(test_global_t));
+ odp_atomic_init_u32(&global->exit_test, 0);
+
+ for (i = 0; i < MAX_THREADS; i++)
+ global->thread_arg[i].global = global;
+
+ if (parse_options(argc, argv, global)) {
+ ret = 1;
+ goto term;
+ }
+
+ odp_sys_info_print();
+
+ /* Avoid all scheduler API calls in direct input mode */
+ if (global->test_options.direct_rx == 0)
+ odp_schedule_config(NULL);
+
+ if (set_num_cpu(global)) {
+ ret = 1;
+ goto term;
+ }
+
+ if (open_pktios(global)) {
+ ret = 1;
+ goto term;
+ }
+
+ if (start_pktios(global)) {
+ ret = 1;
+ goto term;
+ }
+
+ /* Start worker threads */
+ /* NOTE(review): return value ignored — if thread creation fails,
+ * the barrier wait below could block; confirm intended. */
+ start_workers(global, instance);
+
+ /* Wait until workers have started. */
+ odp_barrier_wait(&global->barrier);
+
+ /* Periodic statistics printing */
+ if (global->test_options.update_msec)
+ periodic_print_loop(global);
+
+ /* Wait workers to exit */
+ odph_thread_join(global->thread_tbl,
+ global->test_options.num_cpu);
+
+ if (stop_pktios(global))
+ ret = 1;
+
+ if (global->test_options.direct_rx)
+ drain_direct_input(global);
+ else
+ drain_scheduler(global);
+
+ if (close_pktios(global))
+ ret = 1;
+
+ if (print_final_stat(global))
+ ret = 2;
+
+term:
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Error: SHM free failed.\n");
+ return 1;
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: term local failed.\n");
+ return 1;
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: term global failed.\n");
+ return 1;
+ }
+
+ return ret;
+}
diff --git a/test/performance/odp_packet_gen_run.sh b/test/performance/odp_packet_gen_run.sh
new file mode 100755
index 000000000..af272f619
--- /dev/null
+++ b/test/performance/odp_packet_gen_run.sh
@@ -0,0 +1,88 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# directory where test binaries have been built
+TEST_DIR="${TEST_DIR:-$PWD}"
+
+# directory where test sources are, including scripts
+TEST_SRC_DIR=$(dirname $0)
+
+PATH=$TEST_DIR:$PATH
+
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+VALIDATION_TESTDIR=platform/$ODP_PLATFORM/test/validation
+PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$VALIDATION_TESTDIR
+
+# Use installed pktio env or for make check take it from platform directory
+if [ -f "./pktio_env" ]; then
+ . ./pktio_env
+elif [ "$ODP_PLATFORM" = "" ]; then
+ echo "$0: error: ODP_PLATFORM must be defined"
+ # not skipped as this should never happen via "make check"
+ exit 1
+elif [ -f ${PLATFORM_VALIDATION}/api/pktio/pktio_env ]; then
+ . ${PLATFORM_VALIDATION}/api/pktio/pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory or "
+ echo "in platform/\$ODP_PLATFORM/test."
+ echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
+ exit 1
+fi
+
+# Run the packet generator twice (fixed and random packet lengths) over
+# two pcap-backed interfaces set up by the pktio environment.
+# Exits with the generator's status; 2 means too few packets received.
+run_packet_gen()
+{
+ setup_pktio_env clean # install trap to call cleanup_pktio_env
+
+ if [ $? -ne 0 ]; then
+ echo "setup_pktio_env error $?"
+ exit $TEST_SKIPPED
+ fi
+
+ export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap1,iface=$IF0 --vdev net_pcap2,iface=$IF1"
+
+ # Runs 500 * 10ms = 5 sec
+ # Sends 500 packets through both interfaces => total 1000 packets
+
+ # Static packet length
+ odp_packet_gen${EXEEXT} -i 0,1 -b 1 -g 10000000 -q 500 -w 10
+ ret=$?
+
+ if [ $ret -eq 2 ]; then
+ echo "FAIL: too few packets received"
+ fi
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: test failed: $ret"
+ cleanup_pktio_env
+ exit $ret
+ fi
+
+ # Random packet length
+ odp_packet_gen${EXEEXT} -i 0,1 -b 1 -g 10000000 -q 500 -L 60,1514,10 -w 10
+ ret=$?
+
+ if [ $ret -eq 2 ]; then
+ echo "FAIL: too few packets received"
+ fi
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: test failed: $ret"
+ fi
+
+ cleanup_pktio_env
+
+ exit $ret
+}
+
+# Dispatch: "setup"/"cleanup" manage the pktio environment only;
+# anything else (including no argument) runs the full test.
+case "$1" in
+ setup) setup_pktio_env ;;
+ cleanup) cleanup_pktio_env ;;
+ *) run_packet_gen ;;
+esac
diff --git a/test/common_plat/performance/odp_pktio_ordered.c b/test/performance/odp_pktio_ordered.c
index bff4586e5..6177a8160 100644
--- a/test/common_plat/performance/odp_pktio_ordered.c
+++ b/test/performance/odp_pktio_ordered.c
@@ -1,13 +1,15 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
- * @file
+ * @example odp_pktio_ordered.c
*
- * @example odp_pktio_ordered.c ODP ordered pktio test application
+ * Test application for ordered packet IO
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
*/
/** enable strtok */
@@ -21,14 +23,10 @@
#include <errno.h>
#include <inttypes.h>
-#include <test_debug.h>
-#include <dummy_crc.h>
+#include "dummy_crc.h"
#include <odp_api.h>
-#include <odp/helper/threads.h>
-#include <odp/helper/eth.h>
-#include <odp/helper/ip.h>
-#include <odp/helper/udp.h>
+#include <odp/helper/odph_api.h>
/** Jenkins hash support.
*
@@ -73,11 +71,11 @@
#define JHASH_GOLDEN_RATIO 0x9e3779b9
-/** Maximum number of worker threads */
-#define MAX_WORKERS 64
+/* Maximum pool and queue size */
+#define MAX_NUM_PKT (8 * 1024)
-/** Number of packet buffers in the memory pool */
-#define PKT_POOL_SIZE 8192
+/** Maximum number of worker threads */
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
/** Buffer size of the packet pool buffer in bytes*/
#define PKT_POOL_BUF_SIZE 1856
@@ -132,7 +130,7 @@ typedef enum pktin_mode_t {
* Parsed command line application arguments
*/
typedef struct {
- int cpu_count; /**< CPU count */
+ unsigned int cpu_count; /**< CPU count */
int if_count; /**< Number of interfaces to be used */
int addr_count; /**< Number of dst addresses to be used */
int num_rx_q; /**< Number of input queues per interface */
@@ -144,10 +142,9 @@ typedef struct {
int time; /**< Time in seconds to run. */
int accuracy; /**< Statistics print interval */
char *if_str; /**< Storage for interface names */
+ int promisc_mode; /**< Promiscuous mode enabled */
} appl_args_t;
-static int exit_threads; /**< Break workers loop if set to 1 */
-
/**
* Queue context
*/
@@ -174,7 +171,7 @@ ODP_STATIC_ASSERT(sizeof(flow_t) <= PKT_UAREA_SIZE,
/**
* Statistics
*/
-typedef union {
+typedef union ODP_ALIGNED_CACHE {
struct {
/** Number of forwarded packets */
uint64_t packets;
@@ -187,7 +184,7 @@ typedef union {
} s;
uint8_t padding[ODP_CACHE_LINE_SIZE];
-} stats_t ODP_ALIGNED_CACHE;
+} stats_t;
/**
* IPv4 5-tuple
@@ -248,14 +245,15 @@ typedef struct {
int num_rx_queue;
int num_tx_queue;
} pktios[MAX_PKTIOS];
+ /** Global barrier to synchronize main and workers */
+ odp_barrier_t barrier;
+ /** Break workers loop if set to 1 */
+ odp_atomic_u32_t exit_threads;
} args_t;
/** Global pointer to args */
static args_t *gbl_args;
-/** Global barrier to synchronize main and workers */
-static odp_barrier_t barrier;
-
/**
* Lookup the destination port for a given packet
*
@@ -274,7 +272,7 @@ static inline int lookup_dest_port(odp_packet_t pkt)
src_idx = i;
if (src_idx == -1)
- LOG_ABORT("Failed to determine pktio input\n");
+ ODPH_ABORT("Failed to determine pktio input\n");
return gbl_args->dst_port[src_idx];
}
@@ -502,7 +500,7 @@ static inline void process_input(odp_event_t ev_tbl[], int num, stats_t *stats,
ev_tbl[i]);
if (odp_unlikely(ret != 0)) {
- LOG_ERR("odp_queue_enq() failed\n");
+ ODPH_ERR("odp_queue_enq() failed\n");
stats->s.tx_drops++;
odp_event_free(ev_tbl[i]);
} else {
@@ -535,10 +533,10 @@ static int run_worker(void *arg)
gbl_args->pktios[i].num_tx_queue];
}
}
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
/* Loop packets */
- while (!exit_threads) {
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
pkts = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, ev_tbl,
MAX_PKT_BURST);
if (pkts <= 0)
@@ -586,6 +584,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
odp_pktin_queue_param_t pktin_param;
odp_pktout_queue_param_t pktout_param;
odp_pktio_op_mode_t mode_rx;
@@ -598,7 +597,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
- LOG_ERR("Error: failed to open %s\n", dev);
+ ODPH_ERR("Error: failed to open %s\n", dev);
return -1;
}
@@ -606,11 +605,29 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
odp_pktio_to_u64(pktio), dev);
if (odp_pktio_capability(pktio, &capa)) {
- LOG_ERR("Error: capability query failed %s\n", dev);
+ ODPH_ERR("Error: capability query failed %s\n", dev);
odp_pktio_close(pktio);
return -1;
}
+ odp_pktio_config_init(&config);
+ config.parser.layer = ODP_PROTO_LAYER_L2;
+ odp_pktio_config(pktio, &config);
+
+ if (gbl_args->appl.promisc_mode && odp_pktio_promisc_mode(pktio) != 1) {
+ if (!capa.set_op.op.promisc_mode) {
+ ODPH_ERR("Error: promisc mode set not supported %s\n",
+ dev);
+ return -1;
+ }
+
+ /* Enable promisc mode */
+ if (odp_pktio_promisc_mode_set(pktio, true)) {
+ ODPH_ERR("Error: promisc mode enable failed %s\n", dev);
+ return -1;
+ }
+ }
+
odp_pktin_queue_param_init(&pktin_param);
odp_pktout_queue_param_init(&pktout_param);
@@ -625,7 +642,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
pktin_param.queue_param.sched.lock_count = 1;
}
- pktin_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
if (num_rx > (int)capa.max_input_queues) {
@@ -642,7 +659,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
mode_tx = ODP_PKTIO_OP_MT;
}
- pktin_param.hash_enable = 1;
+ pktin_param.hash_enable = (num_rx > 1) ? 1 : 0;
pktin_param.hash_proto.proto.ipv4_udp = 1;
pktin_param.num_queues = num_rx;
pktin_param.op_mode = mode_rx;
@@ -651,19 +668,18 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
pktout_param.num_queues = num_tx;
if (odp_pktin_queue_config(pktio, &pktin_param)) {
- LOG_ERR("Error: input queue config failed %s\n", dev);
+ ODPH_ERR("Error: input queue config failed %s\n", dev);
return -1;
}
if (odp_pktout_queue_config(pktio, &pktout_param)) {
- LOG_ERR("Error: output queue config failed %s\n", dev);
+ ODPH_ERR("Error: output queue config failed %s\n", dev);
return -1;
}
if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].pktin,
num_rx) != num_rx) {
- LOG_ERR("Error: pktin event queue query failed %s\n",
- dev);
+ ODPH_ERR("Error: pktin event queue query failed %s\n", dev);
return -1;
}
@@ -675,8 +691,8 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
if (odp_queue_context_set(gbl_args->pktios[idx].pktin[i],
&gbl_args->input_qcontext[idx][i],
sizeof(qcontext_t))) {
- LOG_ERR("Error: pktin queue context set failed %s\n",
- dev);
+ ODPH_ERR("Error: pktin queue context set failed %s\n",
+ dev);
return -1;
}
}
@@ -684,7 +700,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
if (odp_pktout_queue(pktio,
gbl_args->pktios[idx].pktout,
num_tx) != num_tx) {
- LOG_ERR("Error: pktout queue query failed %s\n", dev);
+ ODPH_ERR("Error: pktout queue query failed %s\n", dev);
return -1;
}
@@ -725,7 +741,7 @@ static int print_speed_stats(int num_workers, stats_t *thr_stats,
timeout = 1;
}
/* Wait for all threads to be ready*/
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
do {
pkts = 0;
@@ -795,7 +811,7 @@ static void init_forwarding_tbl(void)
}
/**
- * Prinf usage information
+ * Print usage information
*/
static void usage(char *progname)
{
@@ -819,11 +835,12 @@ static void usage(char *progname)
" -r, --num_rx_q Number of RX queues per interface\n"
" -f, --num_flows Number of packet flows\n"
" -e, --extra_input <number> Number of extra input processing rounds\n"
- " -c, --count <number> CPU count.\n"
+ " -c, --count <number> CPU count, 0=all available, default=1\n"
" -t, --time <number> Time in seconds to run.\n"
" -a, --accuracy <number> Statistics print interval in seconds\n"
" (default is 1 second).\n"
" -d, --dst_addr Destination addresses (comma-separated, no spaces)\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
" -h, --help Display help and exit.\n\n"
"\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
);
@@ -854,22 +871,20 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"num_rx_q", required_argument, NULL, 'r'},
{"num_flows", required_argument, NULL, 'f'},
{"extra_input", required_argument, NULL, 'e'},
+ {"promisc_mode", no_argument, NULL, 'P'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:+t:+a:i:m:d:r:f:e:h";
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
+ static const char *shortopts = "+c:t:a:i:m:d:r:f:e:Ph";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = DEF_STATS_INT;
+ appl_args->cpu_count = 1; /* use one worker by default */
appl_args->num_rx_q = DEF_NUM_RX_QUEUES;
appl_args->num_flows = DEF_NUM_FLOWS;
appl_args->extra_rounds = DEF_EXTRA_ROUNDS;
-
- opterr = 0; /* do not issue errors on helper options */
+ appl_args->promisc_mode = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -983,6 +998,9 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'e':
appl_args->extra_rounds = atoi(optarg);
break;
+ case 'P':
+ appl_args->promisc_mode = 1;
+ break;
case 'h':
usage(argv[0]);
exit(EXIT_SUCCESS);
@@ -992,12 +1010,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
}
}
- if (appl_args->cpu_count > MAX_WORKERS) {
- printf("Too many workers requested %d, max: %d\n",
- appl_args->cpu_count, MAX_WORKERS);
- exit(EXIT_FAILURE);
- }
-
if (appl_args->num_flows > MAX_FLOWS) {
printf("Too many flows requested %d, max: %d\n",
appl_args->num_flows, MAX_FLOWS);
@@ -1027,28 +1039,26 @@ static void print_info(char *progname, appl_args_t *appl_args)
{
int i;
- printf("\n"
- "ODP system info\n"
- "---------------\n"
- "ODP API version: %s\n"
- "ODP impl name: %s\n"
- "CPU model: %s\n"
- "CPU freq (hz): %" PRIu64 "\n"
- "Cache line size: %i\n"
- "CPU count: %i\n"
- "\n",
- odp_version_api_str(), odp_version_impl_name(),
- odp_cpu_model_str(), odp_cpu_hz_max(),
- odp_sys_cache_line_size(), odp_cpu_count());
-
- printf("Running ODP appl: \"%s\"\n"
- "-----------------\n"
- "IF-count: %i\n"
- "Using IFs: ",
+ odp_sys_info_print();
+
+ printf("%s options\n"
+ "-------------------------\n"
+ "IF-count: %i\n"
+ "Using IFs: ",
progname, appl_args->if_count);
for (i = 0; i < appl_args->if_count; ++i)
printf(" %s", appl_args->if_names[i]);
- printf("\n\n");
+ printf("\n"
+ "Input queues: %d\n"
+ "Mode: %s\n"
+ "Flows: %d\n"
+ "Extra rounds: %d\n"
+ "Promisc mode: %s\n", appl_args->num_rx_q,
+ (appl_args->in_mode == SCHED_ATOMIC) ? "PKTIN_SCHED_ATOMIC" :
+ (appl_args->in_mode == SCHED_PARALLEL ? "PKTIN_SCHED_PARALLEL" :
+ "PKTIN_SCHED_ORDERED"), appl_args->num_flows,
+ appl_args->extra_rounds, appl_args->promisc_mode ?
+ "enabled" : "disabled");
fflush(NULL);
}
@@ -1057,6 +1067,7 @@ static void gbl_args_init(args_t *args)
int pktio, queue;
memset(args, 0, sizeof(args_t));
+ odp_atomic_init_u32(&args->exit_threads, 0);
for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
@@ -1073,30 +1084,55 @@ int main(int argc, char *argv[])
{
odp_cpumask_t cpumask;
odp_instance_t instance;
+ odp_init_t init_param;
odp_pool_t pool;
odp_pool_param_t params;
odp_shm_t shm;
- odp_queue_capability_t capa;
+ odp_schedule_capability_t schedule_capa;
+ odp_schedule_config_t schedule_config;
+ odp_pool_capability_t pool_capa;
odph_ethaddr_t new_addr;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_helper_options_t helper_options;
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
stats_t *stats;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- int cpu;
int i, j;
int if_count;
int ret;
int num_workers;
- int in_mode;
+ uint32_t queue_size, pool_size;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
- LOG_ERR("Error: ODP global init failed.\n");
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
/* Init this thread */
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- LOG_ERR("Error: ODP local init failed.\n");
+ ODPH_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_schedule_capability(&schedule_capa)) {
+ printf("Error: Schedule capa failed.\n");
+ return -1;
+ }
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Error: Pool capa failed\n");
exit(EXIT_FAILURE);
}
@@ -1105,14 +1141,14 @@ int main(int argc, char *argv[])
ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- LOG_ERR("Error: shared mem reserve failed.\n");
+ ODPH_ERR("Error: shared mem reserve failed.\n");
exit(EXIT_FAILURE);
}
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL) {
- LOG_ERR("Error: shared mem alloc failed.\n");
+ ODPH_ERR("Error: shared mem alloc failed.\n");
odp_shm_free(shm);
exit(EXIT_FAILURE);
}
@@ -1121,20 +1157,21 @@ int main(int argc, char *argv[])
/* Parse and store the application arguments */
parse_args(argc, argv, &gbl_args->appl);
+ odp_schedule_config_init(&schedule_config);
+ odp_schedule_config(&schedule_config);
+
if (gbl_args->appl.in_mode == SCHED_ORDERED) {
/* At least one ordered lock required */
- odp_queue_capability(&capa);
- if (capa.max_ordered_locks < 1) {
- LOG_ERR("Error: Ordered locks not available.\n");
+ if (schedule_capa.max_ordered_locks < 1) {
+ ODPH_ERR("Error: Ordered locks not available.\n");
exit(EXIT_FAILURE);
}
}
/* Print both system and application information */
print_info(NO_PATH(argv[0]), &gbl_args->appl);
- /* Default to system CPU count unless user specified */
num_workers = MAX_WORKERS;
- if (gbl_args->appl.cpu_count)
+ if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
num_workers = gbl_args->appl.cpu_count;
/* Get default worker cpumask */
@@ -1147,18 +1184,32 @@ int main(int argc, char *argv[])
printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
printf("CPU mask: %s\n\n", cpumaskstr);
+ pool_size = MAX_NUM_PKT;
+ if (pool_capa.pkt.max_num && pool_capa.pkt.max_num < MAX_NUM_PKT)
+ pool_size = pool_capa.pkt.max_num;
+
+ queue_size = MAX_NUM_PKT;
+ if (schedule_config.queue_size &&
+ schedule_config.queue_size < MAX_NUM_PKT)
+ queue_size = schedule_config.queue_size;
+
+ /* Pool should not be larger than queue, otherwise queue enqueues at
+ * packet input may fail. */
+ if (pool_size > queue_size)
+ pool_size = queue_size;
+
/* Create packet pool */
odp_pool_param_init(&params);
params.pkt.seg_len = PKT_POOL_BUF_SIZE;
params.pkt.len = PKT_POOL_BUF_SIZE;
- params.pkt.num = PKT_POOL_SIZE;
+ params.pkt.num = pool_size;
params.pkt.uarea_size = PKT_UAREA_SIZE;
params.type = ODP_POOL_PACKET;
pool = odp_pool_create("packet pool", &params);
if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Error: packet pool create failed.\n");
+ ODPH_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
}
odp_pool_print(pool);
@@ -1179,7 +1230,7 @@ int main(int argc, char *argv[])
if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
gbl_args->port_eth_addr[i].addr,
ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- LOG_ERR("Error: interface ethernet address unknown\n");
+ ODPH_ERR("Error: interface ethernet address unknown\n");
exit(EXIT_FAILURE);
}
@@ -1205,11 +1256,11 @@ int main(int argc, char *argv[])
odp_pktio_capability_t capa;
if (odp_pktio_capability(gbl_args->pktios[i].pktio, &capa)) {
- LOG_ERR("Error: pktio capability failed.\n");
+ ODPH_ERR("Error: pktio capability failed.\n");
exit(EXIT_FAILURE);
}
- if ((unsigned)gbl_args->appl.num_flows > capa.max_output_queues)
+ if ((uint32_t)gbl_args->appl.num_flows > capa.max_output_queues)
gbl_args->appl.num_flows = capa.max_output_queues;
}
@@ -1224,9 +1275,10 @@ int main(int argc, char *argv[])
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ qparam.size = queue_size;
gbl_args->flow_qcontext[i][j].idx = i;
gbl_args->flow_qcontext[i][j].input_queue = 0;
@@ -1235,7 +1287,7 @@ int main(int argc, char *argv[])
queue = odp_queue_create(qname, &qparam);
if (queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Error: flow queue create failed.\n");
+ ODPH_ERR("Error: flow queue create failed.\n");
exit(EXIT_FAILURE);
}
@@ -1243,45 +1295,28 @@ int main(int argc, char *argv[])
}
}
- in_mode = gbl_args->appl.in_mode;
- printf("\nApplication parameters\n"
- "----------------------\n"
- "Input queues: %d\n"
- "Mode: %s\n"
- "Flows: %d\n"
- "Extra rounds: %d\n\n", gbl_args->appl.num_rx_q,
- (in_mode == SCHED_ATOMIC) ? "PKTIN_SCHED_ATOMIC" :
- (in_mode == SCHED_PARALLEL ? "PKTIN_SCHED_PARALLEL" :
- "PKTIN_SCHED_ORDERED"), gbl_args->appl.num_flows,
- gbl_args->appl.extra_rounds);
-
memset(thread_tbl, 0, sizeof(thread_tbl));
stats = gbl_args->stats;
- odp_barrier_init(&barrier, num_workers + 1);
+ odp_barrier_init(&gbl_args->barrier, num_workers + 1);
/* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_worker;
- thr_params.arg = &gbl_args->thread[i];
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ for (i = 0; i < num_workers; ++i) {
gbl_args->thread[i].stats = &stats[i];
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&thread_tbl[i], &thd_mask,
- &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_worker;
+ thr_param[i].arg = &gbl_args->thread[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
/* Start packet receive and transmit */
for (i = 0; i < if_count; ++i) {
odp_pktio_t pktio;
@@ -1289,8 +1324,8 @@ int main(int argc, char *argv[])
pktio = gbl_args->pktios[i].pktio;
ret = odp_pktio_start(pktio);
if (ret) {
- LOG_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Error: unable to start %s\n",
+ gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -1302,11 +1337,10 @@ int main(int argc, char *argv[])
for (i = 0; i < if_count; i++)
odp_pktio_stop(gbl_args->pktios[i].pktio);
- exit_threads = 1;
+ odp_atomic_store_u32(&gbl_args->exit_threads, 1);
/* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, num_workers);
for (i = 0; i < if_count; i++) {
odp_pktio_close(gbl_args->pktios[i].pktio);
@@ -1319,22 +1353,22 @@ int main(int argc, char *argv[])
free(gbl_args->appl.if_str);
if (odp_pool_destroy(pool)) {
- LOG_ERR("Error: pool destroy\n");
+ ODPH_ERR("Error: pool destroy\n");
exit(EXIT_FAILURE);
}
if (odp_shm_free(shm)) {
- LOG_ERR("Error: shm free\n");
+ ODPH_ERR("Error: shm free\n");
exit(EXIT_FAILURE);
}
if (odp_term_local()) {
- LOG_ERR("Error: term local\n");
+ ODPH_ERR("Error: term local\n");
exit(EXIT_FAILURE);
}
if (odp_term_global(instance)) {
- LOG_ERR("Error: term global\n");
+ ODPH_ERR("Error: term global\n");
exit(EXIT_FAILURE);
}
diff --git a/test/performance/odp_pktio_ordered_run.sh b/test/performance/odp_pktio_ordered_run.sh
new file mode 100755
index 000000000..b4584753f
--- /dev/null
+++ b/test/performance/odp_pktio_ordered_run.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+#
+# Copyright (c) 2016-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+TEST_SRC_DIR=$(dirname $0)
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+DURATION=1
+LOG=odp_pktio_ordered.log
+LOOPS=100000000
+PASS_PPS=100
+PCAP_IN=`find . ${TEST_SRC_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT=/dev/null
+
+if [ ! -f ${PCAP_IN} ]; then
+ echo "FAIL: no udp64.pcap"
+ exit 1
+fi
+
+# This just turns off output buffering so that you still get periodic
+# output while piping to tee, as long as stdbuf is available.
+if [ "$(which stdbuf)" != "" ]; then
+ STDBUF="stdbuf -o 0"
+else
+ STDBUF=
+fi
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT} \
+--vdev net_pcap1,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT}"
+
+$STDBUF ${TEST_DIR}/odp_pktio_ordered${EXEEXT} \
+ -i 0,1 \
+ -t $DURATION | tee $LOG
+ret=${PIPESTATUS[0]}
+
+if [ $ret -ne 0 ]; then
+ echo "FAIL: no odp_pktio_ordered${EXEEXT}"
+ rm -f $LOG
+ exit $ret
+fi
+
+if [ ! -f $LOG ]; then
+ echo "FAIL: $LOG not found"
+ ret=1
+ exit $ret
+fi
+
+MAX_PPS=$(awk '/TEST RESULT/ {print $3}' $LOG)
+echo "MAX_PPS=$MAX_PPS"
+if [ $MAX_PPS -lt $PASS_PPS ]; then
+ echo "FAIL: pps below threshold $MAX_PPS < $PASS_PPS"
+ ret=1
+fi
+
+rm -f $LOG
+
+exit $ret
diff --git a/test/common_plat/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 094630811..4cfeb50cf 100644
--- a/test/common_plat/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -1,15 +1,17 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_pktio_perf.c
*
- * ODP Packet IO basic performance test application.
- *
- * Runs a number of transmit and receive workers on separate cores, the
- * transmitters generate packets at a defined rate and the receivers consume
- * them. Generated packets are UDP and each packet is marked with a magic
- * number in the UDP payload allowing receiver to distinguish them from other
- * traffic.
+ * Packet IO basic performance test application. Runs a number of transmit and
+ * receive workers on separate cores, the transmitters generate packets at a
+ * defined rate and the receivers consume them. Generated packets are UDP and
+ * each packet is marked with a magic number in the UDP payload allowing
+ * receiver to distinguish them from other traffic.
*
* Each test iteration runs for a fixed period, at the end of the iteration
* it is verified that the number of packets transmitted was as expected and
@@ -19,7 +21,9 @@
* determine the maximum rate at which no packet loss occurs. Alternatively
* a single packet rate can be specified on the command line.
*
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
*/
+
#include <odp_api.h>
#include <odp/helper/odph_api.h>
@@ -29,14 +33,13 @@
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
-#include <test_debug.h>
#define TEST_SKIP 77
#define PKT_BUF_NUM (32 * 1024)
#define MAX_NUM_IFACES 2
#define TEST_HDR_MAGIC 0x92749451
-#define MAX_WORKERS 32
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
#define BATCH_LEN_MAX 32
/* Packet rate at which to start when using binary search */
@@ -52,11 +55,11 @@
* received by this time will be assumed to have been lost. */
#define SHUTDOWN_DELAY_NS (ODP_TIME_MSEC_IN_NS * 100)
-#define VPRINT(fmt, ...) \
- do { \
- if (gbl_args->args.verbose) \
- printf(fmt, ##__VA_ARGS__); \
- } while (0)
+/* Number of duration units in a second. */
+#define T_SCALE 10
+
+/* Default test duration in T_SCALE units */
+#define DEFAULT_DURATION 1
#define CACHE_ALIGN_ROUNDUP(x)\
((ODP_CACHE_LINE_SIZE) * \
@@ -67,10 +70,9 @@
/** Parsed command line application arguments */
typedef struct {
- int cpu_count; /* CPU count */
+ unsigned int cpu_count; /* CPU count */
int num_tx_workers;/* Number of CPUs to use for transmit */
- int duration; /* Number of seconds to run each iteration
- of the test for */
+ int duration; /* Time to run each iteration of the test for */
uint32_t tx_batch_len; /* Number of packets to send in a single
batch */
int schedule; /* 1: receive packets via scheduler
@@ -92,26 +94,16 @@ typedef struct {
int num_ifaces;
} test_args_t;
-struct rx_stats_s {
+typedef struct ODP_ALIGNED_CACHE {
uint64_t rx_cnt; /* Valid packets received */
uint64_t rx_ignore; /* Ignored packets */
-};
-
-typedef union rx_stats_u {
- struct rx_stats_s s;
- uint8_t pad[CACHE_ALIGN_ROUNDUP(sizeof(struct rx_stats_s))];
} pkt_rx_stats_t;
-struct tx_stats_s {
+typedef struct ODP_ALIGNED_CACHE {
uint64_t tx_cnt; /* Packets transmitted */
uint64_t alloc_failures;/* Packet allocation failures */
uint64_t enq_failures; /* Enqueue failures */
odp_time_t idle_ticks; /* Idle ticks count in TX loop */
-};
-
-typedef union tx_stats_u {
- struct tx_stats_s s;
- uint8_t pad[CACHE_ALIGN_ROUNDUP(sizeof(struct tx_stats_s))];
} pkt_tx_stats_t;
/* Test global variables */
@@ -122,12 +114,18 @@ typedef struct {
odp_barrier_t tx_barrier;
odp_pktio_t pktio_tx;
odp_pktio_t pktio_rx;
+ /* Pool from which transmitted packets are allocated */
+ odp_pool_t transmit_pkt_pool;
pkt_rx_stats_t *rx_stats;
pkt_tx_stats_t *tx_stats;
uint8_t src_mac[ODPH_ETHADDR_LEN];
uint8_t dst_mac[ODPH_ETHADDR_LEN];
uint32_t rx_stats_size;
uint32_t tx_stats_size;
+ /* Indicate to the receivers to shutdown */
+ odp_atomic_u32_t shutdown;
+ /* Sequence number of IP packets */
+ odp_atomic_u32_t ip_seq ODP_ALIGNED_CACHE;
} test_globals_t;
/* Status of max rate search */
@@ -141,7 +139,7 @@ typedef struct {
/* Thread specific arguments */
typedef struct {
int batch_len; /* Number of packets per transmit batch */
- int duration; /* Run duration in seconds */
+ int duration; /* Run duration in scaled time units */
uint64_t pps; /* Packets per second for this thread */
} thread_args_t;
@@ -149,36 +147,26 @@ typedef struct {
odp_u32be_t magic; /* Packet header magic number */
} pkt_head_t;
-/* Pool from which transmitted packets are allocated */
-static odp_pool_t transmit_pkt_pool = ODP_POOL_INVALID;
-
-/* Sequence number of IP packets */
-static odp_atomic_u32_t ip_seq;
-
-/* Indicate to the receivers to shutdown */
-static odp_atomic_u32_t shutdown;
-
/* Application global data */
static test_globals_t *gbl_args;
/*
* Generate a single test packet for transmission.
*/
-static odp_packet_t pktio_create_packet(void)
+static odp_packet_t pktio_create_packet(uint32_t seq)
{
odp_packet_t pkt;
odph_ethhdr_t *eth;
odph_ipv4hdr_t *ip;
odph_udphdr_t *udp;
char *buf;
- uint16_t seq;
uint32_t offset;
pkt_head_t pkt_hdr;
size_t payload_len;
payload_len = sizeof(pkt_hdr) + gbl_args->args.pkt_len;
- pkt = odp_packet_alloc(transmit_pkt_pool,
+ pkt = odp_packet_alloc(gbl_args->transmit_pkt_pool,
payload_len + ODPH_UDPHDR_LEN +
ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
@@ -206,7 +194,6 @@ static odp_packet_t pktio_create_packet(void)
ODPH_IPV4HDR_LEN);
ip->ttl = 128;
ip->proto = ODPH_IPPROTO_UDP;
- seq = odp_atomic_fetch_inc_u32(&ip_seq);
ip->id = odp_cpu_to_be_16(seq);
ip->chksum = 0;
odph_ipv4_csum_update(pkt);
@@ -225,7 +212,7 @@ static odp_packet_t pktio_create_packet(void)
pkt_hdr.magic = TEST_HDR_MAGIC;
if (odp_packet_copy_from_mem(pkt, offset, sizeof(pkt_hdr),
&pkt_hdr) != 0)
- LOG_ABORT("Failed to generate test packet.\n");
+ ODPH_ABORT("Failed to generate test packet.\n");
return pkt;
}
@@ -238,18 +225,16 @@ static int pktio_pkt_has_magic(odp_packet_t pkt)
size_t l4_off;
pkt_head_t pkt_hdr;
- l4_off = odp_packet_l4_offset(pkt);
- if (l4_off) {
- int ret = odp_packet_copy_to_mem(pkt,
- l4_off + ODPH_UDPHDR_LEN,
- sizeof(pkt_hdr), &pkt_hdr);
+ l4_off = ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN;
+ int ret = odp_packet_copy_to_mem(pkt,
+ l4_off + ODPH_UDPHDR_LEN,
+ sizeof(pkt_hdr), &pkt_hdr);
- if (ret != 0)
- return 0;
+ if (ret != 0)
+ return 0;
- if (pkt_hdr.magic == TEST_HDR_MAGIC)
- return 1;
- }
+ if (pkt_hdr.magic == TEST_HDR_MAGIC)
+ return 1;
return 0;
}
@@ -260,9 +245,11 @@ static int pktio_pkt_has_magic(odp_packet_t pkt)
static int alloc_packets(odp_packet_t *pkt_tbl, int num_pkts)
{
int n;
+ uint16_t seq;
+ seq = odp_atomic_fetch_add_u32(&gbl_args->ip_seq, num_pkts);
for (n = 0; n < num_pkts; ++n) {
- pkt_tbl[n] = pktio_create_packet();
+ pkt_tbl[n] = pktio_create_packet(seq + n);
if (pkt_tbl[n] == ODP_PACKET_INVALID)
break;
}
@@ -328,12 +315,13 @@ static int run_thread_tx(void *arg)
stats = &globals->tx_stats[thr_id];
if (odp_pktout_queue(globals->pktio_tx, &pktout, 1) != 1)
- LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);
+ ODPH_ABORT("Failed to get output queue for thread %d\n",
+ thr_id);
burst_gap = odp_time_local_from_ns(
ODP_TIME_SEC_IN_NS / (targs->pps / targs->batch_len));
send_duration =
- odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS);
+ odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS / T_SCALE);
odp_barrier_wait(&globals->tx_barrier);
@@ -353,8 +341,8 @@ static int run_thread_tx(void *arg)
if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
odp_time_t diff = odp_time_diff(cur_time, idle_start);
- stats->s.idle_ticks =
- odp_time_sum(diff, stats->s.idle_ticks);
+ stats->idle_ticks =
+ odp_time_sum(diff, stats->idle_ticks);
idle_start = ODP_TIME_NULL;
}
@@ -363,22 +351,23 @@ static int run_thread_tx(void *arg)
alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
if (alloc_cnt != batch_len)
- stats->s.alloc_failures++;
+ stats->alloc_failures++;
tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
unsent_pkts = alloc_cnt - tx_cnt;
- stats->s.enq_failures += unsent_pkts;
- stats->s.tx_cnt += tx_cnt;
+ stats->enq_failures += unsent_pkts;
+ stats->tx_cnt += tx_cnt;
cur_time = odp_time_local();
}
- VPRINT(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
- " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
- thr_id, stats->s.tx_cnt,
- stats->s.enq_failures, stats->s.alloc_failures,
- odp_time_to_ns(stats->s.idle_ticks) /
- (uint64_t)ODP_TIME_MSEC_IN_NS);
+ if (gbl_args->args.verbose)
+ printf(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
+ " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
+ thr_id, stats->tx_cnt, stats->enq_failures,
+ stats->alloc_failures,
+ odp_time_to_ns(stats->idle_ticks) /
+ (uint64_t)ODP_TIME_MSEC_IN_NS);
return 0;
}
@@ -432,7 +421,7 @@ static int run_thread_rx(void *arg)
if (gbl_args->args.schedule == 0) {
if (odp_pktin_event_queue(globals->pktio_rx, &queue, 1) != 1)
- LOG_ABORT("No input queue.\n");
+ ODPH_ABORT("No input queue.\n");
}
odp_barrier_wait(&globals->rx_barrier);
@@ -446,13 +435,13 @@ static int run_thread_rx(void *arg)
if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
pkt = odp_packet_from_event(ev[i]);
if (pktio_pkt_has_magic(pkt))
- stats->s.rx_cnt++;
+ stats->rx_cnt++;
else
- stats->s.rx_ignore++;
+ stats->rx_ignore++;
}
odp_event_free(ev[i]);
}
- if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
+ if (n_ev == 0 && odp_atomic_load_u32(&gbl_args->shutdown))
break;
}
@@ -477,12 +466,12 @@ static int process_results(uint64_t expected_tx_cnt,
int len = 0;
for (i = 0; i < odp_thread_count_max(); ++i) {
- rx_pkts += gbl_args->rx_stats[i].s.rx_cnt;
- tx_pkts += gbl_args->tx_stats[i].s.tx_cnt;
+ rx_pkts += gbl_args->rx_stats[i].rx_cnt;
+ tx_pkts += gbl_args->tx_stats[i].tx_cnt;
}
if (rx_pkts == 0) {
- LOG_ERR("no packets received\n");
+ ODPH_ERR("no packets received\n");
return -1;
}
@@ -556,13 +545,19 @@ static int setup_txrx_masks(odp_cpumask_t *thd_mask_tx,
odp_cpumask_default_worker(&cpumask,
gbl_args->args.cpu_count);
if (num_workers < 2) {
- LOG_ERR("Need at least two cores\n");
+ ODPH_ERR("Need at least two cores\n");
return TEST_SKIP;
}
+ if (num_workers > MAX_WORKERS) {
+ ODPH_DBG("Worker count limited to MAX_WORKERS define (=%d)\n",
+ MAX_WORKERS);
+ num_workers = MAX_WORKERS;
+ }
+
if (gbl_args->args.num_tx_workers) {
if (gbl_args->args.num_tx_workers > (num_workers - 1)) {
- LOG_ERR("Invalid TX worker count\n");
+ ODPH_ERR("Invalid TX worker count\n");
return -1;
}
num_tx_workers = gbl_args->args.num_tx_workers;
@@ -601,54 +596,70 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
odp_cpumask_t *thd_mask_rx,
test_status_t *status)
{
- odph_odpthread_t thd_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
thread_args_t args_tx, args_rx;
uint64_t expected_tx_cnt;
int num_tx_workers, num_rx_workers;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = gbl_args->instance;
- odp_atomic_store_u32(&shutdown, 0);
+ odp_atomic_store_u32(&gbl_args->shutdown, 0);
- memset(thd_tbl, 0, sizeof(thd_tbl));
+ memset(thread_tbl, 0, sizeof(thread_tbl));
memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size);
- expected_tx_cnt = status->pps_curr * gbl_args->args.duration;
+ expected_tx_cnt = status->pps_curr * gbl_args->args.duration / T_SCALE;
/* start receiver threads first */
- thr_params.start = run_thread_rx;
- thr_params.arg = &args_rx;
+
+ num_rx_workers = odp_cpumask_count(thd_mask_rx);
args_rx.batch_len = gbl_args->args.rx_batch_len;
- odph_odpthreads_create(&thd_tbl[0], thd_mask_rx, &thr_params);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = gbl_args->instance;
+ thr_common.cpumask = thd_mask_rx;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread_rx;
+ thr_param.arg = &args_rx;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_rx_workers);
odp_barrier_wait(&gbl_args->rx_barrier);
- num_rx_workers = odp_cpumask_count(thd_mask_rx);
/* then start transmitters */
- thr_params.start = run_thread_tx;
- thr_params.arg = &args_tx;
+
num_tx_workers = odp_cpumask_count(thd_mask_tx);
args_tx.pps = status->pps_curr / num_tx_workers;
args_tx.duration = gbl_args->args.duration;
args_tx.batch_len = gbl_args->args.tx_batch_len;
- odph_odpthreads_create(&thd_tbl[num_rx_workers], thd_mask_tx,
- &thr_params);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = gbl_args->instance;
+ thr_common.cpumask = thd_mask_tx;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread_tx;
+ thr_param.arg = &args_tx;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&thread_tbl[num_rx_workers], &thr_common, &thr_param, num_tx_workers);
odp_barrier_wait(&gbl_args->tx_barrier);
/* wait for transmitter threads to terminate */
- odph_odpthreads_join(&thd_tbl[num_rx_workers]);
+ odph_thread_join(&thread_tbl[num_rx_workers], num_tx_workers);
/* delay to allow transmitted packets to reach the receivers */
odp_time_wait_ns(SHUTDOWN_DELAY_NS);
/* indicate to the receivers to exit */
- odp_atomic_store_u32(&shutdown, 1);
+ odp_atomic_store_u32(&gbl_args->shutdown, 1);
/* wait for receivers */
- odph_odpthreads_join(&thd_tbl[0]);
+ odph_thread_join(thread_tbl, num_rx_workers);
if (!status->warmup)
return process_results(expected_tx_cnt, status);
@@ -736,6 +747,7 @@ static int test_init(void)
odp_pool_param_t params;
const char *iface;
int schedule;
+ odp_pktio_config_t cfg;
odp_pool_param_init(&params);
params.pkt.len = PKT_HDR_LEN + gbl_args->args.pkt_len;
@@ -743,16 +755,20 @@ static int test_init(void)
params.pkt.num = PKT_BUF_NUM;
params.type = ODP_POOL_PACKET;
- transmit_pkt_pool = odp_pool_create("pkt_pool_transmit", &params);
- if (transmit_pkt_pool == ODP_POOL_INVALID)
- LOG_ABORT("Failed to create transmit pool\n");
+ gbl_args->transmit_pkt_pool = odp_pool_create("pkt_pool_transmit",
+ &params);
+ if (gbl_args->transmit_pkt_pool == ODP_POOL_INVALID)
+ ODPH_ABORT("Failed to create transmit pool\n");
- odp_atomic_init_u32(&ip_seq, 0);
- odp_atomic_init_u32(&shutdown, 0);
+ odp_atomic_init_u32(&gbl_args->ip_seq, 0);
+ odp_atomic_init_u32(&gbl_args->shutdown, 0);
iface = gbl_args->args.ifaces[0];
schedule = gbl_args->args.schedule;
+ if (schedule)
+ odp_schedule_config(NULL);
+
/* create pktios and associate input/output queues */
gbl_args->pktio_tx = create_pktio(iface, schedule);
if (gbl_args->args.num_ifaces > 1) {
@@ -769,30 +785,37 @@ static int test_init(void)
if (gbl_args->pktio_rx == ODP_PKTIO_INVALID ||
gbl_args->pktio_tx == ODP_PKTIO_INVALID) {
- LOG_ERR("failed to open pktio\n");
+ ODPH_ERR("failed to open pktio\n");
return -1;
}
/* Create single queue with default parameters */
if (odp_pktout_queue_config(gbl_args->pktio_tx, NULL)) {
- LOG_ERR("failed to configure pktio_tx queue\n");
+ ODPH_ERR("failed to configure pktio_tx queue\n");
return -1;
}
/* Configure also input side (with defaults) */
if (odp_pktin_queue_config(gbl_args->pktio_tx, NULL)) {
- LOG_ERR("failed to configure pktio_tx queue\n");
+ ODPH_ERR("failed to configure pktio_tx queue\n");
return -1;
}
+ /* Disable packet parsing as this is done in the driver where it
+ * affects scalability.
+ */
+ odp_pktio_config_init(&cfg);
+ cfg.parser.layer = ODP_PROTO_LAYER_NONE;
+ odp_pktio_config(gbl_args->pktio_rx, &cfg);
+
if (gbl_args->args.num_ifaces > 1) {
if (odp_pktout_queue_config(gbl_args->pktio_rx, NULL)) {
- LOG_ERR("failed to configure pktio_rx queue\n");
+ ODPH_ERR("failed to configure pktio_rx queue\n");
return -1;
}
if (odp_pktin_queue_config(gbl_args->pktio_rx, NULL)) {
- LOG_ERR("failed to configure pktio_rx queue\n");
+ ODPH_ERR("failed to configure pktio_rx queue\n");
return -1;
}
}
@@ -808,7 +831,7 @@ static int test_init(void)
static int empty_inq(odp_pktio_t pktio)
{
- odp_queue_t queue;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
odp_event_t ev;
odp_queue_type_t q_type;
@@ -842,12 +865,12 @@ static int test_term(void)
if (gbl_args->pktio_tx != gbl_args->pktio_rx) {
if (odp_pktio_stop(gbl_args->pktio_tx)) {
- LOG_ERR("Failed to stop pktio_tx\n");
+ ODPH_ERR("Failed to stop pktio_tx\n");
return -1;
}
if (odp_pktio_close(gbl_args->pktio_tx)) {
- LOG_ERR("Failed to close pktio_tx\n");
+ ODPH_ERR("Failed to close pktio_tx\n");
ret = -1;
}
}
@@ -855,12 +878,12 @@ static int test_term(void)
empty_inq(gbl_args->pktio_rx);
if (odp_pktio_stop(gbl_args->pktio_rx)) {
- LOG_ERR("Failed to stop pktio_rx\n");
+ ODPH_ERR("Failed to stop pktio_rx\n");
return -1;
}
if (odp_pktio_close(gbl_args->pktio_rx) != 0) {
- LOG_ERR("Failed to close pktio_rx\n");
+ ODPH_ERR("Failed to close pktio_rx\n");
ret = -1;
}
@@ -872,28 +895,28 @@ static int test_term(void)
continue;
if (odp_pool_destroy(pool) != 0) {
- LOG_ERR("Failed to destroy pool %s\n", pool_name);
+ ODPH_ERR("Failed to destroy pool %s\n", pool_name);
ret = -1;
}
}
- if (odp_pool_destroy(transmit_pkt_pool) != 0) {
- LOG_ERR("Failed to destroy transmit pool\n");
+ if (odp_pool_destroy(gbl_args->transmit_pkt_pool) != 0) {
+ ODPH_ERR("Failed to destroy transmit pool\n");
ret = -1;
}
free(gbl_args->args.if_str);
if (odp_shm_free(odp_shm_lookup("test_globals")) != 0) {
- LOG_ERR("Failed to free test_globals\n");
+ ODPH_ERR("Failed to free test_globals\n");
ret = -1;
}
if (odp_shm_free(odp_shm_lookup("test_globals.rx_stats")) != 0) {
- LOG_ERR("Failed to free test_globals.rx_stats\n");
+ ODPH_ERR("Failed to free test_globals.rx_stats\n");
ret = -1;
}
if (odp_shm_free(odp_shm_lookup("test_globals.tx_stats")) != 0) {
- LOG_ERR("Failed to free test_globals.tx_stats\n");
+ ODPH_ERR("Failed to free test_globals.tx_stats\n");
ret = -1;
}
@@ -903,8 +926,7 @@ static int test_term(void)
static void usage(void)
{
printf("\nUsage: odp_pktio_perf [options]\n\n");
- printf(" -c, --count <number> CPU count\n");
- printf(" default: all available\n");
+ printf(" -c, --count <number> CPU count, 0=all available, default=2\n");
printf(" -t, --txcount <number> Number of CPUs to use for TX\n");
printf(" default: cpu_count+1/2\n");
printf(" -b, --txbatch <length> Number of packets per TX batch\n");
@@ -945,21 +967,16 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const char *shortopts = "+c:t:b:pR:l:r:i:d:vh";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
-
- args->cpu_count = 0; /* all CPUs */
+ args->cpu_count = 2;
args->num_tx_workers = 0; /* defaults to cpu_count+1/2 */
args->tx_batch_len = BATCH_LEN_MAX;
args->rx_batch_len = BATCH_LEN_MAX;
- args->duration = 1;
+ args->duration = DEFAULT_DURATION;
args->pps = RATE_SEARCH_INITIAL_PPS;
args->search = 1;
args->schedule = 1;
args->verbose = 0;
- opterr = 0; /* do not issue errors on helper options */
-
while (1) {
opt = getopt_long(argc, argv, shortopts,
longopts, &long_index);
@@ -978,7 +995,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
args->num_tx_workers = atoi(optarg);
break;
case 'd':
- args->duration = atoi(optarg);
+ args->duration = atoi(optarg) * T_SCALE;
break;
case 'r':
args->pps = atoi(optarg);
@@ -992,7 +1009,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
args->if_str = malloc(strlen(optarg) + 1);
if (!args->if_str)
- LOG_ABORT("Failed to alloc iface storage\n");
+ ODPH_ABORT("Failed to alloc iface storage\n");
strcpy(args->if_str, optarg);
for (token = strtok(args->if_str, ",");
@@ -1030,19 +1047,36 @@ int main(int argc, char **argv)
int ret;
odp_shm_t shm;
int max_thrs;
+ odph_helper_options_t helper_options;
odp_instance_t instance;
+ odp_init_t init_param;
- if (odp_init_global(&instance, NULL, NULL) != 0)
- LOG_ABORT("Failed global init.\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(&instance, &init_param, NULL) != 0)
+ ODPH_ABORT("Failed global init.\n");
if (odp_init_local(instance, ODP_THREAD_CONTROL) != 0)
- LOG_ABORT("Failed local init.\n");
+ ODPH_ABORT("Failed local init.\n");
+
+ odp_sys_info_print();
shm = odp_shm_reserve("test_globals",
sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ ODPH_ABORT("Shared memory reserve failed.\n");
+
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL)
- LOG_ABORT("Shared memory reserve failed.\n");
+ ODPH_ABORT("Shared memory reserve failed.\n");
memset(gbl_args, 0, sizeof(test_globals_t));
max_thrs = odp_thread_count_max();
@@ -1054,22 +1088,26 @@ int main(int argc, char **argv)
shm = odp_shm_reserve("test_globals.rx_stats",
gbl_args->rx_stats_size,
ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ ODPH_ABORT("Shared memory reserve failed.\n");
gbl_args->rx_stats = odp_shm_addr(shm);
if (gbl_args->rx_stats == NULL)
- LOG_ABORT("Shared memory reserve failed.\n");
+ ODPH_ABORT("Shared memory reserve failed.\n");
memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
shm = odp_shm_reserve("test_globals.tx_stats",
gbl_args->tx_stats_size,
ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ ODPH_ABORT("Shared memory reserve failed.\n");
gbl_args->tx_stats = odp_shm_addr(shm);
if (gbl_args->tx_stats == NULL)
- LOG_ABORT("Shared memory reserve failed.\n");
+ ODPH_ABORT("Shared memory reserve failed.\n");
memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size);
diff --git a/test/performance/odp_pool_latency.c b/test/performance/odp_pool_latency.c
new file mode 100644
index 000000000..6b964e773
--- /dev/null
+++ b/test/performance/odp_pool_latency.c
@@ -0,0 +1,1382 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+/**
+ * @example odp_pool_latency.c
+ *
+ * Pool latency tester. Allocate from different kind of pools with a varying set of configurations
+ * and record latencies.
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define PROG_NAME "odp_pool_latency"
+#define DELIMITER ","
+#define ALLOC '+'
+#define FREE '-'
+#define TOP 't'
+#define BOTTOM 'b'
+#define DELAY 'd'
+
+enum {
+ BUFFER = 0U,
+ PACKET,
+ TMO,
+ VECTOR
+};
+
+enum {
+ SINGLE = 0U,
+ MANY
+};
+
+#define DEF_ALLOC 1U
+#define DEF_FREE 1U
+#define DEF_DIR TOP
+#define DEF_TYPE BUFFER
+#define DEF_CNT 32768U
+#define DEF_SIZE 1024U
+#define DEF_POLICY MANY
+#define DEF_ROUNDS 100000U
+#define DEF_IGNORE 0U
+#define DEF_WORKERS 1U
+#define DEF_UA_SIZE 0U
+
+#define MAX_PATTERN_LEN 32U
+#define MAX_WORKERS ((uint32_t)(ODP_THREAD_COUNT_MAX - 1))
+#define MAX_RETRIES 10U
+
+#define COND_MIN(a, b) ((a) > 0U ? ODPH_MIN((a), (b)) : (b))
+#define UA_DATA 0xAA
+
+ODP_STATIC_ASSERT(MAX_PATTERN_LEN < UINT8_MAX, "Too long pattern length");
+
+typedef struct {
+ uint32_t num_evs_buf;
+ uint32_t num_evs_pkt;
+ uint32_t num_evs_tmo;
+ uint32_t num_evs_vec;
+ uint32_t data_size_buf;
+ uint32_t data_size_pkt;
+ uint32_t data_size_vec;
+ uint32_t cache_size_buf;
+ uint32_t cache_size_pkt;
+ uint32_t cache_size_tmo;
+ uint32_t cache_size_vec;
+} dynamic_defs_t;
+
+typedef enum {
+ PRS_OK,
+ PRS_NOK,
+ PRS_TERM
+} parse_result_t;
+
+typedef struct {
+ uint64_t tot_tm;
+ uint64_t alloc_tm;
+ uint64_t max_alloc_tm;
+ uint64_t min_alloc_tm;
+ uint64_t max_alloc_rnd;
+ uint64_t min_alloc_rnd;
+ uint64_t alloc_cnt;
+ uint64_t alloc_b_cnt;
+ uint64_t uarea_tm;
+ uint64_t max_uarea_tm;
+ uint64_t min_uarea_tm;
+ uint64_t max_uarea_rnd;
+ uint64_t min_uarea_rnd;
+ uint64_t free_tm;
+ uint64_t max_free_tm;
+ uint64_t min_free_tm;
+ uint64_t max_free_rnd;
+ uint64_t min_free_rnd;
+ uint64_t free_b_cnt;
+ uint64_t reallocs;
+ uint64_t alloc_errs;
+ uint64_t pattern_errs;
+ uint8_t max_alloc_pt;
+ uint8_t min_alloc_pt;
+ uint8_t max_uarea_pt;
+ uint8_t min_uarea_pt;
+ uint8_t max_free_pt;
+ uint8_t min_free_pt;
+} stats_t;
+
+typedef struct {
+ uint32_t val;
+ uint8_t op;
+ uint8_t opt;
+} alloc_elem_t;
+
+typedef struct prog_config_s prog_config_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ stats_t stats;
+ odp_pool_t pool;
+ void *data;
+ prog_config_t *prog_config;
+ odp_shm_t shm;
+ uint32_t data_size;
+ uint32_t uarea_size;
+} worker_config_t;
+
+typedef uint32_t (*alloc_fn_t)(worker_config_t *config, void *data, uint32_t idx, uint32_t num,
+ uint64_t round, uint8_t pattern, odp_bool_t is_saved);
+typedef void (*free_fn_t)(void *data, uint32_t idx, uint32_t num, stats_t *stats,
+ uint64_t round, uint8_t pattern, odp_bool_t is_saved);
+
+typedef struct prog_config_s {
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ worker_config_t worker_config[MAX_WORKERS];
+ alloc_elem_t alloc_elems[MAX_PATTERN_LEN];
+ dynamic_defs_t dyn_defs;
+ odp_instance_t odp_instance;
+ odp_cpumask_t worker_mask;
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ alloc_fn_t alloc_fn;
+ free_fn_t free_fn;
+ int64_t cache_size;
+ uint32_t num_data_elems;
+ uint32_t seg_len;
+ uint32_t handle_size;
+ uint32_t num_evs;
+ uint32_t data_size;
+ uint32_t num_rounds;
+ uint32_t num_ignore;
+ uint32_t num_workers;
+ uint32_t uarea_size;
+ uint8_t num_elems;
+ uint8_t type;
+ uint8_t policy;
+} prog_config_t;
+
+static prog_config_t *prog_conf;
+
+/* Initialize the program configuration with defaults: a default burst
+ * pattern of one allocation followed by one free-from-top, dynamic defaults
+ * resolved from pool capabilities (when the query succeeds), and per-worker
+ * state reset so that statistics collection starts clean.
+ */
+static void init_config(prog_config_t *config)
+{
+	alloc_elem_t *alloc_elem;
+	odp_pool_capability_t capa;
+	odp_pool_param_t param;
+	worker_config_t *worker;
+
+	memset(config, 0, sizeof(*config));
+	/* Default pattern: "+1,-1t" (single alloc, single free from top) */
+	alloc_elem = &config->alloc_elems[0];
+	alloc_elem->val = DEF_ALLOC;
+	alloc_elem->op = ALLOC;
+	alloc_elem = &config->alloc_elems[1];
+	alloc_elem->val = DEF_FREE;
+	alloc_elem->op = FREE;
+	alloc_elem->opt = DEF_DIR;
+	config->num_elems = 2U;
+
+	/* Clamp built-in defaults to implementation maximums; if the
+	 * capability query fails the dynamic defaults stay zero and are
+	 * resolved later in check_options(). */
+	if (odp_pool_capability(&capa) == 0) {
+		config->dyn_defs.num_evs_buf = COND_MIN(capa.buf.max_num, DEF_CNT);
+		config->dyn_defs.num_evs_pkt = COND_MIN(capa.pkt.max_num, DEF_CNT);
+		config->dyn_defs.num_evs_tmo = COND_MIN(capa.tmo.max_num, DEF_CNT);
+		config->dyn_defs.num_evs_vec = COND_MIN(capa.vector.max_num, DEF_CNT);
+		config->dyn_defs.data_size_buf = COND_MIN(capa.buf.max_size, DEF_SIZE);
+		config->dyn_defs.data_size_pkt = COND_MIN(capa.pkt.max_len, DEF_SIZE);
+		config->dyn_defs.data_size_vec = COND_MIN(capa.vector.max_size, DEF_SIZE);
+		odp_pool_param_init(&param);
+		config->dyn_defs.cache_size_buf = param.buf.cache_size;
+		config->dyn_defs.cache_size_pkt = param.pkt.cache_size;
+		config->dyn_defs.cache_size_tmo = param.tmo.cache_size;
+		config->dyn_defs.cache_size_vec = param.vector.cache_size;
+	}
+
+	/* -1 means "use the pool type's default cache size" */
+	config->cache_size = -1;
+	config->num_rounds = DEF_ROUNDS;
+	config->num_ignore = DEF_IGNORE;
+	config->num_workers = DEF_WORKERS;
+	config->uarea_size = DEF_UA_SIZE;
+	config->type = DEF_TYPE;
+	config->policy = DEF_POLICY;
+
+	/* Min latencies start at UINT64_MAX so the first sample wins */
+	for (uint32_t i = 0U; i < MAX_WORKERS; ++i) {
+		worker = &config->worker_config[i];
+		worker->stats.min_alloc_tm = UINT64_MAX;
+		worker->stats.min_uarea_tm = UINT64_MAX;
+		worker->stats.min_free_tm = UINT64_MAX;
+		worker->pool = ODP_POOL_INVALID;
+		worker->shm = ODP_SHM_INVALID;
+	}
+}
+
+/* Parse the burst pattern option ("-b"): a DELIMITER-separated list of
+ * "<op><value>[<suffix>]" tokens, where op is ALLOC ('+'), FREE ('-') or
+ * DELAY ('d') and a FREE must carry a TOP ('t') or BOTTOM ('b') suffix.
+ * Unrecognized tokens are silently skipped; at most MAX_PATTERN_LEN
+ * elements are accepted and config->num_elems is set to the final count.
+ *
+ * Fix: 'opt' is now reset before every sscanf() call. Previously, when
+ * sscanf() matched only two items (no suffix), 'opt' was read while
+ * uninitialized (undefined behavior) in the FREE check, and a stale value
+ * from a previous token could falsely accept a FREE without a suffix.
+ */
+static void parse_burst_pattern(prog_config_t *config, const char *optarg)
+{
+	char *tmp_str = strdup(optarg), *tmp, op, opt;
+	uint8_t num_elems = 0U;
+	alloc_elem_t *elem;
+	uint32_t val;
+	int ret;
+
+	if (tmp_str == NULL)
+		return;
+
+	tmp = strtok(tmp_str, DELIMITER);
+
+	while (tmp && num_elems < MAX_PATTERN_LEN) {
+		elem = &config->alloc_elems[num_elems];
+		/* Suffix is optional; clear it so a two-item match never
+		 * leaves a stale/indeterminate value behind. */
+		opt = '\0';
+		ret = sscanf(tmp, "%c%u%c", &op, &val, &opt);
+
+		if (ret == 2 || ret == 3) {
+			if (op == ALLOC || (op == FREE && (opt == TOP || opt == BOTTOM)) ||
+			    op == DELAY) {
+				if (op == FREE)
+					elem->opt = opt;
+
+				elem->val = val;
+				elem->op = op;
+				++num_elems;
+			}
+		}
+
+		tmp = strtok(NULL, DELIMITER);
+	}
+
+	free(tmp_str);
+	config->num_elems = num_elems;
+}
+
+/* Print the usage/help text. Defaults that depend on pool capabilities
+ * (event counts, data sizes, cache sizes) are taken from the dynamic
+ * defaults resolved at startup, so the help always shows platform values.
+ */
+static void print_usage(const dynamic_defs_t *dyn_defs)
+{
+	printf("\n"
+	       "Pool latency tester. Allocate from different kind of pools with a varying set of\n"
+	       "configurations and record latencies.\n"
+	       "\n"
+	       "Usage: " PROG_NAME " [OPTIONS]\n");
+	printf("\n"
+	       " E.g. " PROG_NAME "\n"
+	       " " PROG_NAME " -b %c7" DELIMITER "%c1%c" DELIMITER "%c3" DELIMITER "%c9%c\n",
+	       ALLOC, FREE, TOP, ALLOC, FREE, BOTTOM);
+	printf(" " PROG_NAME " -b %c10" DELIMITER "%c1000" DELIMITER "%c10%c -t 1 -d 2048 "
+	       "-p 0 -w 64\n", ALLOC, DELAY, FREE, TOP);
+	printf("\n"
+	       "Optional OPTIONS:\n"
+	       "\n"
+	       " -b, --burst_pattern Burst pattern for allocations, frees and delays per round,\n"
+	       " delimited by '%s', no spaces. Allocations are indicated\n"
+	       " with a '%c' prefix, frees with a '%c' prefix. The location\n"
+	       " of frees are indicated from the top of a previously\n"
+	       " allocated array of events with a '%c' suffix and from the\n"
+	       " bottom with a '%c' suffix. Delays are indicated with a\n"
+	       " '%c' prefix, followed by a delay in nanoseconds.\n"
+	       " Allocations and frees should be equal in the aggregate and\n"
+	       " frees should never outnumber allocations at any instant.\n"
+	       " '%c%u%s%c%u%c' by default. Maximum pattern length is %u.\n"
+	       " -t, --type Pool type. %u by default.\n"
+	       " 0: buffer\n"
+	       " 1: packet\n"
+	       " 2: timeout\n"
+	       " 3: vector\n"
+	       " -e, --event_count Number of events. Defaults:\n"
+	       " buffer: %u\n"
+	       " packet: %u\n"
+	       " timeout: %u\n"
+	       " vector: %u\n"
+	       " -d, --data_size Data size in bytes, ignored in case of timeout pools, with\n"
+	       " vector pools, defines the vector size.\n"
+	       " Defaults:\n"
+	       " buffer: %u\n"
+	       " packet: %u\n"
+	       " vector: %u\n"
+	       " -p, --policy Pool allocation policy. %u by default.\n"
+	       " Policies:\n"
+	       " 0: One pool shared by workers\n"
+	       " 1: One pool per worker\n"
+	       " -r, --round_count Number of rounds to run. %u by default.\n"
+	       " -i, --ignore_rounds Ignore an amount of initial rounds. %u by default.\n"
+	       " -c, --worker_count Number of workers. %u by default.\n"
+	       " -C, --cache_size Maximum cache size for pools. Defaults:\n"
+	       " buffer: %u\n"
+	       " packet: %u\n"
+	       " timeout: %u\n"
+	       " vector: %u\n"
+	       " -w, --write_uarea Write data to allocated event user areas. 0 bytes disables\n"
+	       " user area write. %u by default.\n"
+	       " -h, --help This help.\n"
+	       "\n", DELIMITER, ALLOC, FREE, TOP, BOTTOM, DELAY, ALLOC, DEF_ALLOC, DELIMITER, FREE,
+	       DEF_FREE, DEF_DIR, MAX_PATTERN_LEN, DEF_TYPE, dyn_defs->num_evs_buf,
+	       dyn_defs->num_evs_pkt, dyn_defs->num_evs_tmo, dyn_defs->num_evs_vec,
+	       dyn_defs->data_size_buf, dyn_defs->data_size_pkt, dyn_defs->data_size_vec,
+	       DEF_POLICY, DEF_ROUNDS, DEF_IGNORE, DEF_WORKERS, dyn_defs->cache_size_buf,
+	       dyn_defs->cache_size_pkt, dyn_defs->cache_size_tmo, dyn_defs->cache_size_vec,
+	       DEF_UA_SIZE);
+}
+
+/* Validate the parsed configuration against pool and SHM capabilities and
+ * resolve the remaining dynamic defaults (event count, data size, cache
+ * size) for the selected pool type. Also derives per-type handle size, the
+ * packet segment length and clamps the user area size to the type maximum.
+ * Returns PRS_OK when the configuration is usable, PRS_NOK otherwise.
+ */
+static parse_result_t check_options(prog_config_t *config)
+{
+	odp_pool_capability_t pool_capa;
+	uint32_t max_workers, num_pools;
+	alloc_elem_t *elem;
+	int64_t num_tot = 0;
+	odp_shm_capability_t shm_capa;
+	uint64_t shm_size;
+
+	if (config->type != BUFFER && config->type != PACKET && config->type != TMO &&
+	    config->type != VECTOR) {
+		ODPH_ERR("Invalid pool type: %u\n", config->type);
+		return PRS_NOK;
+	}
+
+	if (odp_pool_capability(&pool_capa) < 0) {
+		ODPH_ERR("Error querying pool capabilities\n");
+		return PRS_NOK;
+	}
+
+	/* Worker count is bounded by both the tester's own limit and the
+	 * number of available worker CPUs. */
+	max_workers = ODPH_MIN(MAX_WORKERS, (uint32_t)odp_cpumask_default_worker(NULL, 0));
+
+	if (config->num_workers == 0U || config->num_workers > max_workers) {
+		ODPH_ERR("Invalid worker count: %u (min: 1, max: %u)\n", config->num_workers,
+			 max_workers);
+		return PRS_NOK;
+	}
+
+	(void)odp_cpumask_default_worker(&config->worker_mask, config->num_workers);
+	/* SINGLE policy shares one pool, MANY gives each worker its own */
+	num_pools = config->policy == SINGLE ? 1U : config->num_workers;
+
+	/* Per pool type: fill in unset defaults, then range-check event
+	 * count, data size, cache size and pool count against capability. */
+	if (config->type == BUFFER) {
+		if (config->num_evs == 0U)
+			config->num_evs = config->dyn_defs.num_evs_buf;
+
+		if (config->data_size == 0U)
+			config->data_size = config->dyn_defs.data_size_buf;
+
+		if (config->cache_size == -1)
+			config->cache_size = config->dyn_defs.cache_size_buf;
+
+		if (config->num_evs > pool_capa.buf.max_num) {
+			ODPH_ERR("Invalid event count: %u (max: %u)\n", config->num_evs,
+				 pool_capa.buf.max_num);
+			return PRS_NOK;
+		}
+
+		if (config->data_size > pool_capa.buf.max_size) {
+			ODPH_ERR("Invalid data size: %u (max: %u)\n", config->data_size,
+				 pool_capa.buf.max_size);
+			return PRS_NOK;
+		}
+
+		if (config->cache_size < pool_capa.buf.min_cache_size ||
+		    config->cache_size > pool_capa.buf.max_cache_size) {
+			ODPH_ERR("Invalid cache size: %" PRIi64 " (min: %u, max: %u)\n",
+				 config->cache_size, pool_capa.buf.min_cache_size,
+				 pool_capa.buf.max_cache_size);
+			return PRS_NOK;
+		}
+
+		if (num_pools > pool_capa.buf.max_pools) {
+			ODPH_ERR("Invalid pool count: %u (max: %u)\n", num_pools,
+				 pool_capa.buf.max_pools);
+			return PRS_NOK;
+		}
+
+		config->handle_size = sizeof(odp_buffer_t);
+		config->uarea_size = ODPH_MIN(config->uarea_size, pool_capa.buf.max_uarea_size);
+	} else if (config->type == PACKET) {
+		if (config->num_evs == 0U)
+			config->num_evs = config->dyn_defs.num_evs_pkt;
+
+		if (config->data_size == 0U)
+			config->data_size = config->dyn_defs.data_size_pkt;
+
+		if (config->cache_size == -1)
+			config->cache_size = config->dyn_defs.cache_size_pkt;
+
+		if (config->num_evs > pool_capa.pkt.max_num) {
+			ODPH_ERR("Invalid event count: %u (max: %u)\n", config->num_evs,
+				 pool_capa.pkt.max_num);
+			return PRS_NOK;
+		}
+
+		if (config->data_size > pool_capa.pkt.max_len) {
+			ODPH_ERR("Invalid data size: %u (max: %u)\n", config->data_size,
+				 pool_capa.pkt.max_len);
+			return PRS_NOK;
+		}
+
+		if (config->cache_size < pool_capa.pkt.min_cache_size ||
+		    config->cache_size > pool_capa.pkt.max_cache_size) {
+			ODPH_ERR("Invalid cache size: %" PRIi64 " (min: %u, max: %u)\n",
+				 config->cache_size, pool_capa.pkt.min_cache_size,
+				 pool_capa.pkt.max_cache_size);
+			return PRS_NOK;
+		}
+
+		if (num_pools > pool_capa.pkt.max_pools) {
+			ODPH_ERR("Invalid pool count: %u (max: %u)\n", num_pools,
+				 pool_capa.pkt.max_pools);
+			return PRS_NOK;
+		}
+
+		/* Single-segment packets when possible; otherwise let the
+		 * implementation segment at its maximum segment length. */
+		config->seg_len = pool_capa.pkt.max_seg_len > config->data_size ?
+					config->data_size : pool_capa.pkt.max_seg_len;
+		config->handle_size = sizeof(odp_packet_t);
+		config->uarea_size = ODPH_MIN(config->uarea_size, pool_capa.pkt.max_uarea_size);
+	} else if (config->type == TMO) {
+		/* Timeout events carry no data, so no data size handling */
+		if (config->num_evs == 0U)
+			config->num_evs = config->dyn_defs.num_evs_tmo;
+
+		if (config->cache_size == -1)
+			config->cache_size = config->dyn_defs.cache_size_tmo;
+
+		if (config->num_evs > pool_capa.tmo.max_num) {
+			ODPH_ERR("Invalid event count: %u (max: %u)\n", config->num_evs,
+				 pool_capa.tmo.max_num);
+			return PRS_NOK;
+		}
+
+		if (config->cache_size < pool_capa.tmo.min_cache_size ||
+		    config->cache_size > pool_capa.tmo.max_cache_size) {
+			ODPH_ERR("Invalid cache size: %" PRIi64 " (min: %u, max: %u)\n",
+				 config->cache_size, pool_capa.tmo.min_cache_size,
+				 pool_capa.tmo.max_cache_size);
+			return PRS_NOK;
+		}
+
+		if (num_pools > pool_capa.tmo.max_pools) {
+			ODPH_ERR("Invalid pool count: %u (max: %u)\n", num_pools,
+				 pool_capa.tmo.max_pools);
+			return PRS_NOK;
+		}
+
+		config->handle_size = sizeof(odp_timeout_t);
+		config->uarea_size = ODPH_MIN(config->uarea_size, pool_capa.tmo.max_uarea_size);
+	} else {
+		/* VECTOR: data size is interpreted as the vector size */
+		if (config->num_evs == 0U)
+			config->num_evs = config->dyn_defs.num_evs_vec;
+
+		if (config->data_size == 0U)
+			config->data_size = config->dyn_defs.data_size_vec;
+
+		if (config->cache_size == -1)
+			config->cache_size = config->dyn_defs.cache_size_vec;
+
+		if (config->num_evs > pool_capa.vector.max_num) {
+			ODPH_ERR("Invalid event count: %u (max: %u)\n", config->num_evs,
+				 pool_capa.vector.max_num);
+			return PRS_NOK;
+		}
+
+		if (config->data_size > pool_capa.vector.max_size) {
+			ODPH_ERR("Invalid vector size: %u (max: %u)\n", config->data_size,
+				 pool_capa.vector.max_size);
+			return PRS_NOK;
+		}
+
+		if (config->cache_size < pool_capa.vector.min_cache_size ||
+		    config->cache_size > pool_capa.vector.max_cache_size) {
+			ODPH_ERR("Invalid cache size: %" PRIi64 " (min: %u, max: %u)\n",
+				 config->cache_size, pool_capa.vector.min_cache_size,
+				 pool_capa.vector.max_cache_size);
+			return PRS_NOK;
+		}
+
+		if (num_pools > pool_capa.vector.max_pools) {
+			ODPH_ERR("Invalid pool count: %u (max: %u)\n", num_pools,
+				 pool_capa.vector.max_pools);
+			return PRS_NOK;
+		}
+
+		config->handle_size = sizeof(odp_packet_vector_t);
+		config->uarea_size = ODPH_MIN(config->uarea_size, pool_capa.vector.max_uarea_size);
+	}
+
+	if (config->num_elems == 0U) {
+		ODPH_ERR("Invalid burst pattern, no elements\n");
+		return PRS_NOK;
+	}
+
+	/* The pattern must never free more events than are currently
+	 * allocated and must sum to zero over a full round. Also count the
+	 * total number of handle slots needed for the per-worker table. */
+	for (uint8_t i = 0U; i < config->num_elems; ++i) {
+		elem = &config->alloc_elems[i];
+
+		if (elem->op == ALLOC)
+			num_tot += elem->val;
+		else if (elem->op == FREE)
+			num_tot -= elem->val;
+
+		if (num_tot < 0) {
+			ODPH_ERR("Invalid burst pattern, frees exceed allocations "
+				 "instantaneously\n");
+			return PRS_NOK;
+		}
+
+		config->num_data_elems += (elem->op == ALLOC ? elem->val : 0U);
+	}
+
+	if (num_tot != 0) {
+		ODPH_ERR("Invalid burst pattern, cumulative sum not zero: %" PRId64 "\n", num_tot);
+		return PRS_NOK;
+	}
+
+	if (odp_shm_capability(&shm_capa) < 0) {
+		ODPH_ERR("Error querying SHM capabilities\n");
+		return PRS_NOK;
+	}
+
+	/* One SHM block per worker plus one for the global configuration */
+	if (shm_capa.max_blocks < config->num_workers + 1U) {
+		ODPH_ERR("Invalid amount of SHM blocks: %u (max: %u)\n", config->num_workers + 1U,
+			 shm_capa.max_blocks);
+		return PRS_NOK;
+	}
+
+	shm_size = (uint64_t)config->num_data_elems * config->handle_size;
+
+	/* max_size == 0 means "no limit" per SHM capability semantics */
+	if (shm_capa.max_size != 0U && shm_size > shm_capa.max_size) {
+		ODPH_ERR("Invalid total SHM block size: %" PRIu64 " (max: %" PRIu64 ")\n",
+			 shm_size, shm_capa.max_size);
+		return PRS_NOK;
+	}
+
+	if (config->policy != SINGLE && config->policy != MANY) {
+		ODPH_ERR("Invalid pool policy: %u\n", config->policy);
+		return PRS_NOK;
+	}
+
+	if (config->num_rounds == 0U) {
+		ODPH_ERR("Invalid round count: %u (min: 1)\n", config->num_rounds);
+		return PRS_NOK;
+	}
+
+	if (config->num_ignore >= config->num_rounds) {
+		ODPH_ERR("Invalid round ignorance count: %u (max: %u)\n", config->num_ignore,
+			 config->num_rounds - 1U);
+		return PRS_NOK;
+	}
+
+	return PRS_OK;
+}
+
+/* Parse command line options into the program configuration. Returns
+ * PRS_OK on success, PRS_NOK on invalid input, and PRS_TERM when only the
+ * help text was requested ('-h'). Value validation is delegated to
+ * check_options().
+ */
+static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
+{
+	int opt, long_index;
+
+	static const struct option longopts[] = {
+		{ "burst_pattern", required_argument, NULL, 'b' },
+		{ "type", required_argument, NULL, 't' },
+		{ "event_count", required_argument, NULL, 'e' },
+		{ "data_size", required_argument, NULL, 'd' },
+		{ "policy", required_argument, NULL, 'p' },
+		{ "round_count", required_argument, NULL, 'r' },
+		{ "ignore_rounds", required_argument, NULL, 'i' },
+		{ "worker_count", required_argument, NULL, 'c' },
+		{ "cache_size", required_argument, NULL, 'C' },
+		{ "write_uarea", required_argument, NULL, 'w' },
+		{ "help", no_argument, NULL, 'h' },
+		{ NULL, 0, NULL, 0 }
+	};
+
+	static const char *shortopts = "b:t:e:d:p:r:i:c:C:w:h";
+
+	/* Establish defaults first; command line values override them */
+	init_config(config);
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 'b':
+			parse_burst_pattern(config, optarg);
+			break;
+		case 't':
+			config->type = atoi(optarg);
+			break;
+		case 'e':
+			config->num_evs = atoi(optarg);
+			break;
+		case 'd':
+			config->data_size = atoi(optarg);
+			break;
+		case 'p':
+			config->policy = atoi(optarg);
+			break;
+		case 'r':
+			config->num_rounds = atoi(optarg);
+			break;
+		case 'i':
+			config->num_ignore = atoi(optarg);
+			break;
+		case 'c':
+			config->num_workers = atoi(optarg);
+			break;
+		case 'C':
+			config->cache_size = atoi(optarg);
+			break;
+		case 'w':
+			config->uarea_size = atoi(optarg);
+			break;
+		case 'h':
+			print_usage(&config->dyn_defs);
+			return PRS_TERM;
+		case '?':
+		default:
+			print_usage(&config->dyn_defs);
+			return PRS_NOK;
+		}
+	}
+
+	/* Cross-check parsed values against implementation capabilities */
+	return check_options(config);
+}
+
+/* Record allocation latency for one alloc burst: accumulate the burst time
+ * and count, and track the extreme burst times together with the round and
+ * pattern index at which they occurred.
+ */
+static inline void save_alloc_stats(odp_time_t t1, odp_time_t t2, uint32_t num_alloc,
+				    uint64_t round, uint8_t pattern, stats_t *stats)
+{
+	const uint64_t diff_ns = odp_time_diff_ns(t2, t1);
+
+	++stats->alloc_b_cnt;
+	stats->alloc_cnt += num_alloc;
+	stats->alloc_tm += diff_ns;
+
+	if (stats->max_alloc_tm < diff_ns) {
+		stats->max_alloc_tm = diff_ns;
+		stats->max_alloc_rnd = round;
+		stats->max_alloc_pt = pattern;
+	}
+
+	if (stats->min_alloc_tm > diff_ns) {
+		stats->min_alloc_tm = diff_ns;
+		stats->min_alloc_rnd = round;
+		stats->min_alloc_pt = pattern;
+	}
+}
+
+/* Fill an event user area with the known byte pattern UA_DATA; used to
+ * measure the cost of touching the user area after allocation. */
+static inline void write_to_uarea(uint8_t *data, uint32_t size)
+{
+	memset(data, UA_DATA, size);
+}
+
+/* Record user area write latency for one burst: accumulate the burst time
+ * and track the extremes together with the round and pattern index at
+ * which they occurred.
+ */
+static inline void save_uarea_stats(odp_time_t t1, odp_time_t t2, uint64_t round, uint8_t pattern,
+				    stats_t *stats)
+{
+	const uint64_t diff_ns = odp_time_diff_ns(t2, t1);
+
+	stats->uarea_tm += diff_ns;
+
+	if (stats->max_uarea_tm < diff_ns) {
+		stats->max_uarea_tm = diff_ns;
+		stats->max_uarea_rnd = round;
+		stats->max_uarea_pt = pattern;
+	}
+
+	if (stats->min_uarea_tm > diff_ns) {
+		stats->min_uarea_tm = diff_ns;
+		stats->min_uarea_rnd = round;
+		stats->min_uarea_pt = pattern;
+	}
+}
+
+/* Record free latency for one free burst: accumulate the burst time and
+ * count, and track the extreme burst times together with the round and
+ * pattern index at which they occurred.
+ *
+ * Fix: removed the trailing ODPH_MAX()/ODPH_MIN() updates of
+ * max_free_tm/min_free_tm. After the branches above they were always
+ * no-ops, and unlike the branches they carried no round/pattern
+ * bookkeeping; dropping them also makes this function consistent with
+ * save_alloc_stats() and save_uarea_stats().
+ */
+static inline void save_free_stats(odp_time_t t1, odp_time_t t2, uint64_t round, uint8_t pattern,
+				   stats_t *stats)
+{
+	const uint64_t tm_diff = odp_time_diff_ns(t2, t1);
+
+	stats->free_tm += tm_diff;
+	++stats->free_b_cnt;
+
+	if (tm_diff > stats->max_free_tm) {
+		stats->max_free_tm = tm_diff;
+		stats->max_free_rnd = round;
+		stats->max_free_pt = pattern;
+	}
+
+	if (tm_diff < stats->min_free_tm) {
+		stats->min_free_tm = tm_diff;
+		stats->min_free_rnd = round;
+		stats->min_free_pt = pattern;
+	}
+}
+
+/* Allocate 'num' buffers from the worker's pool into data[idx..],
+ * retrying short allocations up to MAX_RETRIES times. Burst alloc latency
+ * is recorded only when is_saved is set (ignored warm-up rounds pass
+ * false). When a user area size is configured, the user area of every
+ * allocated buffer is written and that latency is recorded separately.
+ * Returns the number of buffers actually allocated.
+ */
+static uint32_t allocate_buffers(worker_config_t *config, void *data, uint32_t idx, uint32_t num,
+				 uint64_t round, uint8_t pattern, odp_bool_t is_saved)
+{
+	odp_time_t t1, t2;
+	odp_pool_t pool = config->pool;
+	uint32_t retries = MAX_RETRIES;
+	odp_buffer_t *bufs = &((odp_buffer_t *)data)[idx];
+	uint32_t num_alloc, num_tot = 0U;
+	int ret;
+	stats_t *stats = &config->stats;
+
+	while (retries-- > 0U && num_tot < num) {
+		num_alloc = num - num_tot;
+		t1 = odp_time_local_strict();
+		ret = odp_buffer_alloc_multi(pool, &bufs[num_tot], num_alloc);
+		t2 = odp_time_local_strict();
+
+		/* Negative return is a hard failure: count it, abort burst */
+		if (odp_unlikely(ret < 0)) {
+			++stats->alloc_errs;
+			break;
+		}
+
+		/* Short allocation: count a re-allocation attempt */
+		if (odp_unlikely((uint32_t)ret < num_alloc))
+			++stats->reallocs;
+
+		num_tot += ret;
+
+		if (odp_likely(is_saved))
+			save_alloc_stats(t1, t2, ret, round, pattern, stats);
+	}
+
+	/* Optionally touch the user area of each allocated event and time it */
+	if (config->uarea_size > 0U) {
+		t1 = odp_time_local_strict();
+
+		for (uint32_t i = 0U; i < num_tot; ++i)
+			write_to_uarea(odp_buffer_user_area(bufs[i]), config->uarea_size);
+
+		t2 = odp_time_local_strict();
+
+		if (odp_likely(is_saved))
+			save_uarea_stats(t1, t2, round, pattern, stats);
+	}
+
+	return num_tot;
+}
+
+/* Free 'num' buffers starting at slot 'idx' of the handle array, timing the
+ * whole burst and recording it in stats unless the round is ignored. */
+static void free_buffers(void *data, uint32_t idx, uint32_t num, stats_t *stats, uint64_t round,
+ uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_buffer_t *span = &((odp_buffer_t *)data)[idx];
+ odp_time_t before, after;
+
+ before = odp_time_local_strict();
+ odp_buffer_free_multi(span, num);
+ after = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_free_stats(before, after, round, pattern, stats);
+}
+
+/* Packet-pool variant of allocate_buffers(): allocates 'num' packets of
+ * config->data_size bytes into the handle array at slot 'idx', retrying up to
+ * MAX_RETRIES times on partial bursts. Alloc and user-area write phases are
+ * timed separately; results are recorded only when 'is_saved' is set.
+ * Returns the number of packets actually allocated. */
+static uint32_t allocate_packets(worker_config_t *config, void *data, uint32_t idx, uint32_t num,
+ uint64_t round, uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_time_t t1, t2;
+ odp_pool_t pool = config->pool;
+ uint32_t retries = MAX_RETRIES, data_size = config->data_size;
+ odp_packet_t *pkts = &((odp_packet_t *)data)[idx];
+ uint32_t num_alloc, num_tot = 0U;
+ int ret;
+ stats_t *stats = &config->stats;
+
+ while (retries-- > 0U && num_tot < num) {
+ num_alloc = num - num_tot;
+ t1 = odp_time_local_strict();
+ ret = odp_packet_alloc_multi(pool, data_size, &pkts[num_tot], num_alloc);
+ t2 = odp_time_local_strict();
+
+ /* Negative return is a hard allocation error: count and stop. */
+ if (odp_unlikely(ret < 0)) {
+ ++stats->alloc_errs;
+ break;
+ }
+
+ /* Partial allocation: count a retry (realloc) attempt. */
+ if (odp_unlikely((uint32_t)ret < num_alloc))
+ ++stats->reallocs;
+
+ num_tot += ret;
+
+ if (odp_likely(is_saved))
+ save_alloc_stats(t1, t2, ret, round, pattern, stats);
+ }
+
+ /* Optionally touch each packet's user area and time the write phase. */
+ if (config->uarea_size > 0U) {
+ t1 = odp_time_local_strict();
+
+ for (uint32_t i = 0U; i < num_tot; ++i)
+ write_to_uarea(odp_packet_user_area(pkts[i]), config->uarea_size);
+
+ t2 = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_uarea_stats(t1, t2, round, pattern, stats);
+ }
+
+ return num_tot;
+}
+
+/* Free 'num' packets starting at slot 'idx' of the handle array, timing the
+ * whole burst and recording it in stats unless the round is ignored. */
+static void free_packets(void *data, uint32_t idx, uint32_t num, stats_t *stats, uint64_t round,
+ uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_packet_t *span = &((odp_packet_t *)data)[idx];
+ odp_time_t before, after;
+
+ before = odp_time_local_strict();
+ odp_packet_free_multi(span, num);
+ after = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_free_stats(before, after, round, pattern, stats);
+}
+
+/* Timeout-pool variant of allocate_buffers(): allocates 'num' timeout events
+ * into the handle array at slot 'idx', retrying up to MAX_RETRIES times on
+ * partial bursts. Alloc and user-area write phases are timed separately;
+ * results are recorded only when 'is_saved' is set. Returns the number of
+ * timeouts actually allocated. */
+static uint32_t allocate_timeouts(worker_config_t *config, void *data, uint32_t idx, uint32_t num,
+ uint64_t round, uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_time_t t1, t2;
+ odp_pool_t pool = config->pool;
+ uint32_t retries = MAX_RETRIES;
+ odp_timeout_t *tmos = &((odp_timeout_t *)data)[idx];
+ uint32_t num_alloc, num_tot = 0U;
+ int ret;
+ stats_t *stats = &config->stats;
+
+ while (retries-- > 0U && num_tot < num) {
+ num_alloc = num - num_tot;
+ t1 = odp_time_local_strict();
+ ret = odp_timeout_alloc_multi(pool, &tmos[num_tot], num_alloc);
+ t2 = odp_time_local_strict();
+
+ /* Negative return is a hard allocation error: count and stop. */
+ if (odp_unlikely(ret < 0)) {
+ ++stats->alloc_errs;
+ break;
+ }
+
+ /* Partial allocation: count a retry (realloc) attempt. */
+ if (odp_unlikely((uint32_t)ret < num_alloc))
+ ++stats->reallocs;
+
+ num_tot += ret;
+
+ if (odp_likely(is_saved))
+ save_alloc_stats(t1, t2, ret, round, pattern, stats);
+ }
+
+ /* Optionally touch each timeout's user area and time the write phase. */
+ if (config->uarea_size > 0U) {
+ t1 = odp_time_local_strict();
+
+ for (uint32_t i = 0U; i < num_tot; ++i)
+ write_to_uarea(odp_timeout_user_area(tmos[i]), config->uarea_size);
+
+ t2 = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_uarea_stats(t1, t2, round, pattern, stats);
+ }
+
+ return num_tot;
+}
+
+/* Free 'num' timeouts starting at slot 'idx' of the handle array, timing the
+ * whole burst and recording it in stats unless the round is ignored. */
+static void free_timeouts(void *data, uint32_t idx, uint32_t num, stats_t *stats, uint64_t round,
+ uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_timeout_t *span = &((odp_timeout_t *)data)[idx];
+ odp_time_t before, after;
+
+ before = odp_time_local_strict();
+ odp_timeout_free_multi(span, num);
+ after = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_free_stats(before, after, round, pattern, stats);
+}
+
+/* Packet-vector variant of the allocation helpers. Unlike the multi-alloc
+ * based helpers, vectors are allocated one at a time inside a single timed
+ * window and there is no retry loop: the loop stops at the first failed
+ * allocation. alloc_errs is incremented only when nothing at all could be
+ * allocated. Returns the number of vectors actually allocated. */
+static uint32_t allocate_vectors(worker_config_t *config, void *data, uint32_t idx, uint32_t num,
+ uint64_t round, uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_time_t t1, t2;
+ odp_pool_t pool = config->pool;
+ uint32_t num_tot = 0U;
+ odp_packet_vector_t *vecs = &((odp_packet_vector_t *)data)[idx], vec;
+ stats_t *stats = &config->stats;
+
+ t1 = odp_time_local_strict();
+
+ for (uint32_t i = 0U; i < num; ++i) {
+ vec = odp_packet_vector_alloc(pool);
+
+ if (odp_unlikely(vec == ODP_PACKET_VECTOR_INVALID))
+ break;
+
+ vecs[num_tot++] = vec;
+ }
+
+ t2 = odp_time_local_strict();
+
+ if (odp_unlikely(num_tot == 0))
+ ++stats->alloc_errs;
+ else if (odp_likely(is_saved))
+ save_alloc_stats(t1, t2, num_tot, round, pattern, stats);
+
+ /* Optionally touch each vector's user area and time the write phase. */
+ if (config->uarea_size > 0U) {
+ t1 = odp_time_local_strict();
+
+ for (uint32_t i = 0U; i < num_tot; ++i)
+ write_to_uarea(odp_packet_vector_user_area(vecs[i]), config->uarea_size);
+
+ t2 = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_uarea_stats(t1, t2, round, pattern, stats);
+ }
+
+ return num_tot;
+}
+
+/* Free 'num' packet vectors starting at slot 'idx', one at a time inside a
+ * single timed window, and record the burst unless the round is ignored. */
+static void free_vectors(void *data, uint32_t idx, uint32_t num, stats_t *stats, uint64_t round,
+ uint8_t pattern, odp_bool_t is_saved)
+{
+ odp_packet_vector_t *span = &((odp_packet_vector_t *)data)[idx];
+ odp_time_t before, after;
+ uint32_t i;
+
+ before = odp_time_local_strict();
+
+ for (i = 0U; i < num; ++i)
+ odp_packet_vector_free(span[i]);
+
+ after = odp_time_local_strict();
+
+ if (odp_likely(is_saved))
+ save_free_stats(before, after, round, pattern, stats);
+}
+
+/* Create (or reuse) the worker pool. With the SINGLE policy the first created
+ * pool handle is cached in a function-local static and returned for all
+ * subsequent calls, so every worker shares one pool; otherwise each call
+ * creates a new pool. Not thread-safe: relies on being called from setup
+ * code only (the static has no synchronization). */
+static odp_pool_t create_pool(const char *name, const odp_pool_param_t *params, uint8_t policy)
+{
+ static odp_pool_t pool = ODP_POOL_INVALID;
+
+ if (policy == SINGLE && pool != ODP_POOL_INVALID)
+ return pool;
+
+ pool = odp_pool_create(name, params);
+
+ return pool;
+}
+
+/* Build per-worker state: select pool parameters and the alloc/free function
+ * pair for the configured event type, then create (or share, under SINGLE
+ * policy) a pool and reserve a handle-array SHM for every worker. Returns
+ * false on any pool/SHM failure; already-created resources are left for
+ * teardown() to release. */
+static odp_bool_t setup_worker_config(prog_config_t *config)
+{
+ odp_pool_param_t param;
+ odp_pool_t pool;
+ worker_config_t *worker;
+ odp_shm_t shm;
+ void *data;
+
+ odp_pool_param_init(&param);
+
+ /* Event type decides both pool parameters and the alloc/free hooks. */
+ if (config->type == BUFFER) {
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = config->num_evs;
+ param.buf.size = config->data_size;
+ param.buf.uarea_size = config->uarea_size;
+ param.buf.cache_size = config->cache_size;
+ config->alloc_fn = allocate_buffers;
+ config->free_fn = free_buffers;
+ } else if (config->type == PACKET) {
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = config->num_evs;
+ param.pkt.len = config->data_size;
+ param.pkt.seg_len = config->seg_len;
+ param.pkt.uarea_size = config->uarea_size;
+ param.pkt.cache_size = config->cache_size;
+ config->alloc_fn = allocate_packets;
+ config->free_fn = free_packets;
+ } else if (config->type == TMO) {
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = config->num_evs;
+ param.tmo.uarea_size = config->uarea_size;
+ param.tmo.cache_size = config->cache_size;
+ config->alloc_fn = allocate_timeouts;
+ config->free_fn = free_timeouts;
+ } else {
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = config->num_evs;
+ param.vector.max_size = config->data_size;
+ param.vector.uarea_size = config->uarea_size;
+ param.vector.cache_size = config->cache_size;
+ config->alloc_fn = allocate_vectors;
+ config->free_fn = free_vectors;
+ }
+
+ for (uint32_t i = 0U; i < config->num_workers; ++i) {
+ /* Under SINGLE policy create_pool() returns the same handle
+ * for every worker; otherwise each worker gets its own pool.
+ * NOTE(review): every iteration reserves an SHM with the same
+ * name - assumes the implementation allows duplicate SHM
+ * names; confirm against the ODP SHM API. */
+ pool = create_pool(PROG_NAME "_pool", &param, config->policy);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating worker pool\n");
+ return false;
+ }
+
+ shm = odp_shm_reserve(PROG_NAME "_shm",
+ config->handle_size * config->num_data_elems,
+ ODP_CACHE_LINE_SIZE, 0U);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Error creating worker SHM\n");
+ return false;
+ }
+
+ data = odp_shm_addr(shm);
+
+ if (data == NULL) {
+ ODPH_ERR("Error resolving worker SHM\n");
+ return false;
+ }
+
+ worker = &config->worker_config[i];
+ worker->pool = pool;
+ worker->data = data;
+ worker->prog_config = config;
+ worker->shm = shm;
+ worker->data_size = config->data_size;
+ worker->uarea_size = config->uarea_size;
+ }
+
+ return true;
+}
+
+/* Worker thread entry point. Replays the configured alloc/free/delay burst
+ * pattern for num_rounds rounds, treating the handle array as a stack-like
+ * window [head_idx, cur_idx): ALLOC appends at cur_idx, FREE with TOP option
+ * consumes from the head, FREE with BOTTOM consumes from the tail, DELAY
+ * busy-waits. The first num_ignore rounds are warm-up and are not recorded.
+ * Total wall time is stored in stats->tot_tm. Synchronizes with the main
+ * thread via the init and term barriers. */
+static int run_test(void *args)
+{
+ worker_config_t *config = args;
+ odp_time_t t1, t2;
+ uint32_t head_idx, cur_idx, num_ignore = config->prog_config->num_ignore, val, num_alloc,
+ idx;
+ odp_bool_t is_saved;
+ const uint8_t num_elems = config->prog_config->num_elems;
+ const alloc_elem_t *elems = config->prog_config->alloc_elems, *elem;
+ uint8_t op;
+ void *data = config->data;
+ const alloc_fn_t alloc_fn = config->prog_config->alloc_fn;
+ stats_t *stats = &config->stats;
+ const free_fn_t free_fn = config->prog_config->free_fn;
+
+ odp_barrier_wait(&config->prog_config->init_barrier);
+ t1 = odp_time_local_strict();
+
+ for (uint32_t i = 0U; i < config->prog_config->num_rounds; ++i) {
+ head_idx = 0U;
+ cur_idx = head_idx;
+ /* Round is recorded once the warm-up counter reaches zero. */
+ is_saved = (num_ignore > 0U ? num_ignore-- : num_ignore) == 0U;
+
+ for (uint8_t j = 0U; j < num_elems; ++j) {
+ elem = &elems[j];
+ val = elem->val;
+ op = elem->op;
+
+ if (op == ALLOC) {
+ num_alloc = alloc_fn(config, data, cur_idx, val, i, j, is_saved);
+
+ if (odp_unlikely(num_alloc < val))
+ ++stats->pattern_errs;
+
+ cur_idx += num_alloc;
+ } else if (op == FREE) {
+ /* Due to potential pattern errors, there might not be expected
+ * amount of freeable events. */
+ val = ODPH_MIN(val, cur_idx - head_idx);
+
+ if (elem->opt == TOP) {
+ idx = head_idx;
+ head_idx += val;
+ } else {
+ cur_idx -= val;
+ idx = cur_idx;
+ }
+
+ free_fn(data, idx, val, stats, i, j, is_saved);
+ } else {
+ /* DELAY element: busy-wait 'val' nanoseconds. */
+ odp_time_wait_ns(val);
+ }
+ }
+ }
+
+ t2 = odp_time_local_strict();
+ stats->tot_tm = odp_time_diff_ns(t2, t1);
+ odp_barrier_wait(&config->prog_config->term_barrier);
+
+ return 0;
+}
+
+/* Initialize the start/stop barriers (workers + the main thread) and launch
+ * one run_test() thread per worker. Waits on the init barrier so the callers
+ * observe all workers started. Returns false if thread creation fails. */
+static odp_bool_t setup_workers(prog_config_t *config)
+{
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_params[config->num_workers], *thr_param;
+
+ /* "+ 1" accounts for the main thread joining each barrier. */
+ odp_barrier_init(&config->init_barrier, config->num_workers + 1);
+ odp_barrier_init(&config->term_barrier, config->num_workers + 1);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = config->odp_instance;
+ thr_common.cpumask = &config->worker_mask;
+
+ for (uint32_t i = 0; i < config->num_workers; ++i) {
+ thr_param = &thr_params[i];
+ odph_thread_param_init(thr_param);
+ thr_param->start = run_test;
+ thr_param->thr_type = ODP_THREAD_WORKER;
+ thr_param->arg = &config->worker_config[i];
+ }
+
+ if ((uint32_t)odph_thread_create(config->thread_tbl, &thr_common, thr_params,
+ config->num_workers) != config->num_workers) {
+ ODPH_ERR("Error configuring worker threads\n");
+ return false;
+ }
+
+ odp_barrier_wait(&config->init_barrier);
+
+ return true;
+}
+
+/* Prepare the full test: pools/SHM first, then the worker threads. */
+static odp_bool_t setup_test(prog_config_t *config)
+{
+ if (!setup_worker_config(config))
+ return false;
+
+ return setup_workers(config);
+}
+
+/* Release the workers from the termination barrier and join all of them. */
+static void stop_test(prog_config_t *config)
+{
+ odp_barrier_wait(&config->term_barrier);
+ (void)odph_thread_join(config->thread_tbl, config->num_workers);
+}
+
+/* Print the test configuration, the burst pattern, per-worker latency
+ * breakdowns (alloc/free and optional user-area write bursts, with min/max
+ * and the round/pattern position of each extreme) and finally the averages
+ * over all workers. All per-worker averages guard against zero counts. */
+static void print_stats(const prog_config_t *config)
+{
+ const alloc_elem_t *elem;
+ const stats_t *stats;
+ uint64_t ev_rate, ave_b_alloc_tm, b_alloc_min, b_alloc_max, ave_b_free_tm, b_free_min,
+ b_free_max, ave_alloc_tm, ave_free_tm, ave_ua_b_tm, b_ua_min, b_ua_max, ave_ua_tm,
+ tot_b_alloc_tm = 0U, tot_b_free_tm = 0U, tot_alloc_tm = 0U, tot_free_tm = 0U,
+ tot_alloc_min = 0U, tot_alloc_max = 0U, tot_free_min = 0U, tot_free_max = 0U,
+ tot_b_ua_tm = 0U, tot_ua_tm = 0U, tot_ua_min = 0U, tot_ua_max = 0U;
+
+ printf("\n==================\n\n"
+ "Pool latency test done\n\n"
+ " type: %s\n"
+ " event count: %u\n", config->type == BUFFER ? "buffer" :
+ config->type == PACKET ? "packet" : config->type == TMO ? "timeout" : "vector",
+ config->num_evs);
+
+ /* Timeout pools have no data/vector size to report. */
+ if (config->type != TMO)
+ printf(" %s %u\n", config->type != VECTOR ? "data size: " : "vector size:",
+ config->data_size);
+
+ printf(" pool policy: %s\n"
+ " round count: %u\n"
+ " ignore count: %u\n"
+ " cache size: %" PRIi64 "\n"
+ " user area: %u (B)\n"
+ " burst pattern:\n", config->policy == SINGLE ? "shared" : "per-worker",
+ config->num_rounds, config->num_ignore, config->cache_size, config->uarea_size);
+
+ for (uint8_t i = 0U; i < config->num_elems; ++i) {
+ elem = &config->alloc_elems[i];
+ printf(" %s %u%s\n", elem->op == ALLOC ? "allocate:" :
+ elem->op == FREE && elem->opt == TOP ? "free (t):" :
+ elem->op == FREE && elem->opt == BOTTOM ? "free (b):" :
+ "delay: ", elem->val, elem->op == DELAY ? " (ns)" : "");
+ }
+
+ printf("\n");
+
+ for (uint32_t i = 0U; i < config->num_workers; ++i) {
+ stats = &config->worker_config[i].stats;
+ /* Events per second; computed in double, truncated to u64. */
+ ev_rate = stats->tot_tm > 0U ?
+ (double)stats->alloc_cnt / stats->tot_tm * ODP_TIME_SEC_IN_NS : 0U;
+ ave_b_alloc_tm = stats->alloc_b_cnt > 0U ?
+ stats->alloc_tm / stats->alloc_b_cnt : 0U;
+ /* Min/max are meaningful only if at least one burst was timed. */
+ b_alloc_min = ave_b_alloc_tm > 0U ? stats->min_alloc_tm : 0U;
+ b_alloc_max = ave_b_alloc_tm > 0U ? stats->max_alloc_tm : 0U;
+ ave_b_free_tm = stats->free_b_cnt > 0U ?
+ stats->free_tm / stats->free_b_cnt : 0U;
+ b_free_min = ave_b_free_tm > 0U ? stats->min_free_tm : 0U;
+ b_free_max = ave_b_free_tm > 0U ? stats->max_free_tm : 0U;
+ ave_alloc_tm = stats->alloc_cnt > 0U ? stats->alloc_tm / stats->alloc_cnt : 0U;
+ ave_free_tm = stats->alloc_cnt > 0U ? stats->free_tm / stats->alloc_cnt : 0U;
+
+ printf(" worker %d:\n"
+ " significant events allocated/freed: %" PRIu64 "\n"
+ " allocation retries: %" PRIu64 "\n"
+ " allocation errors: %" PRIu64 "\n"
+ " pattern errors: %" PRIu64 "\n"
+ " run time: %" PRIu64 " (ns)\n"
+ " event rate %" PRIu64 " (evs/s)\n"
+ " average latency breakdown (ns):\n"
+ " per allocation burst: %" PRIu64 " (min: %" PRIu64 " (round: %"
+ PRIu64 ", pattern: %u), max: %" PRIu64 " (round: %" PRIu64 ", pattern: %u))"
+ "\n"
+ " per allocation: %" PRIu64 "\n"
+ " per free burst: %" PRIu64 " (min: %" PRIu64 " (round: %"
+ PRIu64 ", pattern: %u), max: %" PRIu64 " (round: %" PRIu64 ", pattern: %u))"
+ "\n"
+ " per free: %" PRIu64 "\n", i, stats->alloc_cnt,
+ stats->reallocs, stats->alloc_errs, stats->pattern_errs, stats->tot_tm,
+ ev_rate, ave_b_alloc_tm, b_alloc_min, stats->min_alloc_rnd,
+ stats->min_alloc_pt, b_alloc_max, stats->max_alloc_rnd, stats->max_alloc_pt,
+ ave_alloc_tm, ave_b_free_tm, b_free_min, stats->min_free_rnd,
+ stats->min_free_pt, b_free_max, stats->max_free_rnd, stats->max_free_pt,
+ ave_free_tm);
+ tot_b_alloc_tm += ave_b_alloc_tm;
+ tot_b_free_tm += ave_b_free_tm;
+ tot_alloc_tm += ave_alloc_tm;
+ tot_free_tm += ave_free_tm;
+ tot_alloc_min += b_alloc_min;
+ tot_alloc_max += b_alloc_max;
+ tot_free_min += b_free_min;
+ tot_free_max += b_free_max;
+
+ /* User-area write latency section only when a user area exists. */
+ if (config->uarea_size > 0U) {
+ ave_ua_b_tm = stats->alloc_b_cnt > 0U ?
+ stats->uarea_tm / stats->alloc_b_cnt : 0U;
+ ave_ua_tm = stats->alloc_cnt > 0U ?
+ stats->uarea_tm / stats->alloc_cnt : 0U;
+ b_ua_min = ave_ua_b_tm > 0U ? stats->min_uarea_tm : 0U;
+ b_ua_max = ave_ua_b_tm > 0U ? stats->max_uarea_tm : 0U;
+ printf(" per ua write burst: %" PRIu64 " (min: %" PRIu64 " ("
+ "round: %" PRIu64 ", pattern: %u), max: %" PRIu64 " (round: %"
+ PRIu64 ", pattern: %u))\n"
+ " per ua write: %" PRIu64 "\n", ave_ua_b_tm,
+ b_ua_min, stats->min_uarea_rnd, stats->min_uarea_pt, b_ua_max,
+ stats->max_uarea_rnd, stats->max_uarea_pt, ave_ua_tm);
+ tot_b_ua_tm += ave_ua_b_tm;
+ tot_ua_tm += ave_ua_tm;
+ tot_ua_min += b_ua_min;
+ tot_ua_max += b_ua_max;
+ }
+
+ printf("\n");
+ }
+
+ /* Totals are averages of the per-worker averages. */
+ printf(" total (ns):\n"
+ " per allocation burst: %" PRIu64 " (min: %" PRIu64 ", max: %" PRIu64 ")\n"
+ " per allocation: %" PRIu64 "\n"
+ " per free burst: %" PRIu64 " (min: %" PRIu64 ", max: %" PRIu64 ")\n"
+ " per free: %" PRIu64 "\n",
+ tot_b_alloc_tm / config->num_workers, tot_alloc_min / config->num_workers,
+ tot_alloc_max / config->num_workers, tot_alloc_tm / config->num_workers,
+ tot_b_free_tm / config->num_workers, tot_free_min / config->num_workers,
+ tot_free_max / config->num_workers, tot_free_tm / config->num_workers);
+
+ if (config->uarea_size > 0U) {
+ printf(" per ua write burst: %" PRIu64 " (min: %" PRIu64 ", max: %"
+ PRIu64 ")\n"
+ " per ua write: %" PRIu64 "\n",
+ tot_b_ua_tm / config->num_workers, tot_ua_min / config->num_workers,
+ tot_ua_max / config->num_workers, tot_ua_tm / config->num_workers);
+ }
+
+ printf("\n==================\n");
+}
+
+/* Destroy a worker pool. Under the SINGLE policy all workers share one pool,
+ * so a function-local static flag ensures it is destroyed only on the first
+ * call; with per-worker pools every call destroys its own pool. */
+static void destroy_pool(odp_pool_t pool, uint8_t policy)
+{
+ static odp_bool_t is_destroyed;
+
+ if (policy == SINGLE && is_destroyed)
+ return;
+
+ (void)odp_pool_destroy(pool);
+ is_destroyed = true;
+}
+
+/* Release every worker's pool (respecting the pool policy) and SHM region. */
+static void teardown(const prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_workers; ++i) {
+ const worker_config_t *worker = &config->worker_config[i];
+
+ if (worker->pool != ODP_POOL_INVALID)
+ destroy_pool(worker->pool, config->policy);
+
+ if (worker->shm != ODP_SHM_INVALID)
+ (void)odp_shm_free(worker->shm);
+ }
+}
+
+/* Program entry point: initialize ODP, reserve SHM for the program config,
+ * parse options, run the latency test and print results. Cleanup is handled
+ * with goto labels so each failure path releases what was acquired. */
+int main(int argc, char **argv)
+{
+ odph_helper_options_t odph_opts;
+ odp_init_t init_param;
+ odp_instance_t odp_instance;
+ odp_shm_t shm_cfg = ODP_SHM_INVALID;
+ int ret = EXIT_SUCCESS;
+ parse_result_t parse_res;
+
+ /* Strip helper options (e.g. memory model) before our own parsing. */
+ argc = odph_parse_options(argc, argv);
+
+ if (odph_options(&odph_opts) == -1) {
+ ODPH_ERR("Error while reading ODP helper options, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = odph_opts.mem_model;
+
+ if (odp_init_global(&odp_instance, &init_param, NULL)) {
+ ODPH_ERR("ODP global init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* NOTE(review): this early exit does not call odp_term_global();
+ * the process exits anyway, but the global instance is left
+ * unterminated on this path. */
+ if (odp_init_local(odp_instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("ODP local init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ shm_cfg = odp_shm_reserve(PROG_NAME "_cfg", sizeof(prog_config_t), ODP_CACHE_LINE_SIZE,
+ 0U);
+
+ if (shm_cfg == ODP_SHM_INVALID) {
+ ODPH_ERR("Error reserving shared memory\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ prog_conf = odp_shm_addr(shm_cfg);
+
+ if (prog_conf == NULL) {
+ ODPH_ERR("Error resolving shared memory address\n");
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ parse_res = parse_options(argc, argv, prog_conf);
+
+ if (parse_res == PRS_NOK) {
+ ret = EXIT_FAILURE;
+ goto out;
+ }
+
+ /* PRS_TERM: e.g. help requested - terminate successfully. */
+ if (parse_res == PRS_TERM) {
+ ret = EXIT_SUCCESS;
+ goto out;
+ }
+
+ prog_conf->odp_instance = odp_instance;
+
+ if (!setup_test(prog_conf)) {
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ /* Workers run between the init and term barriers; stop_test() joins. */
+ stop_test(prog_conf);
+ print_stats(prog_conf);
+
+out_test:
+ teardown(prog_conf);
+
+out:
+ if (shm_cfg != ODP_SHM_INVALID)
+ (void)odp_shm_free(shm_cfg);
+
+ if (odp_term_local()) {
+ ODPH_ERR("ODP local terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(odp_instance)) {
+ ODPH_ERR("ODP global terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c
new file mode 100644
index 000000000..43a39a21e
--- /dev/null
+++ b/test/performance/odp_pool_perf.c
@@ -0,0 +1,750 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_pool_perf.c
+ *
+ * Performance test application for pool APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Bit flags for the --stats_mode option; each flag enables one pool
+ * statistics counter (mapped onto odp_pool_stats_opt_t bits in
+ * create_pool()). */
+#define STAT_AVAILABLE 0x1
+#define STAT_CACHE 0x2
+#define STAT_THR_CACHE 0x4
+#define STAT_ALLOC_OPS 0x10
+#define STAT_FREE_OPS 0x20
+#define STAT_TOTAL_OPS 0x40
+
+/* Command line options after parsing and defaulting. */
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint32_t num_event;
+ uint32_t num_round;
+ uint32_t max_burst;
+ uint32_t num_burst;
+ uint32_t data_size;
+ uint32_t cache_size;
+ uint32_t stats_mode;
+ int pool_type;
+
+} test_options_t;
+
+/* Per-thread result counters, filled in by the worker functions. */
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t frees;
+ uint64_t events;
+ uint64_t nsec;
+ uint64_t cycles;
+
+} test_stat_t;
+
+/* Shared test state: options, synchronization, the pool under test and a
+ * result slot per possible ODP thread. */
+typedef struct test_global_t {
+ test_options_t test_options;
+
+ odp_barrier_t barrier;
+ odp_pool_t pool;
+ odp_cpumask_t cpumask;
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+
+} test_global_t;
+
+/* Print the command line help text for odp_pool_perf. */
+static void print_usage(void)
+{
+ printf("\n"
+ "Pool performance test\n"
+ "\n"
+ "Usage: odp_pool_perf [options]\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default 1.\n"
+ " -e, --num_event Number of events\n"
+ " -r, --num_round Number of rounds\n"
+ " -b, --burst Maximum number of events per operation\n"
+ " -n, --num_burst Number of bursts allocated/freed back-to-back\n"
+ " -s, --data_size Data size in bytes\n"
+ " -S, --stats_mode Pool statistics usage. Enable counters with combination of these flags:\n"
+ " 0: no pool statistics (default)\n"
+ " 0x1: available\n"
+ " 0x2: cache_available\n"
+ " 0x4: thread_cache_available\n"
+ " 0x10: alloc_ops\n"
+ " 0x20: free_ops\n"
+ " 0x40: total_ops\n"
+ " -t, --pool_type 0: Buffer pool (default)\n"
+ " 1: Packet pool\n"
+ " -C, --cache_size Pool cache size (per thread)\n"
+ " -h, --help This help\n"
+ "\n");
+}
+
+/* Parse command line arguments into test_options, applying defaults first.
+ * Returns 0 on success, -1 on bad/unknown options or an inconsistent burst
+ * configuration (num_burst * max_burst must not exceed num_event).
+ * NOTE(review): numeric options use atoi(), so malformed numbers silently
+ * parse as 0 - consider strtol() with error checking. */
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_event", required_argument, NULL, 'e'},
+ {"num_round", required_argument, NULL, 'r'},
+ {"burst", required_argument, NULL, 'b'},
+ {"num_burst", required_argument, NULL, 'n'},
+ {"data_size", required_argument, NULL, 's'},
+ {"stats_mode", required_argument, NULL, 'S'},
+ {"pool_type", required_argument, NULL, 't'},
+ {"cache_size", required_argument, NULL, 'C'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:e:r:b:n:s:S:t:C:h";
+
+ /* Defaults; UINT32_MAX cache size means "use the pool default". */
+ test_options->num_cpu = 1;
+ test_options->num_event = 1000;
+ test_options->num_round = 100000;
+ test_options->max_burst = 100;
+ test_options->num_burst = 1;
+ test_options->data_size = 64;
+ test_options->stats_mode = 0;
+ test_options->pool_type = 0;
+ test_options->cache_size = UINT32_MAX;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'e':
+ test_options->num_event = atoi(optarg);
+ break;
+ case 'r':
+ test_options->num_round = atoi(optarg);
+ break;
+ case 'b':
+ test_options->max_burst = atoi(optarg);
+ break;
+ case 'n':
+ test_options->num_burst = atoi(optarg);
+ break;
+ case 's':
+ test_options->data_size = atoi(optarg);
+ break;
+ case 'S':
+ /* Base 0: accepts the documented 0x-prefixed flags. */
+ test_options->stats_mode = strtoul(optarg, NULL, 0);
+ break;
+ case 't':
+ test_options->pool_type = atoi(optarg);
+ break;
+ case 'C':
+ test_options->cache_size = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ /* The worst-case number of simultaneously held events must fit. */
+ if (test_options->num_burst * test_options->max_burst >
+ test_options->num_event) {
+ printf("Not enough events (%u) for the burst configuration.\n"
+ "Use smaller burst size (%u) or less bursts (%u)\n",
+ test_options->num_event, test_options->max_burst,
+ test_options->num_burst);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Resolve the worker CPU mask and count (0 requests all available workers),
+ * then initialize the start barrier for that many workers. Returns 0 on
+ * success, -1 when the request exceeds what is supported. */
+static int set_num_cpu(test_global_t *global)
+{
+ test_options_t *opts = &global->test_options;
+ int requested = opts->num_cpu;
+ int available;
+
+ /* One thread slot is reserved for the main thread. */
+ if (requested > ODP_THREAD_COUNT_MAX - 1) {
+ printf("Error: Too many workers. Maximum is %i.\n",
+ ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ available = odp_cpumask_default_worker(&global->cpumask, requested);
+
+ if (requested && available != requested) {
+ printf("Error: Too many workers. Max supported %i.\n", available);
+ return -1;
+ }
+
+ /* Zero means "use all available workers". */
+ if (requested == 0) {
+ requested = available;
+ opts->num_cpu = requested;
+ }
+
+ odp_barrier_init(&global->barrier, requested);
+
+ return 0;
+}
+
+/* Create the buffer or packet pool under test. Maps the --stats_mode flags
+ * onto odp_pool_stats_opt_t bits, prints the effective configuration, and
+ * validates the requested event count, data size and cache size against the
+ * pool capabilities. Stores the pool handle in global->pool. Returns 0 on
+ * success, -1 on capability violation or creation failure. */
+static int create_pool(test_global_t *global)
+{
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ odp_pool_stats_opt_t stats, stats_capa;
+ uint32_t max_num, max_size, min_cache_size, max_cache_size;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_event = test_options->num_event;
+ uint32_t num_round = test_options->num_round;
+ uint32_t max_burst = test_options->max_burst;
+ uint32_t num_burst = test_options->num_burst;
+ uint32_t num_cpu = test_options->num_cpu;
+ uint32_t data_size = test_options->data_size;
+ uint32_t cache_size = test_options->cache_size;
+ uint32_t stats_mode = test_options->stats_mode;
+ int packet_pool = test_options->pool_type;
+
+ stats.all = 0;
+
+ odp_pool_param_init(&pool_param);
+
+ /* UINT32_MAX means the user did not set -C: use the pool default. */
+ if (cache_size == UINT32_MAX)
+ cache_size = packet_pool ? pool_param.pkt.cache_size :
+ pool_param.buf.cache_size;
+
+ /* Translate STAT_* command line flags into stats option bits. */
+ if (stats_mode & STAT_AVAILABLE)
+ stats.bit.available = 1;
+ if (stats_mode & STAT_CACHE)
+ stats.bit.cache_available = 1;
+ if (stats_mode & STAT_THR_CACHE)
+ stats.bit.thread_cache_available = 1;
+ if (stats_mode & STAT_ALLOC_OPS)
+ stats.bit.alloc_ops = 1;
+ if (stats_mode & STAT_FREE_OPS)
+ stats.bit.free_ops = 1;
+ if (stats_mode & STAT_TOTAL_OPS)
+ stats.bit.total_ops = 1;
+
+ printf("\nPool performance test\n");
+ printf(" num cpu %u\n", num_cpu);
+ printf(" num rounds %u\n", num_round);
+ printf(" num events %u\n", num_event);
+ printf(" max burst %u\n", max_burst);
+ printf(" num bursts %u\n", num_burst);
+ printf(" data size %u\n", data_size);
+ printf(" cache size %u\n", cache_size);
+ printf(" stats mode 0x%x\n", stats_mode);
+ printf(" pool type %s\n\n", packet_pool ? "packet" : "buffer");
+
+ if (odp_pool_capability(&pool_capa)) {
+ printf("Error: Pool capa failed.\n");
+ return -1;
+ }
+
+ /* Pick the capability limits of the selected pool type. */
+ if (packet_pool) {
+ max_num = pool_capa.pkt.max_num;
+ max_size = pool_capa.pkt.max_len;
+ max_cache_size = pool_capa.pkt.max_cache_size;
+ min_cache_size = pool_capa.pkt.min_cache_size;
+ stats_capa = pool_capa.pkt.stats;
+ } else {
+ max_num = pool_capa.buf.max_num;
+ max_size = pool_capa.buf.max_size;
+ max_cache_size = pool_capa.buf.max_cache_size;
+ min_cache_size = pool_capa.buf.min_cache_size;
+ stats_capa = pool_capa.buf.stats;
+ }
+
+ /* Every requested stats bit must be supported by the implementation. */
+ if ((stats_capa.all & stats.all) != stats.all) {
+ printf("Error: requested statistics not supported (0x%" PRIx64 " / 0x%" PRIx64 ")\n",
+ stats.all, stats_capa.all);
+ return -1;
+ }
+
+ if (cache_size < min_cache_size) {
+ printf("Error: min cache size supported %u\n", min_cache_size);
+ return -1;
+ }
+
+ if (cache_size > max_cache_size) {
+ printf("Error: max cache size supported %u\n", max_cache_size);
+ return -1;
+ }
+
+ /* Zero capability limit means "no limit". */
+ if (max_num && num_event > max_num) {
+ printf("Error: max events supported %u\n", max_num);
+ return -1;
+ }
+
+ if (max_size && data_size > max_size) {
+ printf("Error: max data size supported %u\n", max_size);
+ return -1;
+ }
+
+ if (packet_pool) {
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = num_event;
+ pool_param.pkt.len = data_size;
+ pool_param.pkt.max_num = num_event;
+ pool_param.pkt.max_len = data_size;
+ pool_param.pkt.cache_size = cache_size;
+ } else {
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = num_event;
+ pool_param.buf.size = data_size;
+ pool_param.buf.cache_size = cache_size;
+ }
+
+ pool_param.stats.all = stats.all;
+
+ pool = odp_pool_create("pool perf", &pool_param);
+
+ if (pool == ODP_POOL_INVALID) {
+ printf("Error: Pool create failed.\n");
+ return -1;
+ }
+
+ global->pool = pool;
+
+ return 0;
+}
+
+/* Worker function for buffer pools. Per round: allocates num_burst bursts of
+ * up to max_burst buffers, then frees them again in max_burst-sized chunks.
+ * The whole measurement window (all rounds) is timed once in both wall time
+ * and CPU cycles; per-thread counters are stored in global->stat[thr].
+ * Returns -1 if an allocation call fails, 0 otherwise. */
+static int test_buffer_pool(void *arg)
+{
+ int ret, thr;
+ uint32_t num, num_free, num_freed, i, rounds;
+ uint64_t c1, c2, cycles, nsec;
+ uint64_t events, frees;
+ odp_time_t t1, t2;
+ test_global_t *global = arg;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_round = test_options->num_round;
+ uint32_t max_burst = test_options->max_burst;
+ uint32_t num_burst = test_options->num_burst;
+ uint32_t max_num = num_burst * max_burst;
+ odp_pool_t pool = global->pool;
+ odp_buffer_t buf[max_num];
+
+ thr = odp_thread_id();
+
+ for (i = 0; i < max_num; i++)
+ buf[i] = ODP_BUFFER_INVALID;
+
+ events = 0;
+ frees = 0;
+ ret = 0;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+ c1 = odp_cpu_cycles();
+
+ for (rounds = 0; rounds < num_round; rounds++) {
+ num = 0;
+
+ for (i = 0; i < num_burst; i++) {
+ ret = odp_buffer_alloc_multi(pool, &buf[num],
+ max_burst);
+ if (odp_unlikely(ret < 0)) {
+ printf("Error: Alloc failed. Round %u\n",
+ rounds);
+ /* Free what was allocated so far before bailing. */
+ if (num)
+ odp_buffer_free_multi(buf, num);
+
+ return -1;
+ }
+
+ /* Partial allocations are accepted; num tracks reality. */
+ num += ret;
+ }
+
+ if (odp_unlikely(num == 0))
+ continue;
+
+ events += num;
+ num_freed = 0;
+
+ /* Free in chunks of at most max_burst buffers. */
+ while (num_freed < num) {
+ num_free = num - num_freed;
+ if (num_free > max_burst)
+ num_free = max_burst;
+
+ odp_buffer_free_multi(&buf[num_freed], num_free);
+ frees++;
+ num_freed += num_free;
+ }
+ }
+
+ c2 = odp_cpu_cycles();
+ t2 = odp_time_local();
+
+ nsec = odp_time_diff_ns(t2, t1);
+ cycles = odp_cpu_cycles_diff(c2, c1);
+
+ /* Update stats*/
+ global->stat[thr].rounds = rounds;
+ global->stat[thr].frees = frees;
+ global->stat[thr].events = events;
+ global->stat[thr].nsec = nsec;
+ global->stat[thr].cycles = cycles;
+
+ return 0;
+}
+
+/* Worker function for packet pools; mirrors test_buffer_pool() but allocates
+ * data_size-byte packets. Per round: num_burst alloc bursts of up to
+ * max_burst packets, then frees in max_burst-sized chunks. Wall time and CPU
+ * cycles for the whole window are stored in global->stat[thr]. Returns -1 if
+ * an allocation call fails, 0 otherwise. */
+static int test_packet_pool(void *arg)
+{
+ int ret, thr;
+ uint32_t num, num_free, num_freed, i, rounds;
+ uint64_t c1, c2, cycles, nsec;
+ uint64_t events, frees;
+ odp_time_t t1, t2;
+ test_global_t *global = arg;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_round = test_options->num_round;
+ uint32_t max_burst = test_options->max_burst;
+ uint32_t num_burst = test_options->num_burst;
+ uint32_t max_num = num_burst * max_burst;
+ uint32_t data_size = test_options->data_size;
+ odp_pool_t pool = global->pool;
+ odp_packet_t pkt[max_num];
+
+ thr = odp_thread_id();
+
+ for (i = 0; i < max_num; i++)
+ pkt[i] = ODP_PACKET_INVALID;
+
+ events = 0;
+ frees = 0;
+ ret = 0;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+ c1 = odp_cpu_cycles();
+
+ for (rounds = 0; rounds < num_round; rounds++) {
+ num = 0;
+
+ for (i = 0; i < num_burst; i++) {
+ ret = odp_packet_alloc_multi(pool, data_size, &pkt[num],
+ max_burst);
+ if (odp_unlikely(ret < 0)) {
+ printf("Error: Alloc failed. Round %u\n",
+ rounds);
+
+ /* Free what was allocated so far before bailing. */
+ if (num)
+ odp_packet_free_multi(pkt, num);
+
+ return -1;
+ }
+
+ /* Partial allocations are accepted; num tracks reality. */
+ num += ret;
+ }
+
+ if (odp_unlikely(num == 0))
+ continue;
+
+ events += num;
+ num_freed = 0;
+
+ /* Free in chunks of at most max_burst packets. */
+ while (num_freed < num) {
+ num_free = num - num_freed;
+ if (num_free > max_burst)
+ num_free = max_burst;
+
+ odp_packet_free_multi(&pkt[num_freed], num_free);
+ frees++;
+ num_freed += num_free;
+ }
+ }
+
+ c2 = odp_cpu_cycles();
+ t2 = odp_time_local();
+
+ nsec = odp_time_diff_ns(t2, t1);
+ cycles = odp_cpu_cycles_diff(c2, c1);
+
+ /* Update stats*/
+ global->stat[thr].rounds = rounds;
+ global->stat[thr].frees = frees;
+ global->stat[thr].events = events;
+ global->stat[thr].nsec = nsec;
+ global->stat[thr].cycles = cycles;
+
+ return 0;
+}
+
+/* Launch num_cpu worker threads running the test function that matches the
+ * configured pool type. All workers share one parameter block. Returns 0 on
+ * success, -1 when fewer threads than requested could be created. */
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t common;
+ odph_thread_param_t param;
+ test_options_t *opts = &global->test_options;
+ int workers = opts->num_cpu;
+
+ odph_thread_common_param_init(&common);
+ common.instance = instance;
+ common.cpumask = &global->cpumask;
+ common.share_param = 1;
+
+ odph_thread_param_init(&param);
+ param.arg = global;
+ param.thr_type = ODP_THREAD_WORKER;
+ param.start = opts->pool_type ? test_packet_pool : test_buffer_pool;
+
+ if (odph_thread_create(global->thread_tbl, &common, &param,
+ workers) != workers)
+ return -1;
+
+ return 0;
+}
+
+/* Measure the average latency of odp_pool_stats() over 1000 calls and print
+ * the counters from the last call. Thread range covers workers plus the main
+ * thread, clamped to ODP_POOL_MAX_THREAD_STATS. */
+static void test_stats_perf(test_global_t *global)
+{
+ odp_pool_stats_t stats;
+ odp_time_t t1, t2;
+ uint64_t nsec;
+ int i;
+ int num_thr = global->test_options.num_cpu + 1; /* workers + main thread */
+ odp_pool_t pool = global->pool;
+ double nsec_ave = 0.0;
+ const int rounds = 1000;
+
+ if (num_thr > ODP_POOL_MAX_THREAD_STATS)
+ num_thr = ODP_POOL_MAX_THREAD_STATS;
+
+ memset(&stats, 0, sizeof(odp_pool_stats_t));
+ stats.thread.first = 0;
+ stats.thread.last = num_thr - 1;
+
+ t1 = odp_time_local_strict();
+
+ for (i = 0; i < rounds; i++) {
+ if (odp_pool_stats(pool, &stats)) {
+ printf("Error: Stats request failed on round %i\n", i);
+ break;
+ }
+ }
+
+ t2 = odp_time_local_strict();
+ nsec = odp_time_diff_ns(t2, t1);
+
+ /* 'i' is the number of completed calls (may be < rounds on error). */
+ if (i > 0)
+ nsec_ave = (double)nsec / i;
+
+ printf("Pool statistics:\n");
+ printf(" odp_pool_stats() calls %i\n", i);
+ printf(" ave call latency %.2f nsec\n", nsec_ave);
+ printf(" num threads %i\n", num_thr);
+ printf(" alloc_ops %" PRIu64 "\n", stats.alloc_ops);
+ printf(" free_ops %" PRIu64 "\n", stats.free_ops);
+ printf(" total_ops %" PRIu64 "\n", stats.total_ops);
+ printf(" available %" PRIu64 "\n", stats.available);
+ printf(" cache_available %" PRIu64 "\n", stats.cache_available);
+ for (i = 0; i < num_thr; i++) {
+ printf(" thr[%2i] cache_available %" PRIu64 "\n",
+ i, stats.thread.cache_available[i]);
+ }
+
+ printf("\n");
+}
+
+/* Summarize per-thread benchmark counters and print the results.
+ *
+ * Sums the stats of all threads that ran at least one round, prints a
+ * per-thread events/sec table and then averages over the configured
+ * number of worker CPUs.
+ *
+ * Fix: the averages are printed with %.3f but were previously computed
+ * with integer division (uint64_t / int), silently truncating the
+ * fractional part. Promote to double before dividing.
+ */
+static void print_stat(test_global_t *global)
+{
+	int i, num;
+	double rounds_ave, allocs_ave, frees_ave;
+	double events_ave, nsec_ave, cycles_ave;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	uint32_t num_burst = test_options->num_burst;
+	uint64_t rounds_sum = 0;
+	uint64_t frees_sum = 0;
+	uint64_t events_sum = 0;
+	uint64_t nsec_sum = 0;
+	uint64_t cycles_sum = 0;
+
+	/* Sums over all thread slots (unused slots are zero) */
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		rounds_sum += global->stat[i].rounds;
+		frees_sum += global->stat[i].frees;
+		events_sum += global->stat[i].events;
+		nsec_sum += global->stat[i].nsec;
+		cycles_sum += global->stat[i].cycles;
+	}
+
+	if (rounds_sum == 0) {
+		printf("No results.\n");
+		return;
+	}
+
+	/* Floating point division: integer division would truncate before
+	 * the %.3f formatting below. */
+	rounds_ave = (double)rounds_sum / num_cpu;
+	allocs_ave = ((double)num_burst * rounds_sum) / num_cpu;
+	frees_ave = (double)frees_sum / num_cpu;
+	events_ave = (double)events_sum / num_cpu;
+	nsec_ave = (double)nsec_sum / num_cpu;
+	cycles_ave = (double)cycles_sum / num_cpu;
+	num = 0;
+
+	printf("RESULTS - per thread (Million events per sec):\n");
+	printf("----------------------------------------------\n");
+	printf("        1      2      3      4      5      6      7      8      9     10");
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->stat[i].rounds) {
+			if ((num % 10) == 0)
+				printf("\n   ");
+
+			printf("%6.1f ", (1000.0 * global->stat[i].events) /
+			       global->stat[i].nsec);
+			num++;
+		}
+	}
+	printf("\n\n");
+
+	printf("RESULTS - average over %i threads:\n", num_cpu);
+	printf("----------------------------------\n");
+	printf("  alloc calls:          %.3f\n", allocs_ave);
+	printf("  free calls:           %.3f\n", frees_ave);
+	printf("  duration:             %.3f msec\n", nsec_ave / 1000000);
+	printf("  num cycles:           %.3f M\n", cycles_ave / 1000000);
+	printf("  cycles per round:     %.3f\n",
+	       cycles_ave / rounds_ave);
+	printf("  cycles per event:     %.3f\n",
+	       cycles_ave / events_ave);
+	printf("  ave events allocated: %.3f\n",
+	       events_ave / allocs_ave);
+	printf("  allocs per sec:       %.3f M\n",
+	       (1000.0 * allocs_ave) / nsec_ave);
+	printf("  frees per sec:        %.3f M\n",
+	       (1000.0 * frees_ave) / nsec_ave);
+	printf("  events per sec:       %.3f M\n\n",
+	       (1000.0 * events_ave) / nsec_ave);
+}
+
+/* Pool performance test entry point.
+ *
+ * Initializes ODP with unused features disabled, reserves shared memory
+ * for the global state, creates the pool, runs the workers and prints
+ * the results.
+ *
+ * Fix: the return value of start_workers() was ignored; on a thread
+ * creation failure the program would go on to join threads that were
+ * never created. Check it and fail explicitly.
+ */
+int main(int argc, char **argv)
+{
+	odph_helper_options_t helper_options;
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm;
+	test_global_t *global;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: Reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.schedule = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		printf("Error: Global init failed.\n");
+		return -1;
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		printf("Error: Local init failed.\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve("pool_perf_global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: Shared mem reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	global = odp_shm_addr(shm);
+	if (global == NULL) {
+		ODPH_ERR("Error: Shared mem alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memset(global, 0, sizeof(test_global_t));
+	global->pool = ODP_POOL_INVALID;
+
+	if (parse_options(argc, argv, &global->test_options))
+		return -1;
+
+	odp_sys_info_print();
+
+	if (set_num_cpu(global))
+		return -1;
+
+	if (create_pool(global))
+		return -1;
+
+	/* Start workers; fail instead of joining threads that were never
+	 * created. */
+	if (start_workers(global, instance)) {
+		ODPH_ERR("Error: Start workers failed.\n");
+		return -1;
+	}
+
+	/* Wait workers to exit */
+	odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
+
+	if (global->test_options.stats_mode)
+		test_stats_perf(global);
+
+	print_stat(global);
+
+	if (odp_pool_destroy(global->pool)) {
+		printf("Error: Pool destroy failed.\n");
+		return -1;
+	}
+
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Error: Shared mem free failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		printf("Error: term local failed.\n");
+		return -1;
+	}
+
+	if (odp_term_global(instance)) {
+		printf("Error: term global failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/test/performance/odp_queue_perf.c b/test/performance/odp_queue_perf.c
new file mode 100644
index 000000000..7d4612cb8
--- /dev/null
+++ b/test/performance/odp_queue_perf.c
@@ -0,0 +1,651 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_queue_perf.c
+ *
+ * Performance test application for queue APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_QUEUES (32 * 1024)
+
+/* Parsed command line options */
+typedef struct test_options_t {
+	uint32_t num_queue;	/* number of plain queues under test */
+	uint32_t num_event;	/* events preloaded into each queue */
+	uint32_t num_round;	/* dequeue/enqueue rounds per worker */
+	uint32_t max_burst;	/* max events per deq/enq operation */
+	odp_nonblocking_t nonblock;	/* blocking / lockfree / waitfree */
+	int single;		/* single producer, single consumer mode */
+	int num_cpu;		/* worker count, 0 = all available */
+
+} test_options_t;
+
+/* Per-worker result counters */
+typedef struct test_stat_t {
+	uint64_t rounds;	/* rounds completed */
+	uint64_t events;	/* events moved through queues */
+	uint64_t nsec;		/* wall clock time of the test loop */
+	uint64_t cycles;	/* CPU cycles of the test loop */
+	uint64_t deq_retry;	/* empty dequeue attempts */
+	uint64_t enq_retry;	/* partial enqueue attempts */
+
+} test_stat_t;
+
+/* Shared test state, placed in shared memory */
+typedef struct test_global_t {
+	odp_barrier_t barrier;	/* start line for all workers */
+	test_options_t options;
+	odp_instance_t instance;
+	odp_shm_t shm;
+	odp_pool_t pool;	/* buffer pool backing all events */
+	odp_queue_t queue[MAX_QUEUES];
+	odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+	test_stat_t stat[ODP_THREAD_COUNT_MAX];	/* indexed by ODP thread id */
+
+} test_global_t;
+
+/* Print the command line help text */
+static void print_usage(void)
+{
+	printf("\n"
+	       "Plain queue performance test\n"
+	       "\n"
+	       "Usage: odp_queue_perf [options]\n"
+	       "\n"
+	       "  -c, --num_cpu          Number of worker threads. Default: 1\n"
+	       "  -q, --num_queue        Number of queues. Default: 1\n"
+	       "  -e, --num_event        Number of events per queue. Default: 1\n"
+	       "  -b, --burst_size       Maximum number of events per operation. Default: 1\n"
+	       "  -r, --num_round        Number of rounds\n"
+	       "  -l, --lockfree         Lockfree queues\n"
+	       "  -w, --waitfree         Waitfree queues\n"
+	       "  -s, --single           Single producer, single consumer\n"
+	       "  -h, --help             This help\n"
+	       "\n");
+}
+
+/* Parse command line options into test_options.
+ *
+ * Fills test_options with defaults first, then overrides from argv.
+ * Returns 0 on success, -1 on bad arguments (help text already printed)
+ * or when num_queue exceeds the test's MAX_QUEUES table size.
+ */
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+	int opt;
+	int long_index;
+	int ret = 0;
+
+	static const struct option longopts[] = {
+		{"num_cpu",    required_argument, NULL, 'c'},
+		{"num_queue",  required_argument, NULL, 'q'},
+		{"num_event",  required_argument, NULL, 'e'},
+		{"burst_size", required_argument, NULL, 'b'},
+		{"num_round",  required_argument, NULL, 'r'},
+		{"lockfree",   no_argument,       NULL, 'l'},
+		{"waitfree",   no_argument,       NULL, 'w'},
+		{"single",     no_argument,       NULL, 's'},
+		{"help",       no_argument,       NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "+c:q:e:b:r:lwsh";
+
+	/* Defaults, overridden below by command line arguments */
+	test_options->num_cpu   = 1;
+	test_options->num_queue = 1;
+	test_options->num_event = 1;
+	test_options->max_burst = 1;
+	test_options->num_round = 1000;
+	test_options->nonblock  = ODP_BLOCKING;
+	test_options->single    = 0;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 'c':
+			test_options->num_cpu = atoi(optarg);
+			break;
+		case 'q':
+			test_options->num_queue = atoi(optarg);
+			break;
+		case 'e':
+			test_options->num_event = atoi(optarg);
+			break;
+		case 'b':
+			test_options->max_burst = atoi(optarg);
+			break;
+		case 'r':
+			test_options->num_round = atoi(optarg);
+			break;
+		case 'l':
+			test_options->nonblock = ODP_NONBLOCKING_LF;
+			break;
+		case 'w':
+			test_options->nonblock = ODP_NONBLOCKING_WF;
+			break;
+		case 's':
+			test_options->single = 1;
+			break;
+		case 'h':
+			/* fall through */
+		default:
+			print_usage();
+			ret = -1;
+			break;
+		}
+	}
+
+	if (test_options->num_queue > MAX_QUEUES) {
+		printf("Too many queues %u. Test maximum %u.\n",
+		       test_options->num_queue, MAX_QUEUES);
+		return -1;
+	}
+
+	return ret;
+}
+
+/* Create the buffer pool and queues, and preload each queue with events.
+ *
+ * Validates the requested configuration against queue and pool
+ * capabilities for the selected blocking mode, creates num_queue plain
+ * queues and enqueues num_event buffer events into each. Events that
+ * were successfully enqueued are marked ODP_EVENT_INVALID in the local
+ * table so that the cleanup path frees only the leftovers.
+ *
+ * Returns 0 on success, -1 on any failure (pool/queues created so far
+ * are left for destroy_queues() to release).
+ */
+static int create_queues(test_global_t *global)
+{
+	odp_pool_capability_t pool_capa;
+	odp_queue_capability_t queue_capa;
+	odp_pool_param_t pool_param;
+	odp_queue_param_t queue_param;
+	odp_pool_t pool;
+	uint32_t i, j, max_size, max_num;
+	test_options_t *test_options = &global->options;
+	odp_nonblocking_t nonblock = test_options->nonblock;
+	uint32_t num_queue = test_options->num_queue;
+	uint32_t num_event = test_options->num_event;
+	uint32_t num_round = test_options->num_round;
+	uint32_t tot_event = num_queue * num_event;
+	int ret = 0;
+	odp_queue_t *queue = global->queue;
+	odp_event_t event[tot_event];
+
+	printf("\nTesting %s queues\n",
+	       nonblock == ODP_BLOCKING ? "NORMAL" :
+	       (nonblock == ODP_NONBLOCKING_LF ? "LOCKFREE" :
+	       (nonblock == ODP_NONBLOCKING_WF ? "WAITFREE" : "???")));
+	printf("  num rounds           %u\n", num_round);
+	printf("  num queues           %u\n", num_queue);
+	printf("  num events per queue %u\n", num_event);
+	printf("  max burst size       %u\n", test_options->max_burst);
+
+	/* Pre-mark handles so cleanup can tell what was created */
+	for (i = 0; i < num_queue; i++)
+		queue[i] = ODP_QUEUE_INVALID;
+
+	for (i = 0; i < tot_event; i++)
+		event[i] = ODP_EVENT_INVALID;
+
+	if (odp_queue_capability(&queue_capa)) {
+		printf("Error: Queue capa failed.\n");
+		return -1;
+	}
+
+	if (odp_pool_capability(&pool_capa)) {
+		printf("Error: Pool capa failed.\n");
+		return -1;
+	}
+
+	/* Check capability limits of the selected blocking mode.
+	 * A max_size of zero means "no limit". */
+	if (nonblock == ODP_BLOCKING) {
+		if (num_queue > queue_capa.plain.max_num) {
+			printf("Max queues supported %u\n",
+			       queue_capa.plain.max_num);
+			return -1;
+		}
+
+		max_size = queue_capa.plain.max_size;
+		if (max_size && num_event > max_size) {
+			printf("Max queue size supported %u\n", max_size);
+			return -1;
+		}
+	} else if (nonblock == ODP_NONBLOCKING_LF) {
+		if (queue_capa.plain.lockfree.max_num == 0) {
+			printf("Lockfree queues not supported\n");
+			return -1;
+		}
+
+		if (num_queue > queue_capa.plain.lockfree.max_num) {
+			printf("Max lockfree queues supported %u\n",
+			       queue_capa.plain.lockfree.max_num);
+			return -1;
+		}
+
+		max_size = queue_capa.plain.lockfree.max_size;
+		if (max_size && num_event > max_size) {
+			printf("Max lockfree queue size supported %u\n",
+			       max_size);
+			return -1;
+		}
+	} else if (nonblock == ODP_NONBLOCKING_WF) {
+		if (queue_capa.plain.waitfree.max_num == 0) {
+			printf("Waitfree queues not supported\n");
+			return -1;
+		}
+
+		if (num_queue > queue_capa.plain.waitfree.max_num) {
+			printf("Max waitfree queues supported %u\n",
+			       queue_capa.plain.waitfree.max_num);
+			return -1;
+		}
+
+		max_size = queue_capa.plain.waitfree.max_size;
+		if (max_size && num_event > max_size) {
+			printf("Max waitfree queue size supported %u\n",
+			       max_size);
+			return -1;
+		}
+	} else {
+		printf("Error: Bad queue blocking type\n");
+		return -1;
+	}
+
+	max_num = pool_capa.buf.max_num;
+
+	if (max_num && tot_event > max_num) {
+		printf("Error: max events supported %u\n", max_num);
+		return -1;
+	}
+
+	odp_pool_param_init(&pool_param);
+	pool_param.type = ODP_POOL_BUFFER;
+	pool_param.buf.num = tot_event;
+
+	pool = odp_pool_create("queue perf pool", &pool_param);
+
+	if (pool == ODP_POOL_INVALID) {
+		printf("Error: Pool create failed.\n");
+		return -1;
+	}
+
+	global->pool = pool;
+
+	odp_queue_param_init(&queue_param);
+	queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+	queue_param.nonblocking = nonblock;
+	queue_param.size = num_event;
+
+	if (test_options->single) {
+		queue_param.enq_mode = ODP_QUEUE_OP_MT_UNSAFE;
+		queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE;
+	}
+
+	for (i = 0; i < num_queue; i++) {
+		queue[i] = odp_queue_create(NULL, &queue_param);
+
+		if (queue[i] == ODP_QUEUE_INVALID) {
+			printf("Error: Queue create failed %u.\n", i);
+			return -1;
+		}
+	}
+
+	for (i = 0; i < tot_event; i++) {
+		event[i] = odp_buffer_to_event(odp_buffer_alloc(pool));
+
+		if (event[i] == ODP_EVENT_INVALID) {
+			printf("Error: Event alloc failed %u.\n", i);
+			ret = -1;
+			goto free_events;
+		}
+	}
+
+	/* Preload each queue; ownership of enqueued events moves to the
+	 * queue, so clear the local handle. */
+	for (i = 0; i < num_queue; i++) {
+		for (j = 0; j < num_event; j++) {
+			uint32_t id = i * num_event + j;
+
+			if (odp_queue_enq(queue[i], event[id])) {
+				printf("Error: Queue enq failed %u/%u\n", i, j);
+				ret = -1;
+				goto free_events;
+			}
+
+			event[id] = ODP_EVENT_INVALID;
+		}
+	}
+
+free_events:
+	/* Free events that were not stored into queues */
+	for (i = 0; i < tot_event; i++) {
+		if (event[i] != ODP_EVENT_INVALID)
+			odp_event_free(event[i]);
+	}
+
+	return ret;
+}
+
+/* Drain and destroy all queues, then destroy the buffer pool.
+ *
+ * Each queue is drained with up to num_event dequeues (the number of
+ * events originally loaded) before being destroyed. Stops at the first
+ * invalid handle or destroy failure. Returns 0 on success, -1 on error.
+ */
+static int destroy_queues(test_global_t *global)
+{
+	odp_event_t ev;
+	uint32_t i, j;
+	int ret = 0;
+	test_options_t *test_options = &global->options;
+	uint32_t num_queue = test_options->num_queue;
+	uint32_t num_event = test_options->num_event;
+	odp_queue_t *queue = global->queue;
+	odp_pool_t pool = global->pool;
+
+	for (i = 0; i < num_queue; i++) {
+		if (queue[i] == ODP_QUEUE_INVALID) {
+			printf("Error: Invalid queue handle (i: %u).\n", i);
+			break;
+		}
+
+		/* Free the events still stored in the queue */
+		for (j = 0; j < num_event; j++) {
+			ev = odp_queue_deq(queue[i]);
+
+			if (ev != ODP_EVENT_INVALID)
+				odp_event_free(ev);
+		}
+
+		if (odp_queue_destroy(queue[i])) {
+			printf("Error: Queue destroy failed %u.\n", i);
+			ret = -1;
+			break;
+		}
+	}
+
+	/* Pool may be invalid if create_queues() failed early */
+	if (pool != ODP_POOL_INVALID && odp_pool_destroy(pool)) {
+		printf("Error: Pool destroy failed.\n");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/* Worker thread: the timed dequeue/enqueue benchmark loop.
+ *
+ * Each round dequeues a burst (up to max_burst events) from the next
+ * queue in round-robin order, retrying until at least one event is
+ * received, then enqueues the whole burst back to the same queue.
+ * Empty dequeues and partial enqueues are counted as retries. Results
+ * are stored into the per-thread stat slot indexed by ODP thread id.
+ */
+static int run_test(void *arg)
+{
+	uint64_t c1, c2, cycles, nsec;
+	odp_time_t t1, t2;
+	uint32_t rounds;
+	int num_ev;
+	test_stat_t *stat;
+	test_global_t *global = arg;
+	test_options_t *test_options = &global->options;
+	odp_queue_t queue;
+	uint64_t num_deq_retry = 0;
+	uint64_t num_enq_retry = 0;
+	uint64_t events = 0;
+	uint32_t num_queue = test_options->num_queue;
+	uint32_t num_round = test_options->num_round;
+	int thr = odp_thread_id();
+	int ret = 0;
+	uint32_t i = 0;
+	uint32_t max_burst = test_options->max_burst;
+	odp_event_t ev[max_burst];
+
+	stat = &global->stat[thr];
+
+	/* Start all workers at the same time */
+	odp_barrier_wait(&global->barrier);
+
+	t1 = odp_time_local();
+	c1 = odp_cpu_cycles();
+
+	for (rounds = 0; rounds < num_round; rounds++) {
+		int num_enq = 0;
+
+		/* Round-robin over queues until a burst is dequeued */
+		do {
+			queue = global->queue[i++];
+
+			if (i == num_queue)
+				i = 0;
+
+			num_ev = odp_queue_deq_multi(queue, ev, max_burst);
+
+			if (odp_unlikely(num_ev < 0))
+				ODPH_ABORT("odp_queue_deq_multi() failed\n");
+
+			if (odp_unlikely(num_ev == 0))
+				num_deq_retry++;
+
+		} while (num_ev == 0);
+
+		/* Put the full burst back; may take several attempts */
+		while (num_enq < num_ev) {
+			int num = odp_queue_enq_multi(queue, &ev[num_enq], num_ev - num_enq);
+
+			if (odp_unlikely(num < 0))
+				ODPH_ABORT("odp_queue_enq_multi() failed\n");
+
+			num_enq += num;
+
+			if (odp_unlikely(num_enq != num_ev))
+				num_enq_retry++;
+		}
+		events += num_ev;
+	}
+
+	c2 = odp_cpu_cycles();
+	t2 = odp_time_local();
+
+	nsec = odp_time_diff_ns(t2, t1);
+	cycles = odp_cpu_cycles_diff(c2, c1);
+
+	stat->rounds = rounds;
+	stat->events = events;
+	stat->nsec = nsec;
+	stat->cycles = cycles;
+	stat->deq_retry = num_deq_retry;
+	stat->enq_retry = num_enq_retry;
+
+	return ret;
+}
+
+/* Resolve the worker CPU mask and launch the benchmark threads.
+ *
+ * A num_cpu of zero selects all available worker CPUs; the resolved
+ * count is written back to the options. Also initializes the start
+ * barrier used by run_test(). Returns 0 on success, -1 on failure.
+ */
+static int start_workers(test_global_t *global)
+{
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param;
+	odp_cpumask_t cpumask;
+	int ret;
+	test_options_t *test_options = &global->options;
+	int num_cpu = test_options->num_cpu;
+
+	ret = odp_cpumask_default_worker(&cpumask, num_cpu);
+
+	if (num_cpu && ret != num_cpu) {
+		printf("Error: Too many workers. Max supported %i\n.", ret);
+		return -1;
+	}
+
+	/* Zero: all available workers */
+	if (num_cpu == 0) {
+		num_cpu = ret;
+		test_options->num_cpu = num_cpu;
+	}
+
+	printf("  num workers          %u\n\n", num_cpu);
+
+	odp_barrier_init(&global->barrier, num_cpu);
+
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = global->instance;
+	thr_common.cpumask = &cpumask;
+	thr_common.share_param = 1;
+
+	odph_thread_param_init(&thr_param);
+	thr_param.start = run_test;
+	thr_param.arg = global;
+	thr_param.thr_type = ODP_THREAD_WORKER;
+
+	if (odph_thread_create(global->thread_tbl, &thr_common, &thr_param,
+			       num_cpu) != num_cpu)
+		return -1;
+
+	return 0;
+}
+
+/* Summarize per-thread benchmark counters and print the results.
+ *
+ * Sums the stats of all threads that ran at least one round, prints a
+ * per-thread events/sec table, then per-thread averages and the total
+ * event rate.
+ *
+ * Fix: the averages are printed with %.3f but were previously computed
+ * with integer division (uint64_t / int), silently truncating the
+ * fractional part. Promote to double before dividing.
+ */
+static void print_stat(test_global_t *global)
+{
+	int i, num;
+	double rounds_ave, events_ave, nsec_ave, cycles_ave;
+	test_options_t *test_options = &global->options;
+	int num_cpu = test_options->num_cpu;
+	uint64_t rounds_sum = 0;
+	uint64_t events_sum = 0;
+	uint64_t nsec_sum = 0;
+	uint64_t cycles_sum = 0;
+	uint64_t deq_retry_sum = 0;
+	uint64_t enq_retry_sum = 0;
+
+	/* Sums over all thread slots (unused slots are zero) */
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		rounds_sum += global->stat[i].rounds;
+		events_sum += global->stat[i].events;
+		nsec_sum += global->stat[i].nsec;
+		cycles_sum += global->stat[i].cycles;
+		deq_retry_sum += global->stat[i].deq_retry;
+		enq_retry_sum += global->stat[i].enq_retry;
+	}
+
+	if (rounds_sum == 0) {
+		printf("No results.\n");
+		return;
+	}
+
+	/* Floating point division: integer division would truncate before
+	 * the %.3f formatting below. */
+	rounds_ave = (double)rounds_sum / num_cpu;
+	events_ave = (double)events_sum / num_cpu;
+	nsec_ave = (double)nsec_sum / num_cpu;
+	cycles_ave = (double)cycles_sum / num_cpu;
+	num = 0;
+
+	printf("RESULTS - per thread (Million events per sec):\n");
+	printf("----------------------------------------------\n");
+	printf("        1      2      3      4      5      6      7      8      9     10");
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->stat[i].rounds) {
+			if ((num % 10) == 0)
+				printf("\n   ");
+
+			printf("%6.1f ", (1000.0 * global->stat[i].events) /
+			       global->stat[i].nsec);
+			num++;
+		}
+	}
+	printf("\n\n");
+
+	printf("RESULTS - per thread average (%i threads):\n", num_cpu);
+	printf("------------------------------------------\n");
+	printf("  duration:           %.3f msec\n", nsec_ave / 1000000);
+	printf("  num cycles:         %.3f M\n", cycles_ave / 1000000);
+	printf("  events per dequeue: %.3f\n",
+	       events_ave / rounds_ave);
+	printf("  cycles per event:   %.3f\n",
+	       cycles_ave / events_ave);
+	printf("  dequeue retries:    %" PRIu64 "\n", deq_retry_sum);
+	printf("  enqueue retries:    %" PRIu64 "\n", enq_retry_sum);
+	printf("  events per sec:     %.3f M\n\n",
+	       (1000.0 * events_ave) / nsec_ave);
+
+	printf("TOTAL events per sec:  %.3f M\n\n",
+	       (1000.0 * events_sum) / nsec_ave);
+}
+
+/* Queue performance test entry point.
+ *
+ * Fixes:
+ * - The main thread was initialized as ODP_THREAD_WORKER; the sibling
+ *   performance apps in this patch use ODP_THREAD_CONTROL for the main
+ *   thread, and it performs only setup/teardown here.
+ * - create_queues() failure fell through to "return 0", reporting
+ *   success; start_workers() failure returned without releasing the
+ *   queues and pool. Track the exit code and route both failures
+ *   through the destroy path.
+ */
+int main(int argc, char **argv)
+{
+	odph_helper_options_t helper_options;
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm;
+	test_global_t *global;
+	int ret = 0;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: Reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.schedule = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		printf("Error: Global init failed.\n");
+		return -1;
+	}
+
+	/* Init this thread. The main thread only does setup/teardown, so
+	 * it is a control thread. */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		printf("Error: Local init failed.\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve("queue_perf_global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: Shared mem reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	global = odp_shm_addr(shm);
+	if (global == NULL) {
+		ODPH_ERR("Error: Shared mem alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memset(global, 0, sizeof(test_global_t));
+
+	if (parse_options(argc, argv, &global->options))
+		return -1;
+
+	odp_sys_info_print();
+
+	global->instance = instance;
+
+	if (create_queues(global)) {
+		printf("Error: Create queues failed.\n");
+		ret = -1;
+		goto destroy;
+	}
+
+	if (start_workers(global)) {
+		printf("Error: Test start failed.\n");
+		ret = -1;
+		goto destroy;
+	}
+
+	/* Wait workers to exit */
+	odph_thread_join(global->thread_tbl, global->options.num_cpu);
+
+	print_stat(global);
+
+destroy:
+	if (destroy_queues(global)) {
+		printf("Error: Destroy queues failed.\n");
+		return -1;
+	}
+
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Error: Shared mem free failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		printf("Error: term local failed.\n");
+		return -1;
+	}
+
+	if (odp_term_global(instance)) {
+		printf("Error: term global failed.\n");
+		return -1;
+	}
+
+	return ret;
+}
diff --git a/test/performance/odp_random.c b/test/performance/odp_random.c
new file mode 100644
index 000000000..99714d7b3
--- /dev/null
+++ b/test/performance/odp_random.c
@@ -0,0 +1,552 @@
+/* Copyright (c) 2021-2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_random.c
+ *
+ * Performance test application for random data APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define PSEUDO_RANDOM (-1)
+
+#define MB (1024ull * 1024ull)
+
+typedef struct test_global_t test_global_t;
+
+/* Per-thread argument: shared state pointer plus a private data buffer */
+typedef struct thread_arg_t {
+	test_global_t *global;
+	int thread_idx;		/* index into global->stat arrays */
+	uint8_t *data;		/* cache-line aligned output buffer */
+
+} thread_arg_t;
+
+/* Shared test state */
+struct test_global_t {
+	odp_barrier_t barrier;		/* start line for all workers */
+	odp_random_kind_t type;		/* kind under test (or PSEUDO_RANDOM) */
+	uint8_t *data;
+	uint32_t rounds;		/* rounds for the current kind */
+
+	thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+
+	/* Per-thread results, indexed by thread_idx */
+	struct {
+		uint64_t nsec[ODP_THREAD_COUNT_MAX];	/* total test time */
+		uint64_t sum[ODP_THREAD_COUNT_MAX];	/* sum of latencies */
+		uint64_t min[ODP_THREAD_COUNT_MAX];	/* min fill latency */
+		uint64_t max[ODP_THREAD_COUNT_MAX];	/* max fill latency */
+	} stat;
+};
+
+/* Command line options */
+typedef struct {
+	int mode;		/* 0: throughput, 1: latency */
+	int num_threads;
+	uint32_t size;		/* bytes generated per round */
+	uint32_t rounds;
+	uint64_t delay;		/* nsec between fills (latency mode) */
+
+} options_t;
+
+static options_t options;
+static const options_t options_def = {
+	.mode = 0,
+	.num_threads = 1,
+	.size = 256,
+	.rounds = 100000,
+	.delay = 0,
+};
+
+/* Print the command line help text with the compiled-in defaults */
+static void print_usage(void)
+{
+	printf("\n"
+	       "random data performance test\n"
+	       "\n"
+	       "Usage: odp_random [options]\n"
+	       "\n"
+	       "  -m, --mode     Test mode select (default: 0):\n"
+	       "                   0: Data throughput\n"
+	       "                   1: Data generation latency (size: 8B by default)\n"
+	       "  -t, --threads  Number of worker threads (default %u)\n"
+	       "  -s, --size     Size of buffer in bytes (default %u)\n"
+	       "  -r, --rounds   Number of test rounds (default %u)\n"
+	       "                 Divided by 100 for ODP_RANDOM_TRUE\n"
+	       "  -d, --delay    Delay (nsec) between buffer fills (default %" PRIu64 ").\n"
+	       "                 Affects only latency mode.\n"
+	       "  -h, --help     This help\n"
+	       "\n",
+	       options_def.num_threads, options_def.size, options_def.rounds, options_def.delay);
+}
+
+/* Parse command line options into the file-scope 'options'.
+ *
+ * Size is reset to zero before parsing so that a user-supplied -s can
+ * be distinguished from the default: when no size is given, throughput
+ * mode uses the 256 B default and latency mode uses 8 B.
+ * Returns 0 on success, -1 on bad arguments (help already printed).
+ */
+static int parse_options(int argc, char *argv[])
+{
+	int opt;
+	int long_index;
+	int ret = 0;
+
+	static const struct option longopts[] = {
+		{ "mode", required_argument, NULL, 'm' },
+		{ "threads", required_argument, NULL, 't' },
+		{ "size", required_argument, NULL, 's' },
+		{ "rounds", required_argument, NULL, 'r' },
+		{ "delay", required_argument, NULL, 'd' },
+		{ "help", no_argument, NULL, 'h' },
+		{ NULL, 0, NULL, 0 }
+	};
+
+	static const char *shortopts = "+m:t:s:r:d:h";
+
+	options = options_def;
+	options.size = 0; /* zero marks "size not given on command line" */
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 'm':
+			options.mode = atoi(optarg);
+			break;
+		case 't':
+			options.num_threads = atol(optarg);
+			break;
+		case 's':
+			options.size = atol(optarg);
+			break;
+		case 'r':
+			options.rounds = atol(optarg);
+			break;
+		case 'd':
+			options.delay = atol(optarg);
+			break;
+		case 'h':
+			/* fall through */
+		default:
+			print_usage();
+			ret = -1;
+			break;
+		}
+	}
+
+	if (options.num_threads < 1 || options.num_threads > ODP_THREAD_COUNT_MAX) {
+		ODPH_ERR("Bad number of threads: %i\n", options.num_threads);
+		return -1;
+	}
+
+	/* Apply the mode-dependent default size */
+	if (options.size == 0) {
+		options.size = options_def.size;
+
+		if (options.mode)
+			options.size = 8;
+	}
+
+	printf("\nOptions:\n");
+	printf("------------------------\n");
+	printf("  mode: %i\n", options.mode);
+	printf("  threads: %i\n", options.num_threads);
+	printf("  size: %u\n", options.size);
+	printf("  rounds: %u\n", options.rounds);
+	printf("  delay: %" PRIu64 "\n", options.delay);
+	printf("\n");
+
+	return ret;
+}
+
+/* Fill 'data' with 'size' random bytes, 'rounds' times.
+ *
+ * The generator APIs may return fewer bytes than requested, so each
+ * round loops until the buffer is full. The PSEUDO_RANDOM sentinel
+ * selects odp_random_test_data() with a local seed; any other value is
+ * passed to odp_random_data() as the kind. Aborts the process on a
+ * generator error.
+ */
+static inline void random_data_loop(odp_random_kind_t type, uint32_t rounds,
+				    uint8_t *data, uint32_t size)
+{
+	uint32_t i;
+	int32_t ret;
+
+	if ((int)type == PSEUDO_RANDOM) {
+		uint64_t seed = 0;
+
+		for (i = 0; i < rounds; i++) {
+			uint32_t pos = 0;
+
+			while (pos < size) {
+				ret = odp_random_test_data(data + pos, size - pos, &seed);
+
+				if (ret < 0) {
+					ODPH_ERR("odp_random_test_data() failed\n");
+					exit(EXIT_FAILURE);
+				}
+
+				pos += ret;
+			}
+		}
+	} else {
+		for (i = 0; i < rounds; i++) {
+			uint32_t pos = 0;
+
+			while (pos < size) {
+				ret = odp_random_data(data + pos, size - pos, type);
+
+				if (ret < 0) {
+					ODPH_ERR("odp_random_data() failed\n");
+					exit(EXIT_FAILURE);
+				}
+
+				pos += ret;
+			}
+		}
+	}
+}
+
+/* Worker thread for throughput mode (-m 0).
+ *
+ * Runs one warm-up round, synchronizes on the barrier, then times
+ * 'rounds' buffer fills and stores the elapsed nanoseconds into the
+ * per-thread stat slot. Always returns 0.
+ */
+static int test_random_perf(void *ptr)
+{
+	odp_time_t start;
+	uint64_t nsec;
+	thread_arg_t *thread_arg = ptr;
+	test_global_t *global = thread_arg->global;
+	odp_random_kind_t type = global->type;
+	int thread_idx = thread_arg->thread_idx;
+	uint8_t *data = thread_arg->data;
+	uint32_t size = options.size;
+	uint32_t rounds = global->rounds;
+
+	/* One warm up round */
+	random_data_loop(type, 1, data, size);
+
+	odp_barrier_wait(&global->barrier);
+
+	/* Test run */
+	start = odp_time_local();
+
+	random_data_loop(type, rounds, data, size);
+
+	nsec = odp_time_diff_ns(odp_time_local(), start);
+
+	global->stat.nsec[thread_idx] = nsec;
+
+	return 0;
+}
+
+/* Timed loop for latency mode: measure each individual buffer fill.
+ *
+ * Each round fills the buffer once (strict local time stamps around the
+ * fill), optionally sleeping 'delay' nsec between rounds. Tracks the
+ * min/max/sum of per-fill latencies and the total elapsed time, and
+ * stores them into the per-thread stat slots. Aborts on generator
+ * errors.
+ */
+static inline void random_data_latency(test_global_t *global, int thread_idx,
+				       uint32_t rounds, uint8_t *data, uint32_t size)
+{
+	uint32_t i;
+	int32_t ret;
+	odp_time_t t1, t2, start;
+	uint64_t nsec;
+	odp_random_kind_t type = global->type;
+	uint64_t delay = options.delay;
+	uint64_t min = UINT64_MAX;
+	uint64_t max = 0;
+	uint64_t sum = 0;
+	uint64_t seed = 0;
+
+	start = odp_time_local();
+
+	for (i = 0; i < rounds; i++) {
+		uint32_t pos = 0;
+
+		if (delay)
+			odp_time_wait_ns(delay);
+
+		if ((int)type == PSEUDO_RANDOM) {
+			t1 = odp_time_local_strict();
+			while (pos < size) {
+				ret = odp_random_test_data(data + pos, size - pos, &seed);
+
+				if (ret < 0) {
+					ODPH_ERR("odp_random_test_data() failed\n");
+					exit(EXIT_FAILURE);
+				}
+
+				pos += ret;
+			}
+			t2 = odp_time_local_strict();
+		} else {
+			t1 = odp_time_local_strict();
+			while (pos < size) {
+				ret = odp_random_data(data + pos, size - pos, type);
+
+				if (ret < 0) {
+					ODPH_ERR("odp_random_data() failed\n");
+					exit(EXIT_FAILURE);
+				}
+
+				pos += ret;
+			}
+			t2 = odp_time_local_strict();
+		}
+
+		nsec = odp_time_diff_ns(t2, t1);
+		sum += nsec;
+
+		if (nsec > max)
+			max = nsec;
+		if (nsec < min)
+			min = nsec;
+	}
+
+	nsec = odp_time_diff_ns(odp_time_local(), start);
+
+	global->stat.nsec[thread_idx] = nsec;
+	global->stat.sum[thread_idx] = sum;
+	global->stat.min[thread_idx] = min;
+	global->stat.max[thread_idx] = max;
+}
+
+/* Worker thread for latency mode (-m 1).
+ *
+ * One warm-up round, barrier sync, then the per-fill latency
+ * measurement loop. Always returns 0.
+ */
+static int test_random_latency(void *ptr)
+{
+	thread_arg_t *thread_arg = ptr;
+	test_global_t *global = thread_arg->global;
+	odp_random_kind_t type = global->type;
+	int thread_idx = thread_arg->thread_idx;
+	uint8_t *data = thread_arg->data;
+	uint32_t size = options.size;
+	uint32_t rounds = global->rounds;
+
+	/* One warm up round */
+	random_data_loop(type, 1, data, size);
+
+	odp_barrier_wait(&global->barrier);
+
+	/* Test run */
+	random_data_latency(global, thread_idx, rounds, data, size);
+
+	return 0;
+}
+
+/* Number of rounds for a random kind: true random is expected to be
+ * much slower, so its round count is divided by 100. */
+static uint32_t type_rounds(odp_random_kind_t type)
+{
+	switch (type) {
+	case ODP_RANDOM_TRUE:
+		return options.rounds / 100;
+	default:
+		return options.rounds;
+	}
+}
+
+/* Run a full benchmark pass for one random kind and print the results.
+ *
+ * Resets the shared stats, creates one worker per configured thread
+ * (throughput or latency entry point depending on the mode), joins
+ * them, and prints throughput figures; in latency mode also prints a
+ * per-thread min/max/average latency table. Exits the process on
+ * thread setup failures.
+ */
+static void test_type(odp_instance_t instance, test_global_t *global, odp_random_kind_t type)
+{
+	int i;
+	int num_threads = options.num_threads;
+	uint32_t rounds = type_rounds(type);
+	uint32_t size = options.size;
+
+	memset(&global->stat, 0, sizeof(global->stat));
+	global->type = type;
+	global->rounds = rounds;
+	odp_barrier_init(&global->barrier, num_threads);
+
+	odp_cpumask_t cpumask;
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param[num_threads];
+	odph_thread_t thr_worker[num_threads];
+
+	if (odp_cpumask_default_worker(&cpumask, num_threads) != num_threads) {
+		ODPH_ERR("Failed to get default CPU mask.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = instance;
+	thr_common.cpumask = &cpumask;
+
+	for (i = 0; i < num_threads; i++) {
+		odph_thread_param_init(&thr_param[i]);
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+		thr_param[i].arg = &global->thread_arg[i];
+
+		/* Mode selects the worker entry point */
+		if (options.mode == 0)
+			thr_param[i].start = test_random_perf;
+		else
+			thr_param[i].start = test_random_latency;
+	}
+
+	memset(&thr_worker, 0, sizeof(thr_worker));
+
+	if (odph_thread_create(thr_worker, &thr_common, thr_param, num_threads) != num_threads) {
+		ODPH_ERR("Failed to create worker threads.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odph_thread_join(thr_worker, num_threads) != num_threads) {
+		ODPH_ERR("Failed to join worker threads.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	double mb, seconds, nsec = 0;
+
+	/* Average test duration over threads */
+	for (i = 0; i < num_threads; i++)
+		nsec += global->stat.nsec[i];
+
+	nsec /= num_threads;
+
+	switch (type) {
+	case ODP_RANDOM_BASIC:
+		printf("ODP_RANDOM_BASIC\n");
+		break;
+	case ODP_RANDOM_CRYPTO:
+		printf("ODP_RANDOM_CRYPTO\n");
+		break;
+	case ODP_RANDOM_TRUE:
+		printf("ODP_RANDOM_TRUE\n");
+		break;
+	default:
+		printf("odp_random_test_data\n");
+	}
+
+	printf("--------------------\n");
+	printf("threads: %d  size: %u B  rounds: %u  ", num_threads, size, rounds);
+	mb = (uint64_t)num_threads * (uint64_t)size * (uint64_t)rounds;
+	mb /= MB;
+	seconds = (double)nsec / (double)ODP_TIME_SEC_IN_NS;
+	printf("MB: %.3f  seconds: %.3f  ", mb, seconds);
+	printf("MB/s: %.3f  ", mb / seconds);
+	printf("MB/s/thread: %.3f\n", mb / seconds / (double)num_threads);
+
+	if (options.mode) {
+		double ave;
+		uint64_t min = UINT64_MAX;
+		uint64_t max = 0;
+		uint64_t sum = 0;
+
+		printf("  latency (nsec)\n");
+		printf("          thread      min      max       ave\n");
+		for (i = 0; i < num_threads; i++) {
+			ave = (double)global->stat.sum[i] / rounds;
+			sum += global->stat.sum[i];
+
+			if (global->stat.min[i] < min)
+				min = global->stat.min[i];
+
+			if (global->stat.max[i] > max)
+				max = global->stat.max[i];
+
+			printf("%8i %8" PRIu64 " %8" PRIu64 " %10.1f\n", i, global->stat.min[i],
+			       global->stat.max[i], ave);
+		}
+
+		printf("     all %8" PRIu64 " %8" PRIu64 " %10.1f\n",
+		       min, max, ((double)sum / rounds) / num_threads);
+	}
+
+	printf("\n");
+}
+
+/* Random data performance test entry point.
+ *
+ * Initializes ODP, reserves shared memory for the globals and one
+ * cache-line aligned data buffer per thread, then benchmarks every
+ * random kind the implementation supports (the switch falls through
+ * from the strongest supported kind down to BASIC and the pseudo
+ * random test data generator).
+ */
+int main(int argc, char **argv)
+{
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm_glb, shm_data;
+	test_global_t *global;
+	int num_threads, i;
+	uint64_t tot_size, size;
+	uint8_t *addr;
+
+	if (parse_options(argc, argv))
+		exit(EXIT_FAILURE);
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.schedule = 1;
+	init.not_used.feat.stash = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		ODPH_ERR("Global init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		ODPH_ERR("Local init failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	odp_sys_info_print();
+
+	global = NULL;
+	shm_glb = odp_shm_reserve("test_globals", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+
+	if (shm_glb != ODP_SHM_INVALID)
+		global = (test_global_t *)odp_shm_addr(shm_glb);
+
+	if (!global) {
+		ODPH_ERR("Failed to reserve shm\n");
+		exit(EXIT_FAILURE);
+	}
+
+	memset(global, 0, sizeof(test_global_t));
+
+	num_threads = options.num_threads;
+	addr = NULL;
+	/* Per-thread buffer: rounded up to cache lines, plus one extra
+	 * line of padding to avoid false sharing between threads */
+	size = ODP_CACHE_LINE_SIZE + ODP_CACHE_LINE_ROUNDUP(options.size);
+	tot_size = num_threads * size;
+	shm_data = odp_shm_reserve("test_data", tot_size, ODP_CACHE_LINE_SIZE, 0);
+
+	if (shm_data != ODP_SHM_INVALID)
+		addr = odp_shm_addr(shm_data);
+
+	if (!addr) {
+		ODPH_ERR("Failed to reserve shm: size %" PRIu64 " bytes\n", tot_size);
+		exit(EXIT_FAILURE);
+	}
+
+	for (i = 0; i < num_threads; i++) {
+		global->thread_arg[i].global = global;
+		global->thread_arg[i].thread_idx = i;
+		global->thread_arg[i].data = addr + i * size;
+	}
+
+	odp_shm_print_all();
+
+	/* Test every kind supported by the implementation, strongest
+	 * first; intentional fallthrough. */
+	switch (odp_random_max_kind()) {
+	case ODP_RANDOM_TRUE:
+		test_type(instance, global, ODP_RANDOM_TRUE);
+		/* fall through */
+	case ODP_RANDOM_CRYPTO:
+		test_type(instance, global, ODP_RANDOM_CRYPTO);
+		/* fall through */
+	default:
+		test_type(instance, global, ODP_RANDOM_BASIC);
+		test_type(instance, global, PSEUDO_RANDOM);
+	}
+
+	if (odp_shm_free(shm_data)) {
+		ODPH_ERR("odp_shm_free() failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_shm_free(shm_glb)) {
+		ODPH_ERR("odp_shm_free() failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		ODPH_ERR("Local terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_global(instance)) {
+		ODPH_ERR("Global terminate failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return 0;
+}
diff --git a/test/common_plat/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 2b28cd7bc..0fec49fb9 100644
--- a/test/common_plat/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -1,21 +1,22 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
- * @file
+ * @example odp_sched_latency.c
*
- * @example odp_sched_latency.c ODP scheduling latency benchmark application
+ * Scheduling latency benchmark application
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
*/
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
-#include <test_debug.h>
-
/* ODP main header */
#include <odp_api.h>
@@ -25,26 +26,10 @@
/* GNU lib C */
#include <getopt.h>
-#define MAX_WORKERS 64 /**< Maximum number of worker threads */
#define MAX_QUEUES 4096 /**< Maximum number of queues */
+#define MAX_GROUPS 64
#define EVENT_POOL_SIZE (1024 * 1024) /**< Event pool size */
-#define TEST_ROUNDS (4 * 1024 * 1024) /**< Test rounds for each thread */
-#define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */
-
-/* Default values for command line arguments */
-#define SAMPLE_EVENT_PER_PRIO 0 /**< Allocate a separate sample event for
- each priority */
-#define HI_PRIO_EVENTS 0 /**< Number of high priority events */
-#define LO_PRIO_EVENTS 32 /**< Number of low priority events */
-#define HI_PRIO_QUEUES 16 /**< Number of high priority queues */
-#define LO_PRIO_QUEUES 64 /**< Number of low priority queues */
-
-#define EVENTS_PER_HI_PRIO_QUEUE 0 /**< Alloc HI_PRIO_QUEUES x HI_PRIO_EVENTS
- events */
-#define EVENTS_PER_LO_PRIO_QUEUE 1 /**< Alloc LO_PRIO_QUEUES x LO_PRIO_EVENTS
- events */
-ODP_STATIC_ASSERT(HI_PRIO_QUEUES <= MAX_QUEUES, "Too many HI priority queues");
-ODP_STATIC_ASSERT(LO_PRIO_QUEUES <= MAX_QUEUES, "Too many LO priority queues");
+#define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */
#define CACHE_ALIGN_ROUNDUP(x)\
((ODP_CACHE_LINE_SIZE) * \
@@ -55,28 +40,41 @@ ODP_STATIC_ASSERT(LO_PRIO_QUEUES <= MAX_QUEUES, "Too many LO priority queues");
#define HI_PRIO 0
#define LO_PRIO 1
+/* Test event forwarding mode */
+#define EVENT_FORWARD_RAND 0
+#define EVENT_FORWARD_INC 1
+#define EVENT_FORWARD_NONE 2
+
/** Test event types */
typedef enum {
- WARM_UP, /**< Warm up event */
- TRAFFIC, /**< Event used only as traffic load */
- SAMPLE /**< Event used to measure latency */
+ WARM_UP, /**< Warm-up event */
+ COOL_DOWN,/**< Last event on queue */
+ TRAFFIC, /**< Event used only as traffic load */
+ SAMPLE /**< Event used to measure latency */
} event_type_t;
/** Test event */
typedef struct {
- uint64_t ts; /**< Send timestamp */
+ odp_time_t time_stamp; /**< Send timestamp */
event_type_t type; /**< Message type */
int src_idx[NUM_PRIOS]; /**< Source ODP queue */
int prio; /**< Source queue priority */
+ int warm_up_rounds; /**< Number of completed warm-up rounds */
} test_event_t;
/** Test arguments */
typedef struct {
- int cpu_count; /**< CPU count */
+ unsigned int cpu_count; /**< CPU count */
odp_schedule_sync_t sync_type; /**< Scheduler sync type */
+ int forward_mode; /**< Event forwarding mode */
+ int num_group;
+ int isolate;
+ int test_rounds; /**< Number of test rounds (millions) */
+ int warm_up_rounds; /**< Number of warm-up rounds */
struct {
int queues; /**< Number of scheduling queues */
int events; /**< Number of events */
+ int sample_events;
odp_bool_t events_per_queue; /**< Allocate 'queues' x 'events'
test events */
} prio[NUM_PRIOS];
@@ -91,41 +89,76 @@ typedef struct {
uint64_t tot; /**< Total event latency. Sum of all events. */
uint64_t min; /**< Minimum event latency */
uint64_t max; /**< Maximum event latency */
+ uint64_t max_idx; /**< Index of the maximum latency sample event */
} test_stat_t;
/** Performance test statistics (per core) */
-typedef union {
+typedef struct ODP_ALIGNED_CACHE {
test_stat_t prio[NUM_PRIOS]; /**< Test statistics per priority */
-
- uint8_t pad[CACHE_ALIGN_ROUNDUP(NUM_PRIOS * sizeof(test_stat_t))];
-} core_stat_t ODP_ALIGNED_CACHE;
+} core_stat_t;
/** Test global variables */
typedef struct {
- core_stat_t core_stat[MAX_WORKERS]; /**< Core specific stats */
+ /** Core specific stats */
+ core_stat_t core_stat[ODP_THREAD_COUNT_MAX];
odp_barrier_t barrier; /**< Barrier for thread synchronization */
odp_pool_t pool; /**< Pool for allocating test events */
test_args_t args; /**< Parsed command line arguments */
odp_queue_t queue[NUM_PRIOS][MAX_QUEUES]; /**< Scheduled queues */
+
+ odp_schedule_group_t group[NUM_PRIOS][MAX_GROUPS];
+
} test_globals_t;
/**
* Clear all scheduled queues.
*
- * Retry to be sure that all buffers have been scheduled.
+ * Use special cool_down event to guarantee that queue is drained.
*/
-static void clear_sched_queues(void)
+static void clear_sched_queues(test_globals_t *globals)
{
odp_event_t ev;
+ odp_buffer_t buf;
+ test_event_t *event;
+ int i, j;
+ odp_queue_t fromq;
- while (1) {
- ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ /* Allocate the cool_down event. */
+ buf = odp_buffer_alloc(globals->pool);
+ if (buf == ODP_BUFFER_INVALID)
+ ODPH_ABORT("Buffer alloc failed.\n");
- if (ev == ODP_EVENT_INVALID)
- break;
+ event = odp_buffer_addr(buf);
+ event->type = COOL_DOWN;
+ ev = odp_buffer_to_event(buf);
- odp_event_free(ev);
+ for (i = 0; i < NUM_PRIOS; i++) {
+ for (j = 0; j < globals->args.prio[i].queues; j++) {
+ /* Enqueue cool_down event on each queue. */
+ if (odp_queue_enq(globals->queue[i][j], ev))
+ ODPH_ABORT("Queue enqueue failed.\n");
+
+ /* Invoke scheduler until cool_down event has been
+ * received. */
+ while (1) {
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+ buf = odp_buffer_from_event(ev);
+ event = odp_buffer_addr(buf);
+ if (event->type == COOL_DOWN)
+ break;
+ odp_event_free(ev);
+ }
+ }
}
+
+ /* Free the cool_down event. */
+ odp_event_free(ev);
+
+ /* Call odp_schedule() to trigger a release of any scheduler context. */
+ ev = odp_schedule(&fromq, ODP_SCHED_NO_WAIT);
+ if (ev != ODP_EVENT_INVALID)
+ ODPH_ABORT("Queue %" PRIu64 " not empty.\n",
+ odp_queue_to_u64(fromq));
}
/**
@@ -171,14 +204,14 @@ static int enqueue_events(int prio, int num_queues, int num_events,
ret = odp_buffer_alloc_multi(globals->pool, buf,
events_per_queue);
if (ret != events_per_queue) {
- LOG_ERR("Buffer alloc failed. Try increasing EVENT_POOL_SIZE.\n");
+ ODPH_ERR("Buffer alloc failed. Try increasing EVENT_POOL_SIZE.\n");
ret = ret < 0 ? 0 : ret;
odp_buffer_free_multi(buf, ret);
return -1;
}
for (j = 0; j < events_per_queue; j++) {
if (!odp_buffer_is_valid(buf[j])) {
- LOG_ERR("Buffer alloc failed\n");
+ ODPH_ERR("Buffer alloc failed\n");
odp_buffer_free_multi(buf, events_per_queue);
return -1;
}
@@ -187,9 +220,10 @@ static int enqueue_events(int prio, int num_queues, int num_events,
memset(event, 0, sizeof(test_event_t));
/* Latency isn't measured from the first processing
- * round. */
+ * rounds. */
if (num_samples > 0) {
event->type = WARM_UP;
+ event->warm_up_rounds = 0;
num_samples--;
} else {
event->type = TRAFFIC;
@@ -205,7 +239,7 @@ static int enqueue_events(int prio, int num_queues, int num_events,
events_per_queue -
enq_events);
if (ret < 0) {
- LOG_ERR("Queue enqueue failed.\n");
+ ODPH_ERR("Queue enqueue failed.\n");
return -1;
}
enq_events += ret;
@@ -230,7 +264,7 @@ static void print_results(test_globals_t *globals)
test_stat_t total;
test_args_t *args;
uint64_t avg;
- int i, j;
+ unsigned int i, j;
args = &globals->args;
stype = globals->args.sync_type;
@@ -239,6 +273,11 @@ static void print_results(test_globals_t *globals)
(stype == ODP_SCHED_SYNC_ATOMIC) ? "ATOMIC" :
((stype == ODP_SCHED_SYNC_ORDERED) ? "ORDERED" : "PARALLEL"));
+ printf(" Forwarding mode: %s\n",
+ (args->forward_mode == EVENT_FORWARD_RAND) ? "random" :
+ ((args->forward_mode == EVENT_FORWARD_INC) ? "incremental" :
+ "none"));
+
printf(" LO_PRIO queues: %i\n", args->prio[LO_PRIO].queues);
if (args->prio[LO_PRIO].events_per_queue)
printf(" LO_PRIO event per queue: %i\n",
@@ -246,20 +285,24 @@ static void print_results(test_globals_t *globals)
else
printf(" LO_PRIO events: %i\n", args->prio[LO_PRIO].events);
+ printf(" LO_PRIO sample events: %i\n", args->prio[LO_PRIO].sample_events);
+
printf(" HI_PRIO queues: %i\n", args->prio[HI_PRIO].queues);
if (args->prio[HI_PRIO].events_per_queue)
printf(" HI_PRIO event per queue: %i\n\n",
args->prio[HI_PRIO].events);
else
- printf(" HI_PRIO events: %i\n\n", args->prio[HI_PRIO].events);
+ printf(" HI_PRIO events: %i\n", args->prio[HI_PRIO].events);
+
+ printf(" HI_PRIO sample events: %i\n\n", args->prio[HI_PRIO].sample_events);
for (i = 0; i < NUM_PRIOS; i++) {
memset(&total, 0, sizeof(test_stat_t));
total.min = UINT64_MAX;
printf("%s priority\n"
- "Thread Avg[ns] Min[ns] Max[ns] Samples Total\n"
- "---------------------------------------------------------------\n",
+ "Thread Avg[ns] Min[ns] Max[ns] Samples Total Max idx\n"
+ "-----------------------------------------------------------------------\n",
i == HI_PRIO ? "HIGH" : "LOW");
for (j = 1; j <= args->cpu_count; j++) {
lat = &globals->core_stat[j].prio[i];
@@ -279,11 +322,11 @@ static void print_results(test_globals_t *globals)
avg = lat->events ? lat->tot / lat->sample_events : 0;
printf("%-8d %-10" PRIu64 " %-10" PRIu64 " "
- "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n",
+ "%-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 " %-10" PRIu64 "\n",
j, avg, lat->min, lat->max, lat->sample_events,
- lat->events);
+ lat->events, lat->max_idx);
}
- printf("---------------------------------------------------------------\n");
+ printf("-----------------------------------------------------------------------\n");
if (total.sample_events == 0) {
printf("Total N/A\n\n");
continue;
@@ -295,10 +338,42 @@ static void print_results(test_globals_t *globals)
}
}
+static int join_groups(test_globals_t *globals, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+ int i, num;
+ int num_group = globals->args.num_group;
+
+ if (num_group <= 0)
+ return 0;
+
+ num = num_group;
+ if (globals->args.isolate)
+ num = 2 * num_group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+
+ for (i = 0; i < num; i++) {
+ if (globals->args.isolate)
+ group = globals->group[i % 2][i / 2];
+ else
+ group = globals->group[0][i];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ ODPH_ERR("Group join failed %i (thr %i)\n", i, thr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/**
* Measure latency of scheduled ODP events
*
- * Schedule and enqueue events until 'TEST_ROUNDS' events have been processed.
+ * Schedule and enqueue events until 'test_rounds' events have been processed.
* Scheduling latency is measured only from type 'SAMPLE' events. Other events
* are simply enqueued back to the scheduling queues.
*
@@ -314,22 +389,30 @@ static void print_results(test_globals_t *globals)
*/
static int test_schedule(int thr, test_globals_t *globals)
{
+ odp_time_t time;
odp_event_t ev;
odp_buffer_t buf;
- odp_queue_t src_queue;
odp_queue_t dst_queue;
uint64_t latency;
- uint32_t i;
+ uint64_t i;
test_event_t *event;
test_stat_t *stats;
- int dst_idx;
+ int dst_idx, change_queue;
+ int warm_up_rounds = globals->args.warm_up_rounds;
+ uint64_t test_rounds = globals->args.test_rounds * (uint64_t)1000000;
memset(&globals->core_stat[thr], 0, sizeof(core_stat_t));
globals->core_stat[thr].prio[HI_PRIO].min = UINT64_MAX;
globals->core_stat[thr].prio[LO_PRIO].min = UINT64_MAX;
- for (i = 0; i < TEST_ROUNDS; i++) {
- ev = odp_schedule(&src_queue, ODP_SCHED_WAIT);
+ change_queue = globals->args.forward_mode != EVENT_FORWARD_NONE ? 1 : 0;
+
+ odp_barrier_wait(&globals->barrier);
+
+ for (i = 0; i < test_rounds; i++) {
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+
+ time = odp_time_global_strict();
buf = odp_buffer_from_event(ev);
event = odp_buffer_addr(buf);
@@ -337,10 +420,12 @@ static int test_schedule(int thr, test_globals_t *globals)
stats = &globals->core_stat[thr].prio[event->prio];
if (event->type == SAMPLE) {
- latency = odp_time_to_ns(odp_time_global()) - event->ts;
+ latency = odp_time_to_ns(time) - odp_time_to_ns(event->time_stamp);
- if (latency > stats->max)
+ if (latency > stats->max) {
stats->max = latency;
+ stats->max_idx = stats->sample_events;
+ }
if (latency < stats->min)
stats->min = latency;
stats->tot += latency;
@@ -352,23 +437,29 @@ static int test_schedule(int thr, test_globals_t *globals)
event->prio = !event->prio;
}
- if (odp_unlikely(event->type == WARM_UP))
- event->type = SAMPLE;
- else
+ if (odp_unlikely(event->type == WARM_UP)) {
+ event->warm_up_rounds++;
+ if (event->warm_up_rounds >= warm_up_rounds)
+ event->type = SAMPLE;
+ } else {
stats->events++;
+ }
- /* Move event to next queue */
- dst_idx = event->src_idx[event->prio] + 1;
+ /* Move event to next queue if forwarding is enabled */
+ if (change_queue)
+ dst_idx = event->src_idx[event->prio] + 1;
+ else
+ dst_idx = event->src_idx[event->prio];
if (dst_idx >= globals->args.prio[event->prio].queues)
dst_idx = 0;
event->src_idx[event->prio] = dst_idx;
dst_queue = globals->queue[event->prio][dst_idx];
if (event->type == SAMPLE)
- event->ts = odp_time_to_ns(odp_time_global());
+ event->time_stamp = odp_time_global_strict();
if (odp_queue_enq(dst_queue, ev)) {
- LOG_ERR("[%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR("[%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
@@ -378,26 +469,27 @@ static int test_schedule(int thr, test_globals_t *globals)
odp_schedule_pause();
while (1) {
+ odp_queue_t src_queue;
+
ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT);
if (ev == ODP_EVENT_INVALID)
break;
if (odp_queue_enq(src_queue, ev)) {
- LOG_ERR("[%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR("[%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
}
- odp_schedule_resume();
-
odp_barrier_wait(&globals->barrier);
- clear_sched_queues();
-
- if (thr == MAIN_THREAD)
+ if (thr == MAIN_THREAD) {
+ odp_schedule_resume();
+ clear_sched_queues(globals);
print_results(globals);
+ }
return 0;
}
@@ -416,7 +508,6 @@ static int run_thread(void *arg ODP_UNUSED)
test_globals_t *globals;
test_args_t *args;
int thr;
- int sample_events = 0;
thr = odp_thread_id();
@@ -424,31 +515,29 @@ static int run_thread(void *arg ODP_UNUSED)
globals = odp_shm_addr(shm);
if (globals == NULL) {
- LOG_ERR("Shared mem lookup failed\n");
+ ODPH_ERR("Shared mem lookup failed\n");
return -1;
}
+ if (join_groups(globals, thr))
+ return -1;
+
if (thr == MAIN_THREAD) {
args = &globals->args;
if (enqueue_events(HI_PRIO, args->prio[HI_PRIO].queues,
- args->prio[HI_PRIO].events, 1,
+ args->prio[HI_PRIO].events, args->prio[HI_PRIO].sample_events,
!args->prio[HI_PRIO].events_per_queue,
globals))
return -1;
- if (!args->prio[HI_PRIO].queues || args->sample_per_prio)
- sample_events = 1;
-
if (enqueue_events(LO_PRIO, args->prio[LO_PRIO].queues,
- args->prio[LO_PRIO].events, sample_events,
+ args->prio[LO_PRIO].events, args->prio[LO_PRIO].sample_events,
!args->prio[LO_PRIO].events_per_queue,
globals))
return -1;
}
- odp_barrier_wait(&globals->barrier);
-
if (test_schedule(thr, globals))
return -1;
@@ -465,15 +554,28 @@ static void usage(void)
"\n"
"Usage: ./odp_sched_latency [options]\n"
"Optional OPTIONS:\n"
- " -c, --count <number> CPU count\n"
- " -l, --lo-prio-queues <number> Number of low priority scheduled queues\n"
- " -t, --hi-prio-queues <number> Number of high priority scheduled queues\n"
- " -m, --lo-prio-events-per-queue <number> Number of events per low priority queue\n"
- " -n, --hi-prio-events-per-queue <number> Number of events per high priority queues\n"
- " -o, --lo-prio-events <number> Total number of low priority events (overrides the\n"
- " number of events per queue)\n"
- " -p, --hi-prio-events <number> Total number of high priority events (overrides the\n"
- " number of events per queue)\n"
+ " -c, --count <number> CPU count, 0=all available, default=1\n"
+ " -d, --duration <number> Test duration in scheduling rounds (millions), default=10, min=1\n"
+ " -f, --forward-mode <mode> Selection of target queue\n"
+ " 0: Random (default)\n"
+ " 1: Incremental\n"
+ " 2: Use source queue\n"
+ " -g, --num_group <num> Number of schedule groups. Round robins queues into groups.\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
+ " -i, --isolate <mode> Select if shared or isolated groups are used. Ignored when num_group <= 0.\n"
+ " 0: All queues share groups (default)\n"
+ " 1: Separate groups for high and low priority queues. Creates 2xnum_group groups.\n"
+ " -l, --lo-prio-queues <number> Number of low priority scheduled queues (default=64)\n"
+ " -t, --hi-prio-queues <number> Number of high priority scheduled queues (default=16)\n"
+ " -m, --lo-prio-events-per-queue <number> Number of events per low priority queue (default=32).\n"
+ " Does not include sample event.\n"
+ " -n, --hi-prio-events-per-queue <number> Number of events per high priority queues (default=0)\n"
+ " Does not include sample event.\n"
+ " -o, --lo-prio-events <number> Total number of low priority events. Overrides the\n"
+ " number of events per queue, does not include sample event.\n"
+ " -p, --hi-prio-events <number> Total number of high priority events. Overrides the\n"
+ " number of events per queue, does not include sample event.\n"
" -r --sample-per-prio Allocate a separate sample event for each priority. By default\n"
" a single sample event is used and its priority is changed after\n"
" each processing round.\n"
@@ -481,8 +583,8 @@ static void usage(void)
" 0: ODP_SCHED_SYNC_PARALLEL (default)\n"
" 1: ODP_SCHED_SYNC_ATOMIC\n"
" 2: ODP_SCHED_SYNC_ORDERED\n"
- " -h, --help Display help and exit.\n\n"
- );
+ " -w, --warm-up <number> Number of warm-up rounds, default=100, min=1\n"
+ " -h, --help Display help and exit.\n\n");
}
/**
@@ -500,33 +602,42 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
+ {"duration", required_argument, NULL, 'd'},
+ {"forward-mode", required_argument, NULL, 'f'},
+ {"num_group", required_argument, NULL, 'g'},
+ {"isolate", required_argument, NULL, 'i'},
{"lo-prio-queues", required_argument, NULL, 'l'},
{"hi-prio-queues", required_argument, NULL, 't'},
{"lo-prio-events-per-queue", required_argument, NULL, 'm'},
{"hi-prio-events-per-queue", required_argument, NULL, 'n'},
{"lo-prio-events", required_argument, NULL, 'o'},
{"hi-prio-events", required_argument, NULL, 'p'},
- {"sample-per-prio", no_argument, NULL, 'r'},
{"sync", required_argument, NULL, 's'},
+ {"warm-up", required_argument, NULL, 'w'},
+ {"sample-per-prio", no_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:s:l:t:m:n:o:p:rh";
-
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
+ static const char *shortopts = "+c:d:f:g:i:l:t:m:n:o:p:s:w:rh";
+ args->cpu_count = 1;
+ args->forward_mode = EVENT_FORWARD_RAND;
+ args->num_group = 0;
+ args->isolate = 0;
+ args->test_rounds = 10;
+ args->warm_up_rounds = 100;
args->sync_type = ODP_SCHED_SYNC_PARALLEL;
- args->sample_per_prio = SAMPLE_EVENT_PER_PRIO;
- args->prio[LO_PRIO].queues = LO_PRIO_QUEUES;
- args->prio[HI_PRIO].queues = HI_PRIO_QUEUES;
- args->prio[LO_PRIO].events = LO_PRIO_EVENTS;
- args->prio[HI_PRIO].events = HI_PRIO_EVENTS;
- args->prio[LO_PRIO].events_per_queue = EVENTS_PER_LO_PRIO_QUEUE;
- args->prio[HI_PRIO].events_per_queue = EVENTS_PER_HI_PRIO_QUEUE;
-
- opterr = 0; /* Do not issue errors on helper options */
+ args->sample_per_prio = 0;
+ args->prio[LO_PRIO].queues = 64;
+ args->prio[HI_PRIO].queues = 16;
+ args->prio[LO_PRIO].events = 32;
+ args->prio[HI_PRIO].events = 0;
+ args->prio[LO_PRIO].events_per_queue = 1;
+ args->prio[HI_PRIO].events_per_queue = 0;
+ args->prio[LO_PRIO].sample_events = 0;
+ args->prio[HI_PRIO].sample_events = 1;
+
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -537,6 +648,18 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
case 'c':
args->cpu_count = atoi(optarg);
break;
+ case 'd':
+ args->test_rounds = atoi(optarg);
+ break;
+ case 'f':
+ args->forward_mode = atoi(optarg);
+ break;
+ case 'g':
+ args->num_group = atoi(optarg);
+ break;
+ case 'i':
+ args->isolate = atoi(optarg);
+ break;
case 'l':
args->prio[LO_PRIO].queues = atoi(optarg);
break;
@@ -571,6 +694,9 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
case 'r':
args->sample_per_prio = 1;
break;
+ case 'w':
+ args->warm_up_rounds = atoi(optarg);
+ break;
case 'h':
usage();
exit(EXIT_SUCCESS);
@@ -582,17 +708,115 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
}
/* Make sure arguments are valid */
- if (args->cpu_count > MAX_WORKERS)
- args->cpu_count = MAX_WORKERS;
+ /* -1 for main thread */
+ if (args->cpu_count > ODP_THREAD_COUNT_MAX - 1)
+ args->cpu_count = ODP_THREAD_COUNT_MAX - 1;
if (args->prio[LO_PRIO].queues > MAX_QUEUES)
args->prio[LO_PRIO].queues = MAX_QUEUES;
if (args->prio[HI_PRIO].queues > MAX_QUEUES)
args->prio[HI_PRIO].queues = MAX_QUEUES;
+ if (args->test_rounds < 1)
+ args->test_rounds = 1;
if (!args->prio[HI_PRIO].queues && !args->prio[LO_PRIO].queues) {
printf("No queues configured\n");
usage();
exit(EXIT_FAILURE);
}
+ if (args->forward_mode > EVENT_FORWARD_NONE ||
+ args->forward_mode < EVENT_FORWARD_RAND) {
+ printf("Invalid forwarding mode\n");
+ usage();
+ exit(EXIT_FAILURE);
+ }
+
+ if (args->num_group > MAX_GROUPS) {
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
+ exit(EXIT_FAILURE);
+ }
+
+ if (args->prio[HI_PRIO].queues == 0 || args->sample_per_prio)
+ args->prio[LO_PRIO].sample_events = 1;
+}
+
+static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
+{
+ uint32_t i;
+
+ for (i = 0; i < num; i++) {
+ uint32_t new_index;
+ odp_queue_t swap_queue;
+ odp_queue_t cur_queue = queues[i];
+
+ odp_random_test_data((uint8_t *)&new_index, sizeof(new_index),
+ seed);
+ new_index = new_index % num;
+ swap_queue = queues[new_index];
+
+ queues[new_index] = cur_queue;
+ queues[i] = swap_queue;
+ }
+}
+
+static int create_groups(test_globals_t *globals, odp_schedule_group_t group[], int num)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t zeromask;
+ int i, j, max;
+
+ if (num <= 0)
+ return 0;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ return 0;
+ }
+
+ max = sched_capa.max_groups - 3;
+ if (num > max) {
+ printf("Too many schedule groups %i (max %u)\n", num, max);
+ return 0;
+ }
+
+ for (i = 0; i < NUM_PRIOS; i++)
+ for (j = 0; j < MAX_GROUPS; j++)
+ globals->group[i][j] = ODP_SCHED_GROUP_INVALID;
+
+ odp_thrmask_zero(&zeromask);
+
+ for (i = 0; i < num; i++) {
+ group[i] = odp_schedule_group_create("test_group", &zeromask);
+
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Group create failed %i\n", i);
+ break;
+ }
+
+ if (globals->args.isolate) {
+ globals->group[i % 2][i / 2] = group[i];
+ } else {
+ globals->group[0][i] = group[i];
+ globals->group[1][i] = group[i];
+ }
+ }
+
+ return i;
+}
+
+static int destroy_groups(odp_schedule_group_t group[], int num)
+{
+ int i;
+
+ if (num <= 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ if (odp_schedule_group_destroy(group[i])) {
+ ODPH_ERR("Group destroy failed %i\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
}
/**
@@ -601,28 +825,45 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
int main(int argc, char *argv[])
{
odp_instance_t instance;
- odph_odpthread_t *thread_tbl;
- odph_odpthread_params_t thr_params;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_cpumask_t cpumask;
- odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
odp_pool_param_t params;
- odp_shm_t shm;
test_globals_t *globals;
test_args_t args;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- int i, j;
- int ret = 0;
+ uint32_t pool_size;
+ int i, j, ret;
+ int num_group, tot_group;
+ odp_schedule_group_t group[2 * MAX_GROUPS];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ int err = 0;
int num_workers = 0;
+ odp_shm_t shm = ODP_SHM_INVALID;
+ odp_pool_t pool = ODP_POOL_INVALID;
printf("\nODP scheduling latency benchmark starts\n\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&args, 0, sizeof(args));
parse_args(argc, argv, &args);
/* ODP global init */
- if (odp_init_global(&instance, NULL, NULL)) {
- LOG_ERR("ODP global init failed.\n");
- return -1;
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("ODP global init failed.\n");
+ exit(EXIT_FAILURE);
}
/*
@@ -630,20 +871,17 @@ int main(int argc, char *argv[])
* setting up resources for worker threads.
*/
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- LOG_ERR("ODP global init failed.\n");
- return -1;
+ ODPH_ERR("ODP global init failed.\n");
+ exit(EXIT_FAILURE);
}
- printf("\n");
- printf("ODP system info\n");
- printf("---------------\n");
- printf("ODP API version: %s\n", odp_version_api_str());
- printf("ODP impl name: %s\n", odp_version_impl_name());
- printf("ODP impl details: %s\n", odp_version_impl_str());
- printf("CPU model: %s\n", odp_cpu_model_str());
- printf("CPU freq (hz): %" PRIu64 "\n", odp_cpu_hz_max());
- printf("Cache line size: %i\n", odp_sys_cache_line_size());
- printf("Max CPU count: %i\n", odp_cpu_count());
+ odp_sys_info_print();
+
+ num_group = args.num_group;
+
+ tot_group = 0;
+ if (num_group > 0)
+ tot_group = args.isolate ? 2 * num_group : num_group;
/* Get default worker cpumask */
if (args.cpu_count)
@@ -654,44 +892,67 @@ int main(int argc, char *argv[])
(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
- printf("Worker threads: %i\n", num_workers);
- printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("CPU mask: %s\n\n", cpumaskstr);
-
- thread_tbl = calloc(sizeof(odph_odpthread_t), num_workers);
- if (!thread_tbl) {
- LOG_ERR("no memory for thread_tbl\n");
- return -1;
- }
+ printf("Test options:\n");
+ printf(" Worker threads: %i\n", num_workers);
+ printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf(" CPU mask: %s\n", cpumaskstr);
+ printf(" Test rounds: %iM\n", args.test_rounds);
+ printf(" Warm-up rounds: %i\n", args.warm_up_rounds);
+ printf(" Isolated groups: %i\n", args.isolate);
+ printf(" Number of groups: %i\n", num_group);
+ printf(" Created groups: %i\n", tot_group);
+ printf("\n");
- shm = odp_shm_reserve("test_globals",
- sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve("test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- LOG_ERR("Shared memory reserve failed.\n");
- return -1;
+ ODPH_ERR("Shared memory reserve failed.\n");
+ err = -1;
+ goto error;
}
globals = odp_shm_addr(shm);
memset(globals, 0, sizeof(test_globals_t));
memcpy(&globals->args, &args, sizeof(test_args_t));
+ odp_schedule_config(NULL);
+
/*
* Create event pool
*/
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("pool capa failed\n");
+ err = -1;
+ goto error;
+ }
+
+ pool_size = EVENT_POOL_SIZE;
+ if (pool_capa.buf.max_num && pool_capa.buf.max_num < EVENT_POOL_SIZE)
+ pool_size = pool_capa.buf.max_num;
+
odp_pool_param_init(&params);
params.buf.size = sizeof(test_event_t);
params.buf.align = 0;
- params.buf.num = EVENT_POOL_SIZE;
+ params.buf.num = pool_size;
params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("event_pool", &params);
if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Pool create failed.\n");
- return -1;
+ ODPH_ERR("Pool create failed.\n");
+ err = -1;
+ goto error;
}
globals->pool = pool;
+ /* Create groups */
+ ret = create_groups(globals, group, tot_group);
+ if (ret != tot_group) {
+ ODPH_ERR("Group create failed.\n");
+ tot_group = ret;
+ err = -1;
+ goto error;
+ }
+
/*
* Create queues for schedule test
*/
@@ -699,12 +960,17 @@ int main(int argc, char *argv[])
char name[] = "sched_XX_YY";
odp_queue_t queue;
odp_queue_param_t param;
+ odp_schedule_group_t grp;
int prio;
+ grp = ODP_SCHED_GROUP_ALL;
+ if (num_group < 0)
+ grp = ODP_SCHED_GROUP_WORKER;
+
if (i == HI_PRIO)
- prio = ODP_SCHED_PRIO_HIGHEST;
+ prio = odp_schedule_max_prio();
else
- prio = ODP_SCHED_PRIO_LOWEST;
+ prio = odp_schedule_min_prio();
name[6] = '0' + (prio / 10);
name[7] = '0' + prio - (10 * (prio / 10));
@@ -713,36 +979,53 @@ int main(int argc, char *argv[])
param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = prio;
param.sched.sync = args.sync_type;
- param.sched.group = ODP_SCHED_GROUP_ALL;
for (j = 0; j < args.prio[i].queues; j++) {
name[9] = '0' + j / 10;
name[10] = '0' + j - 10 * (j / 10);
+ /* Round robin queues into groups */
+ if (num_group > 0)
+ grp = globals->group[i][j % num_group];
+
+ param.sched.group = grp;
+
queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Scheduled queue create failed.\n");
- return -1;
+ ODPH_ERR("Scheduled queue create failed.\n");
+ exit(EXIT_FAILURE);
}
globals->queue[i][j] = queue;
}
+ if (args.forward_mode == EVENT_FORWARD_RAND) {
+ uint64_t seed = i;
+
+ randomize_queues(globals->queue[i], args.prio[i].queues,
+ &seed);
+ }
}
odp_barrier_init(&globals->barrier, num_workers);
/* Create and launch worker threads */
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- thr_params.start = run_thread;
- thr_params.arg = NULL;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread;
+ thr_param.arg = NULL;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* Wait for worker threads to terminate */
- odph_odpthreads_join(thread_tbl);
- free(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
printf("ODP scheduling latency test complete\n\n");
@@ -754,14 +1037,36 @@ int main(int argc, char *argv[])
for (j = 0; j < num_queues; j++) {
queue = globals->queue[i][j];
- ret += odp_queue_destroy(queue);
+ if (odp_queue_destroy(queue)) {
+ ODPH_ERR("Queue destroy failed [%i][%i]\n", i, j);
+ err = -1;
+ break;
+ }
+ }
+ }
+
+error:
+ if (destroy_groups(group, tot_group)) {
+ ODPH_ERR("Group destroy failed\n");
+ err = -1;
+ }
+
+ if (pool != ODP_POOL_INVALID) {
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Pool destroy failed\n");
+ err = -1;
+ }
+ }
+
+ if (shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("SHM destroy failed\n");
+ err = -1;
}
}
- ret += odp_shm_free(shm);
- ret += odp_pool_destroy(pool);
- ret += odp_term_local();
- ret += odp_term_global(instance);
+ err += odp_term_local();
+ err += odp_term_global(instance);
- return ret;
+ return err;
}
diff --git a/test/common_plat/performance/odp_sched_latency_run.sh b/test/performance/odp_sched_latency_run.sh
index 6048f5816..b051c1a4e 100755
--- a/test/common_plat/performance/odp_sched_latency_run.sh
+++ b/test/performance/odp_sched_latency_run.sh
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Copyright (c) 2016, Linaro Limited
+# Copyright (c) 2016-2018, Linaro Limited
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
@@ -14,9 +14,13 @@ ALL=0
run()
{
echo odp_sched_latency_run starts requesting $1 worker threads
- echo ===============================================
+ echo =========================================================
- $TEST_DIR/odp_sched_latency${EXEEXT} -c $1 || exit $?
+ if [ $(nproc) -lt $1 ]; then
+ echo "Not enough CPU cores. Skipping test."
+ else
+ $TEST_DIR/odp_sched_latency${EXEEXT} -c $1 -d 1 || exit $?
+ fi
}
run 1
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
new file mode 100644
index 000000000..47f703338
--- /dev/null
+++ b/test/performance/odp_sched_perf.c
@@ -0,0 +1,1518 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_sched_perf.c
+ *
+ * Performance test application for scheduling
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* Needed for sigaction */
+#endif
+
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_QUEUES (256 * 1024)
+#define MAX_GROUPS 256
+
+/* Max time to wait for new events in nanoseconds */
+#define MAX_SCHED_WAIT_NS (10 * ODP_TIME_SEC_IN_NS)
+
+/* Scheduling round interval to check for MAX_SCHED_WAIT_NS */
+#define TIME_CHECK_INTERVAL (1024 * 1024)
+
+/* Round up 'X' to a multiple of 'NUM' */
+#define ROUNDUP(X, NUM) ((NUM) * (((X) + (NUM) - 1) / (NUM)))
+
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint32_t num_queue;
+ uint32_t num_low;
+ uint32_t num_high;
+ uint32_t num_dummy;
+ uint32_t num_event;
+ uint32_t num_sched;
+ int num_group;
+ uint32_t num_join;
+ uint32_t max_burst;
+ odp_pool_type_t pool_type;
+ int queue_type;
+ int forward;
+ int fairness;
+ uint32_t event_size;
+ uint32_t queue_size;
+ uint32_t tot_queue;
+ uint32_t tot_event;
+ int touch_data;
+ uint32_t rd_words;
+ uint32_t rw_words;
+ uint32_t ctx_size;
+ uint32_t ctx_rd_words;
+ uint32_t ctx_rw_words;
+ uint32_t uarea_rd;
+ uint32_t uarea_rw;
+ uint32_t uarea_size;
+ uint64_t wait_ns;
+ int verbose;
+
+} test_options_t;
+
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t enqueues;
+ uint64_t events;
+ uint64_t nsec;
+ uint64_t cycles;
+ uint64_t waits;
+ uint64_t dummy_sum;
+ uint8_t failed;
+
+} test_stat_t;
+
+typedef struct thread_arg_t {
+ void *global;
+ int first_group;
+
+} thread_arg_t;
+
+typedef struct test_global_t {
+ test_options_t test_options;
+ odp_schedule_config_t schedule_config;
+ odp_barrier_t barrier;
+ odp_pool_t pool;
+ odp_cpumask_t cpumask;
+ odp_shm_t ctx_shm;
+ odp_queue_t queue[MAX_QUEUES];
+ odp_schedule_group_t group[MAX_GROUPS];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+ thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ odp_atomic_u32_t num_worker;
+ odp_atomic_u32_t exit_threads;
+
+} test_global_t;
+
+typedef struct {
+ odp_queue_t next;
+ odp_atomic_u64_t count;
+} queue_context_t;
+
+static test_global_t *test_globals;
+
+static void sig_handler(int signum ODP_UNUSED)
+{
+ odp_atomic_store_u32(&test_globals->exit_threads, 1);
+}
+
+static int setup_sig_handler(void)
+{
+ struct sigaction action = { .sa_handler = sig_handler };
+
+ if (sigemptyset(&action.sa_mask) || sigaction(SIGINT, &action, NULL))
+ return -1;
+
+ return 0;
+}
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Scheduler performance test\n"
+ "\n"
+ "Usage: odp_sched_perf [options]\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1.\n"
+ " -q, --num_queue Number of queues. Default: 1.\n"
+ " -L, --num_low Number of lowest priority queues out of '--num_queue' queues. Rest of\n"
+ " the queues are default (or highest) priority. Default: 0.\n"
+ " -H, --num_high Number of highest priority queues out of '--num_queue' queues. Rest of\n"
+ " the queues are default (or lowest) priority. Default: 0.\n"
+ " -d, --num_dummy Number of empty queues. Default: 0.\n"
+ " -e, --num_event Number of events per queue. Default: 100.\n"
+ " -s, --num_sched Number of events to schedule per thread. If zero, the application runs\n"
+ " until SIGINT is received. Default: 100 000.\n"
+ " -g, --num_group Number of schedule groups. Round robins threads and queues into groups.\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
+ " -j, --num_join Number of groups a thread joins. Threads are divide evenly into groups,\n"
+ " if num_cpu is multiple of num_group and num_group is multiple of num_join.\n"
+ " 0: join all groups (default)\n"
+ " -b, --burst Maximum number of events per operation. Default: 100.\n"
+ " -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n"
+ " -f, --forward 0: Keep event in the original queue, 1: Forward event to the next queue. Default: 0.\n"
+ " -a, --fairness 0: Don't count events per queue, 1: Count and report events relative to average. Default: 0.\n"
+ " -w, --wait_ns Number of nsec to wait before enqueueing events. Default: 0.\n"
+ " -k, --ctx_rd_words Number of queue context words (uint64_t) to read on every event. Default: 0.\n"
+ " -l, --ctx_rw_words Number of queue context words (uint64_t) to modify on every event. Default: 0.\n"
+ " -n, --rd_words Number of event data words (uint64_t) to read before enqueueing it. Default: 0.\n"
+ " -m, --rw_words Number of event data words (uint64_t) to modify before enqueueing it. Default: 0.\n"
+ " -u, --uarea_rd Number of user area words (uint64_t) to read on every event. Default: 0.\n"
+ " -U, --uarea_rw Number of user area words (uint64_t) to modify on every event. Default: 0.\n"
+ " -p, --pool_type Pool type. 0: buffer, 1: packet. Default: 0.\n"
+ " -v, --verbose Verbose output.\n"
+ " -h, --help This help\n"
+ "\n");
+}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt, long_index, num_group, num_join;
+ int ret = 0;
+ uint32_t ctx_size = 0;
+ int pool_type = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_queue", required_argument, NULL, 'q'},
+ {"num_low", required_argument, NULL, 'L'},
+ {"num_high", required_argument, NULL, 'H'},
+ {"num_dummy", required_argument, NULL, 'd'},
+ {"num_event", required_argument, NULL, 'e'},
+ {"num_sched", required_argument, NULL, 's'},
+ {"num_group", required_argument, NULL, 'g'},
+ {"num_join", required_argument, NULL, 'j'},
+ {"burst", required_argument, NULL, 'b'},
+ {"type", required_argument, NULL, 't'},
+ {"forward", required_argument, NULL, 'f'},
+ {"fairness", required_argument, NULL, 'a'},
+ {"wait_ns", required_argument, NULL, 'w'},
+ {"ctx_rd_words", required_argument, NULL, 'k'},
+ {"ctx_rw_words", required_argument, NULL, 'l'},
+ {"rd_words", required_argument, NULL, 'n'},
+ {"rw_words", required_argument, NULL, 'm'},
+ {"uarea_rd", required_argument, NULL, 'u'},
+ {"uarea_rw", required_argument, NULL, 'U'},
+ {"pool_type", required_argument, NULL, 'p'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:a:w:k:l:n:m:p:u:U:vh";
+
+ test_options->num_cpu = 1;
+ test_options->num_queue = 1;
+ test_options->num_low = 0;
+ test_options->num_high = 0;
+ test_options->num_dummy = 0;
+ test_options->num_event = 100;
+ test_options->num_sched = 100000;
+ test_options->num_group = 0;
+ test_options->num_join = 0;
+ test_options->max_burst = 100;
+ test_options->queue_type = 0;
+ test_options->forward = 0;
+ test_options->fairness = 0;
+ test_options->ctx_rd_words = 0;
+ test_options->ctx_rw_words = 0;
+ test_options->rd_words = 0;
+ test_options->rw_words = 0;
+ test_options->uarea_rd = 0;
+ test_options->uarea_rw = 0;
+ test_options->wait_ns = 0;
+ test_options->verbose = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'q':
+ test_options->num_queue = atoi(optarg);
+ break;
+ case 'L':
+ test_options->num_low = atoi(optarg);
+ break;
+ case 'H':
+ test_options->num_high = atoi(optarg);
+ break;
+ case 'd':
+ test_options->num_dummy = atoi(optarg);
+ break;
+ case 'e':
+ test_options->num_event = atoi(optarg);
+ break;
+ case 's':
+ test_options->num_sched = atoi(optarg);
+ break;
+ case 'g':
+ test_options->num_group = atoi(optarg);
+ break;
+ case 'j':
+ test_options->num_join = atoi(optarg);
+ break;
+ case 'b':
+ test_options->max_burst = atoi(optarg);
+ break;
+ case 't':
+ test_options->queue_type = atoi(optarg);
+ break;
+ case 'f':
+ test_options->forward = atoi(optarg);
+ break;
+ case 'a':
+ test_options->fairness = atoi(optarg);
+ break;
+ case 'k':
+ test_options->ctx_rd_words = atoi(optarg);
+ break;
+ case 'l':
+ test_options->ctx_rw_words = atoi(optarg);
+ break;
+ case 'n':
+ test_options->rd_words = atoi(optarg);
+ break;
+ case 'm':
+ test_options->rw_words = atoi(optarg);
+ break;
+ case 'u':
+ test_options->uarea_rd = atoi(optarg);
+ break;
+ case 'U':
+ test_options->uarea_rw = atoi(optarg);
+ break;
+ case 'p':
+ pool_type = atoi(optarg);
+ break;
+ case 'w':
+ test_options->wait_ns = atoll(optarg);
+ break;
+ case 'v':
+ test_options->verbose = 1;
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+ if (pool_type == 0) {
+ test_options->pool_type = ODP_POOL_BUFFER;
+ } else if (pool_type == 1) {
+ test_options->pool_type = ODP_POOL_PACKET;
+ } else {
+ ODPH_ERR("Invalid pool type: %d.\n", pool_type);
+ ret = -1;
+ }
+
+ test_options->touch_data = test_options->rd_words ||
+ test_options->rw_words;
+
+ if ((test_options->num_queue + test_options->num_dummy) > MAX_QUEUES) {
+ ODPH_ERR("Too many queues. Max supported %i.\n", MAX_QUEUES);
+ ret = -1;
+ }
+
+ if ((test_options->num_low + test_options->num_high) > test_options->num_queue) {
+ ODPH_ERR("Number of low/high prio %u/%u exceed number of queues %u.\n",
+ test_options->num_low, test_options->num_high, test_options->num_queue);
+ ret = -1;
+ }
+
+ num_group = test_options->num_group;
+ num_join = test_options->num_join;
+ if (num_group > MAX_GROUPS) {
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
+ ret = -1;
+ }
+
+ if (num_group > 0 && num_join > num_group) {
+ ODPH_ERR("num_join (%i) larger than num_group (%i).\n", num_join, num_group);
+ ret = -1;
+ }
+
+ if (num_join && num_group > (int)(test_options->num_cpu * num_join)) {
+ printf("WARNING: Too many groups (%i). Some groups (%i) are not served.\n\n",
+ num_group, num_group - (test_options->num_cpu * num_join));
+
+ if (test_options->forward) {
+ printf("Error: Cannot forward when some queues are not served.\n");
+ ret = -1;
+ }
+ }
+
+ test_options->tot_queue = test_options->num_queue +
+ test_options->num_dummy;
+ test_options->tot_event = test_options->num_queue *
+ test_options->num_event;
+
+ test_options->queue_size = test_options->num_event;
+
+ if (test_options->forward) {
+ /* When forwarding, all events may end up into
+ * a single queue */
+ test_options->queue_size = test_options->tot_event;
+ }
+
+ if (test_options->forward || test_options->fairness)
+ ctx_size = sizeof(queue_context_t);
+
+ if (test_options->ctx_rd_words || test_options->ctx_rw_words) {
+ /* Round up queue handle size to a multiple of 8 for correct
+ * context data alignment */
+ ctx_size = ROUNDUP(ctx_size, 8);
+ ctx_size += 8 * test_options->ctx_rd_words;
+ ctx_size += 8 * test_options->ctx_rw_words;
+ }
+
+ /* When context data is modified, round up to cache line size to avoid
+ * false sharing */
+ if (test_options->fairness || test_options->ctx_rw_words)
+ ctx_size = ROUNDUP(ctx_size, ODP_CACHE_LINE_SIZE);
+
+ test_options->ctx_size = ctx_size;
+ test_options->uarea_size = 8 * (test_options->uarea_rd + test_options->uarea_rw);
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+ /* One thread used for the main thread */
+ if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ printf("Error: Too many workers. Maximum is %i.\n",
+ ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+ printf("Error: Too many workers. Max supported %i\n.", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ odp_barrier_init(&global->barrier, num_cpu);
+
+ return 0;
+}
+
+static int create_pool(test_global_t *global)
+{
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ uint32_t max_num, max_size, max_uarea;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t num_dummy = test_options->num_dummy;
+ uint32_t num_event = test_options->num_event;
+ uint32_t num_sched = test_options->num_sched;
+ uint32_t max_burst = test_options->max_burst;
+ uint32_t tot_queue = test_options->tot_queue;
+ uint32_t tot_event = test_options->tot_event;
+ uint32_t queue_size = test_options->queue_size;
+ int num_group = test_options->num_group;
+ uint32_t num_join = test_options->num_join;
+ int forward = test_options->forward;
+ uint64_t wait_ns = test_options->wait_ns;
+ uint32_t event_size = 16;
+ int touch_data = test_options->touch_data;
+ uint32_t ctx_size = test_options->ctx_size;
+ uint32_t uarea_size = test_options->uarea_size;
+
+ if (touch_data) {
+ event_size = test_options->rd_words + test_options->rw_words;
+ event_size = 8 * event_size;
+ }
+ test_options->event_size = event_size;
+
+ printf("\nScheduler performance test\n");
+ printf(" num sched %u\n", num_sched);
+ printf(" num cpu %u\n", num_cpu);
+ printf(" num queues %u\n", num_queue);
+ printf(" num lowest prio queues %u\n", test_options->num_low);
+ printf(" num highest prio queues %u\n", test_options->num_high);
+ printf(" num empty queues %u\n", num_dummy);
+ printf(" total queues %u\n", tot_queue);
+ printf(" num groups %i", num_group);
+ if (num_group == -1)
+ printf(" (ODP_SCHED_GROUP_WORKER)\n");
+ else if (num_group == 0)
+ printf(" (ODP_SCHED_GROUP_ALL)\n");
+ else
+ printf("\n");
+
+ printf(" num join %u\n", num_join);
+ printf(" forward events %i\n", forward ? 1 : 0);
+ printf(" wait nsec %" PRIu64 "\n", wait_ns);
+ printf(" events per queue %u\n", num_event);
+ printf(" queue size %u\n", queue_size);
+ printf(" max burst size %u\n", max_burst);
+ printf(" total events %u\n", tot_event);
+ printf(" event size %u bytes", event_size);
+ if (touch_data)
+ printf(" (rd: %u, rw: %u)", 8 * test_options->rd_words, 8 * test_options->rw_words);
+ printf("\n");
+
+ printf(" context size %u bytes", ctx_size);
+ if (test_options->ctx_rd_words || test_options->ctx_rw_words) {
+ printf(" (rd: %u, rw: %u)",
+ 8 * test_options->ctx_rd_words,
+ 8 * test_options->ctx_rw_words);
+ }
+ printf("\n");
+
+ printf(" user area size %u bytes", uarea_size);
+ if (uarea_size)
+ printf(" (rd: %u, rw: %u)", 8 * test_options->uarea_rd, 8 * test_options->uarea_rw);
+ printf("\n");
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Error: pool capa failed\n");
+ return -1;
+ }
+
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ printf(" pool type buffer\n");
+ max_num = pool_capa.buf.max_num;
+ max_size = pool_capa.buf.max_size;
+ max_uarea = pool_capa.buf.max_uarea_size;
+ } else {
+ printf(" pool type packet\n");
+ max_num = pool_capa.pkt.max_num;
+ max_size = pool_capa.pkt.max_seg_len;
+ max_uarea = pool_capa.pkt.max_uarea_size;
+ }
+
+ if (max_num && tot_event > max_num) {
+ ODPH_ERR("Error: max events supported %u\n", max_num);
+ return -1;
+ }
+
+ if (max_size && event_size > max_size) {
+ ODPH_ERR("Error: max supported event size %u\n", max_size);
+ return -1;
+ }
+
+ if (uarea_size > max_uarea) {
+ ODPH_ERR("Error: max supported user area size %u\n", max_uarea);
+ return -1;
+ }
+
+ odp_pool_param_init(&pool_param);
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = tot_event;
+ pool_param.buf.size = event_size;
+ pool_param.buf.align = 8;
+ pool_param.buf.uarea_size = uarea_size;
+ } else {
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = tot_event;
+ pool_param.pkt.len = event_size;
+ pool_param.pkt.seg_len = event_size;
+ pool_param.pkt.align = 8;
+ pool_param.pkt.uarea_size = uarea_size;
+ }
+
+ pool = odp_pool_create("sched perf", &pool_param);
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error: pool create failed\n");
+ return -1;
+ }
+
+ global->pool = pool;
+
+ return 0;
+}
+
+static int create_groups(test_global_t *global)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t thrmask;
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_group = test_options->num_group;
+
+ if (test_options->num_group <= 0)
+ return 0;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ printf("Error: schedule capability failed\n");
+ return -1;
+ }
+
+ if (num_group > sched_capa.max_groups) {
+ printf("Error: Too many sched groups (max_groups capa %u)\n",
+ sched_capa.max_groups);
+ return -1;
+ }
+
+ odp_thrmask_zero(&thrmask);
+
+ for (i = 0; i < num_group; i++) {
+ odp_schedule_group_t group;
+
+ group = odp_schedule_group_create("test_group", &thrmask);
+
+ if (group == ODP_SCHED_GROUP_INVALID) {
+ printf("Error: Group create failed %u\n", i);
+ return -1;
+ }
+
+ global->group[i] = group;
+ }
+
+ return 0;
+}
+
+static int create_queues(test_global_t *global)
+{
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ odp_schedule_sync_t sync;
+ odp_schedule_prio_t prio;
+ const char *type_str;
+ uint32_t i, j, first;
+ test_options_t *test_options = &global->test_options;
+ uint32_t event_size = test_options->event_size;
+ uint32_t num_event = test_options->num_event;
+ uint32_t queue_size = test_options->queue_size;
+ uint32_t tot_queue = test_options->tot_queue;
+ uint32_t num_low = test_options->num_low;
+ uint32_t num_high = test_options->num_high;
+ uint32_t num_default = test_options->num_queue - num_low - num_high;
+ int num_group = test_options->num_group;
+ int type = test_options->queue_type;
+ odp_pool_t pool = global->pool;
+ uint8_t *ctx = NULL;
+ uint32_t ctx_size = test_options->ctx_size;
+
+ if (type == 0) {
+ type_str = "parallel";
+ sync = ODP_SCHED_SYNC_PARALLEL;
+ } else if (type == 1) {
+ type_str = "atomic";
+ sync = ODP_SCHED_SYNC_ATOMIC;
+ } else {
+ type_str = "ordered";
+ sync = ODP_SCHED_SYNC_ORDERED;
+ }
+
+ printf(" queue type %s\n\n", type_str);
+
+ if (tot_queue > global->schedule_config.num_queues) {
+ printf("Max queues supported %u\n",
+ global->schedule_config.num_queues);
+ return -1;
+ }
+
+ if (global->schedule_config.queue_size &&
+ queue_size > global->schedule_config.queue_size) {
+ printf("Max queue size %u\n",
+ global->schedule_config.queue_size);
+ return -1;
+ }
+
+ if (ctx_size) {
+ ctx = odp_shm_addr(global->ctx_shm);
+ if (ctx == NULL) {
+ printf("Bad queue context\n");
+ return -1;
+ }
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = sync;
+ queue_param.size = queue_size;
+ if (num_group == -1)
+ queue_param.sched.group = ODP_SCHED_GROUP_WORKER;
+ else
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ first = test_options->num_dummy;
+
+ for (i = 0; i < tot_queue; i++) {
+ if (num_group > 0) {
+ odp_schedule_group_t group;
+
+ /* Divide all queues evenly into groups */
+ group = global->group[i % num_group];
+ queue_param.sched.group = group;
+ }
+
+ /* Create low, high and default queues in a mixed order. Dummy queues are created
+ * first and with default priority. */
+ prio = odp_schedule_default_prio();
+ if (i >= first) {
+ switch (i % 3) {
+ case 0:
+ if (num_low) {
+ num_low--;
+ prio = odp_schedule_min_prio();
+ } else if (num_high) {
+ num_high--;
+ prio = odp_schedule_max_prio();
+ } else {
+ num_default--;
+ }
+ break;
+ case 1:
+ if (num_high) {
+ num_high--;
+ prio = odp_schedule_max_prio();
+ } else if (num_low) {
+ num_low--;
+ prio = odp_schedule_min_prio();
+ } else {
+ num_default--;
+ }
+ break;
+ default:
+ if (num_default) {
+ num_default--;
+ } else if (num_high) {
+ num_high--;
+ prio = odp_schedule_max_prio();
+ } else {
+ num_low--;
+ prio = odp_schedule_min_prio();
+ }
+ break;
+ }
+ }
+
+ queue_param.sched.prio = prio;
+
+ queue = odp_queue_create(NULL, &queue_param);
+
+ global->queue[i] = queue;
+
+ if (queue == ODP_QUEUE_INVALID) {
+ printf("Error: Queue create failed %u\n", i);
+ return -1;
+ }
+ }
+
+ /* Store events into queues. Dummy queues are allocated from
+ * the beginning of the array, so that usage of those affect allocation
+ * of active queues. Dummy queues are left empty. */
+ for (i = first; i < tot_queue; i++) {
+ queue = global->queue[i];
+
+ if (ctx_size) {
+ /*
+ * Cast increases alignment, but it's ok, since ctx and
+ * ctx_size are both cache line aligned.
+ */
+ queue_context_t *qc = (queue_context_t *)(uintptr_t)ctx;
+
+ if (test_options->forward) {
+ uint32_t next = i + 1;
+
+ if (next == tot_queue)
+ next = first;
+
+ qc->next = global->queue[next];
+ }
+
+ if (test_options->fairness)
+ odp_atomic_init_u64(&qc->count, 0);
+
+ if (odp_queue_context_set(queue, ctx, ctx_size)) {
+ printf("Error: Context set failed %u\n", i);
+ return -1;
+ }
+
+ ctx += ctx_size;
+ }
+
+ for (j = 0; j < num_event; j++) {
+ odp_event_t ev;
+
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ odp_buffer_t buf = odp_buffer_alloc(pool);
+
+ if (buf == ODP_BUFFER_INVALID) {
+ ODPH_ERR("Error: alloc failed %u/%u\n", i, j);
+ return -1;
+ }
+ ev = odp_buffer_to_event(buf);
+ } else {
+ odp_packet_t pkt = odp_packet_alloc(pool, event_size);
+
+ if (pkt == ODP_PACKET_INVALID) {
+ ODPH_ERR("Error: alloc failed %u/%u\n", i, j);
+ return -1;
+ }
+ ev = odp_packet_to_event(pkt);
+ }
+ if (odp_queue_enq(queue, ev)) {
+ ODPH_ERR("Error: enqueue failed %u/%u\n", i, j);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int join_group(test_global_t *global, int grp_index, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+ group = global->group[grp_index];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ printf("Error: Group %i join failed (thr %i)\n",
+ grp_index, thr);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int join_all_groups(test_global_t *global, int thr)
+{
+ int i;
+ test_options_t *test_options = &global->test_options;
+ int num_group = test_options->num_group;
+
+ if (num_group <= 0)
+ return 0;
+
+ for (i = 0; i < num_group; i++) {
+ if (join_group(global, i, thr)) {
+ printf("Error: Group %u join failed (thr %i)\n",
+ i, thr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void print_queue_fairness(test_global_t *global)
+{
+ uint32_t i;
+ queue_context_t *ctx;
+ test_options_t *test_options = &global->test_options;
+ uint32_t first = test_options->num_dummy;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t tot_queue = test_options->tot_queue;
+ uint64_t total = 0;
+ double average;
+
+ if (!test_options->fairness)
+ return;
+
+ for (i = first; i < tot_queue; i++) {
+ ctx = odp_queue_context(global->queue[i]);
+ total += odp_atomic_load_u64(&ctx->count);
+ }
+
+ average = (double)total / (double)num_queue;
+
+ printf("\n");
+ printf("RESULTS - events per queue (percent of average):\n");
+ printf("------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = first; i < tot_queue; i++) {
+ ctx = odp_queue_context(global->queue[i]);
+
+ if ((i % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)odp_atomic_load_u64(&ctx->count) /
+ average * 100.0);
+ }
+
+ printf("\n");
+}
+
+static int destroy_queues(test_global_t *global)
+{
+ uint32_t i;
+ odp_event_t ev;
+ uint64_t wait;
+ test_options_t *test_options = &global->test_options;
+ uint32_t tot_queue = test_options->tot_queue;
+ int thr = odp_thread_id();
+
+ if (join_all_groups(global, thr))
+ return -1;
+
+ wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
+
+ while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ for (i = 0; i < tot_queue; i++) {
+ if (global->queue[i] != ODP_QUEUE_INVALID) {
+ if (odp_queue_destroy(global->queue[i])) {
+ printf("Error: Queue destroy failed %u\n", i);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int destroy_groups(test_global_t *global)
+{
+ int i;
+ test_options_t *test_options = &global->test_options;
+ int num_group = test_options->num_group;
+
+ if (num_group <= 0)
+ return 0;
+
+ for (i = 0; i < num_group; i++) {
+ odp_schedule_group_t group = global->group[i];
+
+ if (odp_schedule_group_destroy(group)) {
+ printf("Error: Group destroy failed %u\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static uint64_t rw_uarea(odp_event_t ev[], int num, uint32_t rd_words, uint32_t rw_words)
+{
+ uint64_t *data;
+ int i;
+ uint32_t j;
+ uint64_t sum = 0;
+
+ for (i = 0; i < num; i++) {
+ data = odp_event_user_area(ev[i]);
+
+ for (j = 0; j < rd_words; j++)
+ sum += data[j];
+
+ for (; j < rd_words + rw_words; j++) {
+ sum += data[j];
+ data[j] += 1;
+ }
+ }
+
+ return sum;
+}
+
+static inline uint64_t rw_ctx_data(void *ctx, uint32_t offset,
+ uint32_t rd_words, uint32_t rw_words)
+{
+ uint64_t *data;
+ uint32_t i;
+ uint64_t sum = 0;
+
+ data = (uint64_t *)(uintptr_t)((uint8_t *)ctx + offset);
+
+ for (i = 0; i < rd_words; i++)
+ sum += data[i];
+
+ for (; i < rd_words + rw_words; i++) {
+ sum += data[i];
+ data[i] += 1;
+ }
+
+ return sum;
+}
+
+static uint64_t rw_data(odp_event_t ev[], int num,
+ uint32_t rd_words, uint32_t rw_words, odp_pool_type_t pool_type)
+{
+ uint64_t *data;
+ int i;
+ uint32_t j;
+ uint64_t sum = 0;
+
+ for (i = 0; i < num; i++) {
+ if (pool_type == ODP_POOL_BUFFER)
+ data = odp_buffer_addr(odp_buffer_from_event(ev[i]));
+ else
+ data = odp_packet_data(odp_packet_from_event(ev[i]));
+
+ for (j = 0; j < rd_words; j++)
+ sum += data[j];
+
+ for (; j < rd_words + rw_words; j++) {
+ sum += data[j];
+ data[j] += 1;
+ }
+ }
+
+ return sum;
+}
+
+static int test_sched(void *arg)
+{
+ int num, num_enq, ret, thr;
+ uint32_t i, rounds;
+ uint64_t c1, c2, cycles, nsec;
+ uint64_t events, enqueues, waits, events_prev;
+ odp_time_t t1, t2, last_retry_ts;
+ odp_queue_t queue;
+ thread_arg_t *thread_arg = arg;
+ test_global_t *global = thread_arg->global;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_sched = test_options->num_sched;
+ uint32_t max_burst = test_options->max_burst;
+ int num_group = test_options->num_group;
+ int forward = test_options->forward;
+ int fairness = test_options->fairness;
+ int touch_data = test_options->touch_data;
+ uint32_t rd_words = test_options->rd_words;
+ uint32_t rw_words = test_options->rw_words;
+ uint32_t ctx_size = test_options->ctx_size;
+ uint32_t ctx_rd_words = test_options->ctx_rd_words;
+ uint32_t ctx_rw_words = test_options->ctx_rw_words;
+ const uint32_t uarea_size = test_options->uarea_size;
+ const uint32_t uarea_rd = test_options->uarea_rd;
+ const uint32_t uarea_rw = test_options->uarea_rw;
+ odp_pool_type_t pool_type = test_options->pool_type;
+ int touch_ctx = ctx_rd_words || ctx_rw_words;
+ odp_atomic_u32_t *exit_threads = &global->exit_threads;
+ uint32_t ctx_offset = 0;
+ uint32_t sched_retries = 0;
+ uint64_t data_sum = 0;
+ uint64_t ctx_sum = 0;
+ uint64_t uarea_sum = 0;
+ uint64_t wait_ns = test_options->wait_ns;
+ odp_event_t ev[max_burst];
+
+ thr = odp_thread_id();
+
+ if (forward || fairness)
+ ctx_offset = ROUNDUP(sizeof(queue_context_t), 8);
+
+ if (num_group > 0) {
+ uint32_t num_join = test_options->num_join;
+
+ if (num_join) {
+ int pos = 0;
+ int n = 512;
+ char str[n];
+ int group_index = thread_arg->first_group;
+
+ pos += snprintf(&str[pos], n - pos,
+ "Thread %i joined groups:", thr);
+
+ for (i = 0; i < num_join; i++) {
+ if (join_group(global, group_index, thr))
+ return -1;
+
+ pos += snprintf(&str[pos], n - pos, " %i",
+ group_index);
+
+ group_index = (group_index + 1) % num_group;
+ }
+
+ printf("%s\n", str);
+
+ } else {
+ if (join_all_groups(global, thr))
+ return -1;
+ }
+ }
+
+ for (i = 0; i < max_burst; i++)
+ ev[i] = ODP_EVENT_INVALID;
+
+ enqueues = 0;
+ events = 0;
+ events_prev = 0;
+ waits = 0;
+ ret = 0;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+ c1 = odp_cpu_cycles();
+ last_retry_ts = t1;
+
+ for (rounds = 0; odp_likely(!odp_atomic_load_u32(exit_threads)); rounds++) {
+ if (odp_unlikely(num_sched && events >= num_sched))
+ break;
+
+ num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
+ ev, max_burst);
+
+ if (odp_likely(num > 0)) {
+ sched_retries = 0;
+ events += num;
+ i = 0;
+
+ if (odp_unlikely(uarea_size))
+ uarea_sum += rw_uarea(ev, num, uarea_rd, uarea_rw);
+
+ if (odp_unlikely(ctx_size)) {
+ queue_context_t *ctx = odp_queue_context(queue);
+
+ if (forward)
+ queue = ctx->next;
+
+ if (fairness)
+ odp_atomic_add_u64(&ctx->count, num);
+
+ if (odp_unlikely(touch_ctx))
+ ctx_sum += rw_ctx_data(ctx, ctx_offset,
+ ctx_rd_words,
+ ctx_rw_words);
+ }
+
+ if (odp_unlikely(touch_data))
+ data_sum += rw_data(ev, num, rd_words,
+ rw_words, pool_type);
+
+ if (odp_unlikely(wait_ns)) {
+ waits++;
+ odp_time_wait_ns(wait_ns);
+ }
+
+ while (num) {
+ num_enq = odp_queue_enq_multi(queue, &ev[i],
+ num);
+
+ if (num_enq < 0) {
+ printf("Error: Enqueue failed. Round %u\n",
+ rounds);
+ odp_event_free_multi(&ev[i], num);
+ ret = -1;
+ break;
+ }
+
+ num -= num_enq;
+ i += num_enq;
+ enqueues++;
+ }
+
+ if (odp_unlikely(ret))
+ break;
+
+ continue;
+ } else if (num == 0) {
+ sched_retries++;
+ if (odp_unlikely(sched_retries > TIME_CHECK_INTERVAL)) {
+ odp_time_t cur_time = odp_time_local();
+
+ /* Measure time from the last received event and
+ * break if MAX_SCHED_WAIT_NS is exceeded */
+ sched_retries = 0;
+ if (events_prev != events) {
+ events_prev = events;
+ last_retry_ts = cur_time;
+ } else if (odp_time_diff_ns(cur_time,
+ last_retry_ts) >
+ MAX_SCHED_WAIT_NS) {
+ printf("Error: scheduling timed out\n");
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ /* <0 not specified as an error but checking anyway */
+ if (num < 0) {
+ printf("Error: Sched failed. Round %u\n", rounds);
+ ret = -1;
+ break;
+ }
+ }
+
+ c2 = odp_cpu_cycles();
+ t2 = odp_time_local();
+
+ nsec = odp_time_diff_ns(t2, t1);
+ cycles = odp_cpu_cycles_diff(c2, c1);
+
+ /* Update stats*/
+ global->stat[thr].rounds = rounds;
+ global->stat[thr].enqueues = enqueues;
+ global->stat[thr].events = events;
+ global->stat[thr].nsec = nsec;
+ global->stat[thr].cycles = cycles;
+ global->stat[thr].waits = waits;
+ global->stat[thr].dummy_sum = data_sum + ctx_sum + uarea_sum;
+ global->stat[thr].failed = ret;
+
+ if (odp_atomic_fetch_dec_u32(&global->num_worker) == 1) {
+ /* The last worker frees all events. This is needed when the main
+ * thread cannot do the clean up (ODP_SCHED_GROUP_WORKER). */
+ odp_event_t event;
+ uint64_t sched_wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
+
+ /* Print queue and scheduler status at the end of the test, before any queues
+ * are emptied or destroyed. */
+ if (test_options->verbose) {
+ odp_queue_print_all();
+ odp_schedule_print();
+ }
+
+ while ((event = odp_schedule(NULL, sched_wait)) != ODP_EVENT_INVALID)
+ odp_event_free(event);
+ }
+
+ /* Pause scheduling before thread exit */
+ odp_schedule_pause();
+
+ while (1) {
+ ev[0] = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
+
+ if (ev[0] == ODP_EVENT_INVALID)
+ break;
+
+ if (odp_unlikely(forward))
+ queue = ((queue_context_t *)odp_queue_context(queue))->next;
+
+ if (odp_queue_enq(queue, ev[0])) {
+ printf("Error: Queue enqueue failed\n");
+ odp_event_free(ev[0]);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+/* Launch one worker thread per selected CPU.
+ *
+ * Initializes global->num_worker to num_cpu before any thread runs (each
+ * worker decrements it on exit; the last one drains remaining scheduled
+ * events) and fills in the per-thread argument for every worker.
+ *
+ * Returns 0 on success, -1 if thread creation fails. */
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+	odph_thread_common_param_t thr_common;
+	int i, ret;
+	test_options_t *test_options = &global->test_options;
+	int num_group = test_options->num_group;
+	uint32_t num_join = test_options->num_join;
+	int num_cpu = test_options->num_cpu;
+	odph_thread_param_t thr_param[num_cpu];
+
+	/* Must happen before thread creation: workers decrement this counter */
+	odp_atomic_init_u32(&global->num_worker, num_cpu);
+
+	memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+	odph_thread_common_param_init(&thr_common);
+
+	thr_common.instance = instance;
+	thr_common.cpumask = &global->cpumask;
+
+	for (i = 0; i < num_cpu; i++) {
+		odph_thread_param_init(&thr_param[i]);
+		thr_param[i].start = test_sched;
+		thr_param[i].arg = &global->thread_arg[i];
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+
+		global->thread_arg[i].global = global;
+		global->thread_arg[i].first_group = 0;
+
+		if (num_group > 0 && num_join) {
+			/* Each thread joins only num_join groups, starting
+			 * from this group index and wrapping around the group
+			 * table. */
+			int first_group = (i * num_join) % num_group;
+
+			global->thread_arg[i].first_group = first_group;
+		}
+	}
+
+	ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param,
+				 num_cpu);
+
+	if (ret != num_cpu) {
+		printf("Error: thread create failed %i\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Estimate how many CPU cycles a single odp_time_wait_ns(wait_ns) call
+ * consumes, by timing a batch of calls. Returns 0.0 when wait_ns is 0. */
+static double measure_wait_time_cycles(uint64_t wait_ns)
+{
+	uint64_t cycles_start, cycles_end, num_rounds, round;
+	double cycles_per_wait;
+
+	if (wait_ns == 0)
+		return 0.0;
+
+	/* Run measurement for 100msec or at least two times, so that effect
+	 * from CPU frequency scaling is minimized. */
+	num_rounds = (100 * ODP_TIME_MSEC_IN_NS) / wait_ns;
+	if (num_rounds == 0)
+		num_rounds = 2;
+
+	cycles_start = odp_cpu_cycles();
+
+	for (round = 0; round < num_rounds; round++)
+		odp_time_wait_ns(wait_ns);
+
+	cycles_end = odp_cpu_cycles();
+
+	cycles_per_wait = (double)odp_cpu_cycles_diff(cycles_end, cycles_start) / num_rounds;
+
+	printf("\nMeasured wait cycles: %.3f\n", cycles_per_wait);
+
+	return cycles_per_wait;
+}
+
+/* Print per-thread results and averages over all successful threads.
+ *
+ * Threads that reported failure are excluded from the sums and from
+ * num_cpu, so averages cover only threads that produced valid results.
+ * When a per-round wait time was configured, also reports cycle counts
+ * with the measured wait overhead subtracted. */
+static void print_stat(test_global_t *global)
+{
+	int i, num;
+	double rounds_ave, enqueues_ave, events_ave, nsec_ave, cycles_ave;
+	double waits_ave, wait_cycles, wait_cycles_ave;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	uint64_t wait_ns = test_options->wait_ns;
+	uint64_t rounds_sum = 0;
+	uint64_t enqueues_sum = 0;
+	uint64_t events_sum = 0;
+	uint64_t nsec_sum = 0;
+	uint64_t cycles_sum = 0;
+	uint64_t waits_sum = 0;
+
+	wait_cycles = measure_wait_time_cycles(wait_ns);
+
+	/* Sums over successful threads only */
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->stat[i].failed) {
+			num_cpu--;
+			continue;
+		}
+		rounds_sum += global->stat[i].rounds;
+		enqueues_sum += global->stat[i].enqueues;
+		events_sum += global->stat[i].events;
+		nsec_sum += global->stat[i].nsec;
+		cycles_sum += global->stat[i].cycles;
+		waits_sum += global->stat[i].waits;
+	}
+
+	if (rounds_sum == 0 || num_cpu <= 0) {
+		printf("No results.\n");
+		return;
+	}
+
+	/* Cast before dividing: uint64_t / int is integer division, which
+	 * would truncate the fractional part before assignment to double. */
+	rounds_ave = (double)rounds_sum / num_cpu;
+	enqueues_ave = (double)enqueues_sum / num_cpu;
+	events_ave = (double)events_sum / num_cpu;
+	nsec_ave = (double)nsec_sum / num_cpu;
+	cycles_ave = (double)cycles_sum / num_cpu;
+	waits_ave = (double)waits_sum / num_cpu;
+	wait_cycles_ave = waits_ave * wait_cycles;
+	num = 0;
+
+	printf("\n");
+	printf("RESULTS - per thread (Million events per sec):\n");
+	printf("----------------------------------------------\n");
+	printf("        1      2      3      4      5      6      7      8      9     10");
+
+	/* Ten results per row; failed threads shown as n/a */
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->stat[i].rounds) {
+			if ((num % 10) == 0)
+				printf("\n   ");
+
+			if (global->stat[i].failed)
+				printf("   n/a ");
+			else
+				printf("%6.1f ",
+				       (1000.0 * global->stat[i].events) /
+				       global->stat[i].nsec);
+
+			num++;
+		}
+	}
+	printf("\n\n");
+
+	printf("RESULTS - average over %i threads:\n", num_cpu);
+	printf("----------------------------------\n");
+	printf("  schedule calls:           %.3f\n", rounds_ave);
+	printf("  enqueue calls:            %.3f\n", enqueues_ave);
+	printf("  duration:                 %.3f msec\n", nsec_ave / 1000000);
+	printf("  num cycles:               %.3f M\n", cycles_ave / 1000000);
+	printf("  cycles per round:         %.3f\n",
+	       cycles_ave / rounds_ave);
+	printf("  cycles per event:         %.3f\n",
+	       cycles_ave / events_ave);
+	if (wait_ns) {
+		printf("  without wait_ns cycles:   %.3f\n",
+		       (cycles_ave - wait_cycles_ave) / events_ave);
+	}
+	printf("  ave events received:      %.3f\n",
+	       events_ave / rounds_ave);
+	printf("  rounds per sec:           %.3f M\n",
+	       (1000.0 * rounds_ave) / nsec_ave);
+	printf("  events per sec:           %.3f M\n\n",
+	       (1000.0 * events_ave) / nsec_ave);
+
+	printf("TOTAL events per sec:       %.3f M\n\n",
+	       (1000.0 * events_sum) / nsec_ave);
+}
+
+/* Application entry point: initialize ODP, reserve and zero the shared
+ * test state, create pools/groups/queues, run the workers, then print
+ * results and tear everything down.
+ *
+ * Returns 0 on success, non-zero (or exits) on any setup failure. */
+int main(int argc, char **argv)
+{
+	odph_helper_options_t helper_options;
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm;
+	test_global_t *global;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		ODPH_ERR("Error: Reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* List features not to be used */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.timer = 1;
+	init.not_used.feat.tm = 1;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		printf("Error: Global init failed.\n");
+		return -1;
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		printf("Error: Local init failed.\n");
+		return -1;
+	}
+
+	shm = odp_shm_reserve("sched_perf_global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Error: SHM reserve failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	global = odp_shm_addr(shm);
+	if (global == NULL) {
+		ODPH_ERR("Error: SHM alloc failed\n");
+		exit(EXIT_FAILURE);
+	}
+	test_globals = global;
+
+	memset(global, 0, sizeof(test_global_t));
+	global->pool = ODP_POOL_INVALID;
+	global->ctx_shm = ODP_SHM_INVALID;
+	odp_atomic_init_u32(&global->exit_threads, 0);
+
+	if (setup_sig_handler()) {
+		ODPH_ERR("Error: signal handler setup failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (parse_options(argc, argv, &global->test_options))
+		return -1;
+
+	odp_sys_info_print();
+
+	/* Optional per-queue context area, sized from command line options */
+	if (global->test_options.ctx_size) {
+		uint64_t size = (uint64_t)global->test_options.ctx_size *
+				global->test_options.tot_queue;
+
+		global->ctx_shm = odp_shm_reserve("queue contexts", size,
+						  ODP_CACHE_LINE_SIZE, 0);
+		if (global->ctx_shm == ODP_SHM_INVALID) {
+			printf("Error: SHM reserve %" PRIu64 " bytes failed\n",
+			       size);
+			return -1;
+		}
+	}
+
+	odp_schedule_config_init(&global->schedule_config);
+	odp_schedule_config(&global->schedule_config);
+
+	if (set_num_cpu(global))
+		return -1;
+
+	if (create_pool(global))
+		return -1;
+
+	if (create_groups(global))
+		return -1;
+
+	if (create_queues(global))
+		return -1;
+
+	if (global->test_options.verbose)
+		odp_shm_print_all();
+
+	/* Start workers. Bail out on failure instead of joining threads
+	 * that were never created. */
+	if (start_workers(global, instance))
+		return -1;
+
+	/* Wait workers to exit */
+	odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
+
+	print_queue_fairness(global);
+
+	if (destroy_queues(global))
+		return -1;
+
+	if (destroy_groups(global))
+		return -1;
+
+	print_stat(global);
+
+	if (odp_pool_destroy(global->pool)) {
+		printf("Error: Pool destroy failed.\n");
+		return -1;
+	}
+
+	if (global->ctx_shm != ODP_SHM_INVALID)
+		odp_shm_free(global->ctx_shm);
+
+	if (odp_shm_free(shm)) {
+		ODPH_ERR("Error: SHM free failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (odp_term_local()) {
+		printf("Error: term local failed.\n");
+		return -1;
+	}
+
+	if (odp_term_global(instance)) {
+		printf("Error: term global failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/test/performance/odp_sched_perf_run.sh b/test/performance/odp_sched_perf_run.sh
new file mode 100755
index 000000000..8e7911290
--- /dev/null
+++ b/test/performance/odp_sched_perf_run.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (c) 2021, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Run odp_sched_perf twice: first with a buffer pool (-p 0), then with
+# a packet pool (-p 1). Exit immediately with the failing run's status
+# on error; exit 0 when both runs pass.
+
+# TEST_DIR may be provided by the test harness; default to this
+# script's directory.
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+echo odp_sched_perf: buffer pool
+echo ===============================================
+
+$TEST_DIR/odp_sched_perf${EXEEXT} -p 0
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+	echo odp_sched_perf -p 0: FAILED
+	exit $RET_VAL
+fi
+
+echo odp_sched_perf: packet pool
+echo ===============================================
+
+$TEST_DIR/odp_sched_perf${EXEEXT} -p 1
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+	echo odp_sched_perf -p 1: FAILED
+	exit $RET_VAL
+fi
+
+exit 0
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
new file mode 100644
index 000000000..d8ab1b279
--- /dev/null
+++ b/test/performance/odp_sched_pktio.c
@@ -0,0 +1,1600 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_sched_pktio.c
+ *
+ * Test application for scheduled packet IO
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Compile-time test limits and scheduler mode identifiers */
+#define DEBUG_PRINT       0
+#define MAX_WORKERS       64
+#define MAX_PKTIOS        (ODP_PKTIO_MAX_INDEX + 1)
+#define MAX_PKTIO_NAME    31
+#define MAX_PKTIO_QUEUES  MAX_WORKERS
+#define MAX_PIPE_STAGES   64
+#define MAX_PIPE_QUEUES   1024
+#define MAX_PKT_LEN       1514
+#define MAX_PKT_NUM       (128 * 1024)
+#define MIN_PKT_SEG_LEN   64
+/* Workers poll the stop flag once per this many schedule calls */
+#define CHECK_PERIOD      10000
+#define TEST_PASSED_LIMIT 5000
+#define SCHED_MODE_PARAL  1
+#define SCHED_MODE_ATOMIC 2
+#define SCHED_MODE_ORDER  3
+
+/* Command line options after parsing */
+typedef struct test_options_t {
+	long int timeout_us;
+	int sched_mode;
+	int num_worker;
+	int num_pktio;
+	int num_pktio_queue;
+	int burst_size;
+	int pipe_stages;
+	int pipe_queues;
+	uint32_t pipe_queue_size;
+	uint8_t collect_stat;
+	char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
+
+} test_options_t;
+
+/* Per-worker startup argument */
+typedef struct {
+	int worker_id;
+	void *test_global_ptr;
+} worker_arg_t;
+
+/* Per-worker counters; cache aligned to avoid false sharing */
+typedef struct ODP_ALIGNED_CACHE {
+	uint64_t rx_pkt;
+	uint64_t tx_pkt;
+	uint64_t pipe_pkt;
+	uint64_t tx_drop;
+	uint64_t pipe_drop;
+	uint64_t tmo;
+} worker_stat_t;
+
+/* Context attached to each packet input queue */
+typedef struct pktin_queue_context_t {
+	/* Queue context must start with stage and idx */
+	uint16_t stage;
+	uint16_t queue_idx;
+
+	uint8_t  dst_pktio;
+	uint8_t  dst_queue;
+	uint8_t  src_pktio;
+	uint8_t  src_queue;
+	odp_pktout_queue_t dst_pktout;
+} pktin_queue_context_t;
+
+/* Context attached to each pipeline queue */
+typedef struct pipe_queue_context_t {
+	/* Queue context must start with stage and idx. */
+	uint16_t stage;
+	uint16_t queue_idx;
+} pipe_queue_context_t;
+
+/* All shared test state (options, interfaces, timers, pipeline queues,
+ * per-worker arguments and statistics) */
+typedef struct {
+	volatile int  stop_workers;	/* set by signal handler or main */
+	odp_barrier_t worker_start;
+
+	test_options_t opt;
+
+	int max_workers;
+	odp_cpumask_t cpumask;
+	odp_instance_t instance;
+
+	int worker_cpu[MAX_WORKERS];
+
+	odp_pool_t pool;
+	uint32_t   pkt_len;
+	uint32_t   pkt_num;
+
+	struct {
+		odp_pktio_t pktio;
+		int pktio_index;
+		int started;
+		odph_ethaddr_t my_addr;
+		odp_queue_t input_queue[MAX_PKTIO_QUEUES];
+		odp_pktout_queue_t pktout[MAX_PKTIO_QUEUES];
+		pktin_queue_context_t queue_context[MAX_PKTIO_QUEUES];
+
+	} pktio[MAX_PKTIOS];
+
+	struct {
+		odp_timer_pool_t timer_pool;
+		odp_pool_t       timeout_pool;
+		uint64_t         timeout_tick;
+		odp_timer_t      timer[MAX_PKTIOS][MAX_PKTIO_QUEUES];
+
+	} timer;
+
+	struct {
+		odp_queue_t queue[MAX_PIPE_QUEUES];
+	} pipe_queue[MAX_PKTIOS][MAX_PIPE_STAGES];
+
+	struct {
+		pipe_queue_context_t ctx;
+	} pipe_queue_ctx[MAX_PIPE_STAGES][MAX_PIPE_QUEUES];
+
+	worker_arg_t worker_arg[MAX_WORKERS];
+
+	worker_stat_t worker_stat[MAX_WORKERS];
+	uint64_t rx_pkt_sum;
+	uint64_t tx_pkt_sum;
+
+	odp_schedule_config_t schedule_config;
+
+} test_global_t;
+
+/* File-scope pointer for the signal handler */
+static test_global_t *test_global;
+
+/* Write a locally administered destination MAC (02:00:00:00:00:<index>)
+ * into eth_addr. */
+static inline void set_dst_eth_addr(odph_ethaddr_t *eth_addr, int index)
+{
+	int i;
+
+	eth_addr->addr[0] = 0x02;
+
+	for (i = 1; i < 5; i++)
+		eth_addr->addr[i] = 0;
+
+	eth_addr->addr[5] = index;
+}
+
+/* Rewrite the Ethernet header of each packet: source is our MAC on the
+ * output interface, destination is a synthetic MAC derived from 'out'. */
+static inline void fill_eth_addr(odp_packet_t pkt[], int num,
+				 test_global_t *test_global, int out)
+{
+	int i;
+	odph_ethaddr_t src_addr = test_global->pktio[out].my_addr;
+
+	for (i = 0; i < num; ++i) {
+		odph_ethhdr_t *hdr = odp_packet_data(pkt[i]);
+
+		hdr->src = src_addr;
+		set_dst_eth_addr(&hdr->dst, out);
+	}
+}
+
+/* Transmit num_pkt packets on 'pktout' of interface 'output'. Packets
+ * that could not be sent are freed and counted as drops. Per-worker
+ * tx statistics are updated when stat collection is enabled. */
+static inline void send_packets(test_global_t *test_global,
+				odp_packet_t pkt[], int num_pkt,
+				int output, odp_pktout_queue_t pktout,
+				int worker_id)
+{
+	int num_sent, num_drop;
+
+	fill_eth_addr(pkt, num_pkt, test_global, output);
+
+	num_sent = odp_pktout_send(pktout, pkt, num_pkt);
+	if (odp_unlikely(num_sent < 0))
+		num_sent = 0;
+
+	num_drop = num_pkt - num_sent;
+	if (odp_unlikely(num_drop > 0))
+		odp_packet_free_multi(&pkt[num_sent], num_drop);
+
+	if (odp_unlikely(test_global->opt.collect_stat)) {
+		worker_stat_t *stat = &test_global->worker_stat[worker_id];
+
+		stat->tx_pkt += num_sent;
+		stat->tx_drop += num_drop;
+	}
+}
+
+/* Worker loop for direct forwarding mode (no pipeline, no timers).
+ *
+ * Schedules bursts of packet events and transmits them on the output
+ * queue stored in the source queue's context. The stop flag is checked
+ * only every CHECK_PERIOD schedule calls to keep the hot path cheap.
+ * Always returns 0. */
+static int worker_thread_direct(void *arg)
+{
+	int num_pkt, out;
+	odp_pktout_queue_t pktout;
+	odp_queue_t queue;
+	pktin_queue_context_t *queue_context;
+	worker_arg_t *worker_arg = arg;
+	test_global_t *test_global = worker_arg->test_global_ptr;
+	int worker_id = worker_arg->worker_id;
+	uint32_t polls = 0;
+	int burst_size = test_global->opt.burst_size;
+
+	printf("Worker %i started\n", worker_id);
+
+	/* Wait for other workers to start */
+	odp_barrier_wait(&test_global->worker_start);
+
+	while (1) {
+		odp_event_t ev[burst_size];
+		odp_packet_t pkt[burst_size];
+
+		polls++;
+
+		if (polls == CHECK_PERIOD) {
+			polls = 0;
+			if (test_global->stop_workers)
+				break;
+		}
+
+		num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
+					     ev, burst_size);
+
+		if (num_pkt <= 0)
+			continue;
+
+		queue_context = odp_queue_context(queue);
+
+		if (DEBUG_PRINT)
+			printf("worker %i: [%i/%i] -> [%i/%i], %i packets\n",
+			       worker_id,
+			       queue_context->src_pktio,
+			       queue_context->src_queue,
+			       queue_context->dst_pktio,
+			       queue_context->dst_queue, num_pkt);
+
+		odp_packet_from_event_multi(pkt, ev, num_pkt);
+
+		pktout = queue_context->dst_pktout;
+		out = queue_context->dst_pktio;
+
+		send_packets(test_global, pkt, num_pkt, out, pktout, worker_id);
+
+		if (odp_unlikely(test_global->opt.collect_stat))
+			test_global->worker_stat[worker_id].rx_pkt += num_pkt;
+	}
+
+	/*
+	 * Free prefetched packets before exiting worker thread as
+	 * such packets can block main thread event cleanup or
+	 * cause buffer leak.
+	 */
+	odp_schedule_pause();
+	while (1) {
+		odp_event_t ev;
+
+		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+		if (ev == ODP_EVENT_INVALID)
+			break;
+		odp_event_free(ev);
+	}
+
+	/* Non-prefetched events in scheduler are cleaned up by main thread */
+	printf("Worker %i stopped\n", worker_id);
+
+	return 0;
+}
+
+/* Enqueue events to the next pipeline queue. Events that do not fit
+ * are freed and counted as pipeline drops (when stats are enabled). */
+static inline void enqueue_events(odp_queue_t dst_queue, odp_event_t ev[],
+				  int num, int worker_id)
+{
+	int num_drop;
+	int num_sent = odp_queue_enq_multi(dst_queue, ev, num);
+
+	if (odp_unlikely(num_sent < 0))
+		num_sent = 0;
+
+	num_drop = num - num_sent;
+	if (odp_unlikely(num_drop))
+		odp_event_free_multi(&ev[num_sent], num_drop);
+
+	if (odp_unlikely(test_global->opt.collect_stat))
+		test_global->worker_stat[worker_id].pipe_drop += num_drop;
+}
+
+/* Look up the pipeline queue for a given input interface, pipeline
+ * stage and queue index. */
+static inline odp_queue_t next_queue(test_global_t *test_global, int input,
+				     uint16_t stage, uint16_t queue_idx)
+{
+	const odp_queue_t *stage_queues = test_global->pipe_queue[input][stage].queue;
+
+	return stage_queues[queue_idx];
+}
+
+/* Worker loop for pipeline mode.
+ *
+ * Stage 0 (packet input) forwards events into the first pipeline stage,
+ * optionally spreading flows with the packet flow hash when there are
+ * more pipeline queues than input queues. Middle stages pass events to
+ * the next stage's queue. The last stage transmits packets out on the
+ * next interface (or loops back with a single interface).
+ * Always returns 0. */
+static int worker_thread_pipeline(void *arg)
+{
+	int i, num_pkt, input, output, output_queue;
+	odp_queue_t queue, dst_queue;
+	odp_pktout_queue_t pktout;
+	pipe_queue_context_t *pipe_context;
+	uint16_t stage, queue_idx;
+	worker_arg_t *worker_arg = arg;
+	test_global_t *test_global = worker_arg->test_global_ptr;
+	int worker_id = worker_arg->worker_id;
+	int pipe_stages = test_global->opt.pipe_stages;
+	int pipe_queues = test_global->opt.pipe_queues;
+	int num_pktio = test_global->opt.num_pktio;
+	int num_pktio_queue = test_global->opt.num_pktio_queue;
+	uint32_t polls = 0;
+	int burst_size = test_global->opt.burst_size;
+
+	printf("Worker %i started\n", worker_id);
+
+	/* Wait for other workers to start */
+	odp_barrier_wait(&test_global->worker_start);
+
+	while (1) {
+		odp_event_t ev[burst_size];
+		odp_packet_t pkt[burst_size];
+
+		num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
+					     ev, burst_size);
+
+		polls++;
+
+		/* Check the stop flag only periodically */
+		if (polls == CHECK_PERIOD) {
+			polls = 0;
+			if (test_global->stop_workers)
+				break;
+		}
+
+		if (num_pkt <= 0)
+			continue;
+
+		pipe_context = odp_queue_context(queue);
+		stage = pipe_context->stage;
+		queue_idx = pipe_context->queue_idx;
+
+		/* A queue is connected to a single input interface. All
+		 * packets from a queue are from the same interface. */
+		input = odp_packet_input_index(odp_packet_from_event(ev[0]));
+
+		if (DEBUG_PRINT)
+			printf("worker %i: stage %u, idx %u, %i packets\n",
+			       worker_id, stage, queue_idx, num_pkt);
+
+		if (stage == 0) {
+			if (odp_unlikely(test_global->opt.collect_stat))
+				test_global->worker_stat[worker_id].rx_pkt +=
+					num_pkt;
+
+			/* The first stage (packet input). Forward packet flows
+			 * into first pipeline queues. */
+			if (pipe_queues > num_pktio_queue) {
+				/* More pipeline queues than input queues.
+				 * Use flow hash to spread flows into pipeline
+				 * queues. */
+				odp_packet_t p;
+				worker_stat_t *stat;
+				uint32_t hash;
+				uint16_t idx;
+				int drop = 0;
+
+				stat = &test_global->worker_stat[worker_id];
+
+				for (i = 0; i < num_pkt; i++) {
+					p = odp_packet_from_event(ev[i]);
+					hash = odp_packet_flow_hash(p);
+					idx = queue_idx;
+
+					/* Fall back to the input queue index
+					 * when no flow hash is available */
+					if (odp_packet_has_flow_hash(p))
+						idx = hash % pipe_queues;
+
+					dst_queue = next_queue(test_global,
+							       input, stage,
+							       idx);
+
+					if (odp_queue_enq(dst_queue, ev[i])) {
+						odp_event_free(ev[i]);
+						drop++;
+					}
+				}
+
+				if (odp_unlikely(test_global->opt.collect_stat))
+					stat->pipe_drop += drop;
+			} else {
+				queue_idx = queue_idx % pipe_queues;
+				dst_queue = next_queue(test_global, input,
+						       stage, queue_idx);
+
+				enqueue_events(dst_queue, ev, num_pkt,
+					       worker_id);
+			}
+			continue;
+		}
+
+		if (stage < pipe_stages) {
+			/* Middle stages */
+			dst_queue = next_queue(test_global, input, stage,
+					       queue_idx);
+			enqueue_events(dst_queue, ev, num_pkt, worker_id);
+
+			if (odp_unlikely(test_global->opt.collect_stat))
+				test_global->worker_stat[worker_id].pipe_pkt +=
+					num_pkt;
+
+			continue;
+		}
+
+		/* The last stage, send packets out */
+		odp_packet_from_event_multi(pkt, ev, num_pkt);
+
+		/* If single interface loopback, otherwise forward to the next
+		 * interface. */
+		output = (input + 1) % num_pktio;
+		output_queue = queue_idx % num_pktio_queue;
+		pktout = test_global->pktio[output].pktout[output_queue];
+
+		send_packets(test_global, pkt, num_pkt, output, pktout,
+			     worker_id);
+	}
+
+	printf("Worker %i stopped\n", worker_id);
+
+	return 0;
+}
+
+/* Worker loop for timer mode (flow inactivity timeout enabled).
+ *
+ * Each pktin queue has an associated timer. Received timeout events are
+ * used to start the timer again; when only packets arrive the running
+ * timer is restarted, so the timer fires only after the queue has been
+ * inactive for the timeout period. Packets are forwarded like in the
+ * direct mode. Always returns 0. */
+static int worker_thread_timers(void *arg)
+{
+	int num, num_pkt, out, tmos, i, src_pktio, src_queue;
+	odp_pktout_queue_t pktout;
+	odp_queue_t queue;
+	pktin_queue_context_t *queue_context;
+	odp_timer_t timer;
+	odp_timer_retval_t ret;
+	odp_timer_start_t start_param;
+	worker_arg_t *worker_arg = arg;
+	test_global_t *test_global = worker_arg->test_global_ptr;
+	int worker_id = worker_arg->worker_id;
+	uint32_t polls = 0;
+	int burst_size = test_global->opt.burst_size;
+	uint64_t tick = test_global->timer.timeout_tick;
+
+	printf("Worker (timers) %i started\n", worker_id);
+
+	/* Wait for other workers to start */
+	odp_barrier_wait(&test_global->worker_start);
+
+	while (1) {
+		odp_event_t ev[burst_size];
+		odp_packet_t pkt[burst_size];
+
+		num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
+					 ev, burst_size);
+
+		polls++;
+
+		/* Check the stop flag only periodically */
+		if (polls == CHECK_PERIOD) {
+			polls = 0;
+			if (test_global->stop_workers)
+				break;
+		}
+
+		if (num <= 0)
+			continue;
+
+		tmos = 0;
+		queue_context = odp_queue_context(queue);
+		src_pktio = queue_context->src_pktio;
+		src_queue = queue_context->src_queue;
+		timer = test_global->timer.timer[src_pktio][src_queue];
+		start_param.tick_type = ODP_TIMER_TICK_REL;
+		start_param.tick = tick;
+		start_param.tmo_ev = ODP_EVENT_INVALID;
+
+		/* Separate timeout events from packets; packets are
+		 * compacted to the front of pkt[] */
+		for (i = 0; i < num; i++) {
+			if (odp_unlikely(odp_event_type(ev[i]) ==
+					 ODP_EVENT_TIMEOUT)) {
+				tmos++;
+
+				start_param.tmo_ev = ev[i];
+				ret = odp_timer_start(timer, &start_param);
+
+				if (odp_unlikely(ret != ODP_TIMER_SUCCESS)) {
+					/* Should never happen. Timeout event
+					 * has been received, timer should be
+					 * ready to be set again. */
+					printf("Expired timer reset failed "
+					       "%i\n", ret);
+					odp_event_free(ev[i]);
+				}
+
+				if (odp_unlikely(tmos > 1)) {
+					/* Should never happen */
+					printf("Too many timeouts\n");
+				}
+			} else {
+				pkt[i - tmos] = odp_packet_from_event(ev[i]);
+			}
+		}
+
+		if (tmos == 0) {
+			/* Reset timer with existing timeout event */
+			ret = odp_timer_restart(timer, &start_param);
+
+			if (odp_unlikely(ret != ODP_TIMER_SUCCESS &&
+					 ret != ODP_TIMER_FAIL)) {
+				/* Tick period is too short or long. Normally,
+				 * reset either succeeds or fails due to timer
+				 * expiration, in which case timeout event will
+				 * be received soon and reset will be done
+				 * then. */
+				printf("Timer reset failed %i\n", ret);
+			}
+		}
+
+		num_pkt = num - tmos;
+
+		if (DEBUG_PRINT)
+			printf("worker %i: [%i/%i] -> [%i/%i], %i packets "
+			       "%i timeouts\n",
+			       worker_id,
+			       queue_context->src_pktio,
+			       queue_context->src_queue,
+			       queue_context->dst_pktio,
+			       queue_context->dst_queue, num_pkt, tmos);
+
+		if (odp_unlikely(test_global->opt.collect_stat && tmos))
+			test_global->worker_stat[worker_id].tmo += tmos;
+
+		if (odp_unlikely(num_pkt == 0))
+			continue;
+
+		pktout = queue_context->dst_pktout;
+		out = queue_context->dst_pktio;
+
+		send_packets(test_global, pkt, num_pkt, out, pktout, worker_id);
+
+		if (odp_unlikely(test_global->opt.collect_stat))
+			test_global->worker_stat[worker_id].rx_pkt += num_pkt;
+	}
+
+	printf("Worker %i stopped\n", worker_id);
+
+	return 0;
+}
+
+/* Signal handler: request all workers to stop. The full memory barrier
+ * makes the flag update visible to the worker threads. */
+static void sig_handler(int signo)
+{
+	(void)signo;
+
+	if (test_global == NULL)
+		return;
+
+	test_global->stop_workers = 1;
+	odp_mb_full();
+}
+
+/* Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(x) (strrchr((x), '/') ? strrchr((x), '/') + 1 : (x))
+
+/* Print command line help text to stdout. */
+static void print_usage(const char *progname)
+{
+	printf("\n"
+	       "Scheduler with packet IO test application.\n"
+	       "\n"
+	       "Usage: %s [options]\n"
+	       "\n"
+	       "OPTIONS:\n"
+	       "  -i, --interface <name>    Packet IO interfaces (comma-separated, no spaces)\n"
+	       "  -c, --num_cpu <number>    Worker thread count. Default: 1\n"
+	       "  -q, --num_queue <number>  Number of pktio queues. Default: Worker thread count\n"
+	       "  -b, --burst <number>      Maximum number of events requested from scheduler. Default: 32\n"
+	       "  -t, --timeout <number>    Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n"
+	       "  --pipe-stages <number>    Number of pipeline stages per interface\n"
+	       "  --pipe-queues <number>    Number of queues per pipeline stage\n"
+	       "  --pipe-queue-size <num>   Number of events a pipeline queue must be able to store. Default 256.\n"
+	       "  -m, --sched_mode <mode>   Scheduler synchronization mode for all queues. 1: parallel, 2: atomic, 3: ordered. Default: 2\n"
+	       "  -s, --stat                Collect statistics.\n"
+	       "  -h, --help                Display help and exit.\n\n",
+	       NO_PATH(progname));
+}
+
+/* Parse command line arguments into test_options (zeroed first, then
+ * defaults applied) and validate combinations.
+ *
+ * Returns 0 on success, -1 on bad or inconsistent options (also after
+ * -h, so the caller exits after printing usage). */
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+	int i, opt, long_index;
+	char *name, *str;
+	int len, str_len, sched_mode;
+	const struct option longopts[] = {
+		{"interface",   required_argument, NULL, 'i'},
+		{"num_cpu",     required_argument, NULL, 'c'},
+		{"num_queue",   required_argument, NULL, 'q'},
+		{"burst",       required_argument, NULL, 'b'},
+		{"timeout",     required_argument, NULL, 't'},
+		{"sched_mode",  required_argument, NULL, 'm'},
+		{"pipe-stages", required_argument, NULL, 0},
+		{"pipe-queues", required_argument, NULL, 1},
+		{"pipe-queue-size", required_argument, NULL, 2},
+		{"stat",        no_argument,       NULL, 's'},
+		{"help",        no_argument,       NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+	const char *shortopts =  "+i:c:q:b:t:m:sh";
+	int ret = 0;
+
+	memset(test_options, 0, sizeof(test_options_t));
+
+	/* Defaults */
+	test_options->sched_mode = SCHED_MODE_ATOMIC;
+	test_options->num_worker = 1;
+	test_options->num_pktio_queue = 0;
+	test_options->burst_size = 32;
+	test_options->pipe_queue_size = 256;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;	/* No more options */
+
+		switch (opt) {
+		case 0:
+			test_options->pipe_stages = atoi(optarg);
+			break;
+		case 1:
+			test_options->pipe_queues = atoi(optarg);
+			break;
+		case 2:
+			test_options->pipe_queue_size = atoi(optarg);
+			break;
+		case 'i':
+			/* Split the comma-separated interface list. Names
+			 * are NUL-terminated because test_options was
+			 * zeroed above. */
+			i = 0;
+			str = optarg;
+			str_len = strlen(str);
+
+			while (str_len > 0) {
+				len = strcspn(str, ",");
+				str_len -= len + 1;
+
+				if (i == MAX_PKTIOS) {
+					printf("Error: Too many interfaces\n");
+					ret = -1;
+					break;
+				}
+
+				if (len > MAX_PKTIO_NAME) {
+					printf("Error: Too long interface name %s\n",
+					       str);
+					ret = -1;
+					break;
+				}
+
+				name = test_options->pktio_name[i];
+				memcpy(name, str, len);
+				str += len + 1;
+				i++;
+			}
+
+			test_options->num_pktio = i;
+
+			break;
+		case 'c':
+			test_options->num_worker = atoi(optarg);
+			break;
+		case 'q':
+			test_options->num_pktio_queue = atoi(optarg);
+			break;
+		case 'b':
+			test_options->burst_size = atoi(optarg);
+			break;
+		case 't':
+			test_options->timeout_us = atol(optarg);
+			break;
+		case 'm':
+			test_options->sched_mode = atoi(optarg);
+			break;
+		case 's':
+			test_options->collect_stat = 1;
+			break;
+		case 'h':
+			print_usage(argv[0]);
+			ret = -1;
+			break;
+		default:
+			ret = -1;
+			break;
+		}
+	}
+
+	/* Validate option combinations and limits */
+	if (test_options->timeout_us && test_options->pipe_stages) {
+		printf("Error: Cannot run timeout and pipeline tests simultaneously\n");
+		ret = -1;
+	}
+
+	if (test_options->pipe_stages > MAX_PIPE_STAGES) {
+		printf("Error: Too many pipeline stages\n");
+		ret = -1;
+	}
+
+	if (test_options->pipe_queues > MAX_PIPE_QUEUES) {
+		printf("Error: Too many queues per pipeline stage\n");
+		ret = -1;
+	}
+
+	if (test_options->num_pktio == 0) {
+		printf("Error: At least one pktio interface needed.\n");
+		ret = -1;
+	}
+
+	sched_mode = test_options->sched_mode;
+	if (sched_mode != SCHED_MODE_PARAL &&
+	    sched_mode != SCHED_MODE_ATOMIC &&
+	    sched_mode != SCHED_MODE_ORDER) {
+		printf("Error: Bad scheduler mode: %i\n", sched_mode);
+		ret = -1;
+	}
+
+	/* Default: one pktio queue per worker */
+	if (test_options->num_pktio_queue == 0)
+		test_options->num_pktio_queue = test_options->num_worker;
+
+	return ret;
+}
+
+/* Map the command line scheduler mode to the corresponding ODP queue
+ * synchronization mode. Returns (odp_schedule_sync_t)-1 for an unknown
+ * mode (parse_options rejects such values before this is called). */
+static odp_schedule_sync_t sched_sync_mode(test_global_t *test_global)
+{
+	int mode = test_global->opt.sched_mode;
+
+	if (mode == SCHED_MODE_PARAL)
+		return ODP_SCHED_SYNC_PARALLEL;
+
+	if (mode == SCHED_MODE_ATOMIC)
+		return ODP_SCHED_SYNC_ATOMIC;
+
+	if (mode == SCHED_MODE_ORDER)
+		return ODP_SCHED_SYNC_ORDERED;
+
+	return -1;
+}
+
+/* Resolve worker CPU assignments, apply scheduler configuration and
+ * clamp packet length/count to pool capability limits.
+ *
+ * Returns 0 on success, -1 when the requested worker count exceeds the
+ * available CPUs or pool capability query fails. */
+static int config_setup(test_global_t *test_global)
+{
+	int i, cpu;
+	odp_pool_capability_t pool_capa;
+	uint32_t pkt_len, pkt_num;
+	odp_cpumask_t *cpumask = &test_global->cpumask;
+
+	test_global->max_workers = odp_cpumask_default_worker(cpumask, 0);
+
+	if (test_global->opt.num_worker > test_global->max_workers ||
+	    test_global->opt.num_worker > MAX_WORKERS) {
+		printf("Error: Too many workers %i.\n",
+		       test_global->opt.num_worker);
+		return -1;
+	}
+
+	/* Record the concrete CPU number for each worker */
+	cpu = odp_cpumask_first(cpumask);
+	for (i = 0; i < test_global->opt.num_worker; ++i) {
+		test_global->worker_cpu[i] = cpu;
+		cpu = odp_cpumask_next(cpumask, cpu);
+	}
+
+	odp_schedule_config_init(&test_global->schedule_config);
+	odp_schedule_config(&test_global->schedule_config);
+
+	if (odp_pool_capability(&pool_capa)) {
+		printf("Error: Pool capability failed.\n");
+		return -1;
+	}
+
+	pkt_len = MAX_PKT_LEN;
+	pkt_num = MAX_PKT_NUM;
+
+	/* Clamp to implementation limits (0 capability means no limit) */
+	if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len)
+		pkt_len = pool_capa.pkt.max_len;
+
+	if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num) {
+		pkt_num = pool_capa.pkt.max_num;
+		printf("Warning: Pool size rounded down to %u\n", pkt_num);
+	}
+
+	test_global->pkt_len = pkt_len;
+	test_global->pkt_num = pkt_num;
+
+	return 0;
+}
+
+/* Print the resolved test configuration (CPUs, interfaces, queue and
+ * burst settings) to stdout. */
+static void print_config(test_global_t *test_global)
+{
+	char cpumask_str[ODP_CPUMASK_STR_SIZE];
+	int i;
+
+	odp_cpumask_to_str(&test_global->cpumask, cpumask_str,
+			   ODP_CPUMASK_STR_SIZE);
+
+	printf("\n"
+	       "Test configuration:\n"
+	       "  max workers:           %i\n"
+	       "  available worker cpus: %s\n"
+	       "  num workers:           %i\n"
+	       "  worker cpus:          ",
+	       test_global->max_workers,
+	       cpumask_str,
+	       test_global->opt.num_worker);
+
+	for (i = 0; i < test_global->opt.num_worker; i++)
+		printf(" %i", test_global->worker_cpu[i]);
+
+	printf("\n"
+	       "  num interfaces:        %i\n"
+	       "  interface names:      ", test_global->opt.num_pktio);
+
+	for (i = 0; i < test_global->opt.num_pktio; i++)
+		printf(" %s", test_global->opt.pktio_name[i]);
+
+	printf("\n"
+	       "  queues per interface:  %i\n",
+	       test_global->opt.num_pktio_queue);
+
+	printf("  burst size:            %u\n", test_global->opt.burst_size);
+	printf("  collect statistics:    %u\n", test_global->opt.collect_stat);
+	printf("  timeout usec:          %li\n", test_global->opt.timeout_us);
+
+	printf("\n");
+}
+
+/* Print per-worker and total packet statistics, plus packet rates over
+ * the given test duration (nsec). Also stores the rx/tx sums into
+ * test_global for later pass/fail evaluation. */
+static void print_stat(test_global_t *test_global, uint64_t nsec)
+{
+	int i;
+	uint64_t rx, tx, pipe, drop, tmo;
+	uint64_t rx_sum = 0;
+	uint64_t tx_sum = 0;
+	uint64_t pipe_sum = 0;
+	uint64_t tmo_sum = 0;
+	double sec = 0.0;
+
+	printf("\nTest statistics\n");
+	printf("  worker           rx_pkt           tx_pkt             pipe          dropped              tmo\n");
+
+	for (i = 0; i < test_global->opt.num_worker; i++) {
+		rx = test_global->worker_stat[i].rx_pkt;
+		tx = test_global->worker_stat[i].tx_pkt;
+		pipe = test_global->worker_stat[i].pipe_pkt;
+		tmo = test_global->worker_stat[i].tmo;
+		rx_sum += rx;
+		tx_sum += tx;
+		pipe_sum += pipe;
+		tmo_sum  += tmo;
+		drop = test_global->worker_stat[i].tx_drop +
+		       test_global->worker_stat[i].pipe_drop;
+
+		printf("  %6i %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16"
+		       PRIu64 " %16" PRIu64 "\n", i, rx, tx, pipe, drop, tmo);
+	}
+
+	test_global->rx_pkt_sum = rx_sum;
+	test_global->tx_pkt_sum = tx_sum;
+	/* Total drops derived from the rx/tx difference */
+	drop = rx_sum - tx_sum;
+
+	printf("         ------------------------------------------------------------------------------------\n");
+	printf("  total  %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16"
+	       PRIu64 " %16" PRIu64 "\n\n", rx_sum, tx_sum, pipe_sum, drop,
+	       tmo_sum);
+
+	sec = nsec / 1000000000.0;
+	printf("  Total test time: %.2f sec\n", sec);
+	printf("  Rx packet rate:  %.2f pps\n", rx_sum / sec);
+	printf("  Tx packet rate:  %.2f pps\n", tx_sum / sec);
+	printf("  Drop rate:       %.2f pps\n", drop / sec);
+	printf("  Timeout rate:    %.2f per sec\n\n", tmo_sum / sec);
+}
+
+/* Create the packet pool, then open and configure every interface:
+ * scheduled input queues (with flow hashing when multiple queues),
+ * per-queue contexts and direct output queues.
+ *
+ * Returns 0 on success, -1 on any failure (pool and opened pktios are
+ * cleaned up by the caller's teardown path). */
+static int open_pktios(test_global_t *test_global)
+{
+	odp_pool_param_t pool_param;
+	odp_pktio_param_t pktio_param;
+	odp_pool_t pool;
+	odp_pktio_t pktio;
+	odp_pktio_capability_t pktio_capa;
+	odp_pktio_config_t pktio_config;
+	odp_pktin_queue_param_t pktin_param;
+	odp_pktout_queue_param_t pktout_param;
+	odp_schedule_sync_t sched_sync;
+	uint32_t num_queue, j;
+	char *name;
+	int i, num_pktio, ret;
+
+	num_pktio = test_global->opt.num_pktio;
+	num_queue = test_global->opt.num_pktio_queue;
+
+	/* Shared packet pool, sized in config_setup() */
+	odp_pool_param_init(&pool_param);
+	pool_param.pkt.seg_len = MIN_PKT_SEG_LEN;
+	pool_param.pkt.len     = test_global->pkt_len;
+	pool_param.pkt.num     = test_global->pkt_num;
+	pool_param.type        = ODP_POOL_PACKET;
+
+	pool = odp_pool_create("packet pool", &pool_param);
+
+	test_global->pool = pool;
+
+	if (pool == ODP_POOL_INVALID) {
+		printf("Error: Pool create.\n");
+		return -1;
+	}
+
+	odp_pktio_param_init(&pktio_param);
+	pktio_param.in_mode  = ODP_PKTIN_MODE_SCHED;
+	pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+	sched_sync = sched_sync_mode(test_global);
+
+	/* Mark all slots invalid so teardown can skip unopened pktios */
+	for (i = 0; i < num_pktio; i++)
+		test_global->pktio[i].pktio = ODP_PKTIO_INVALID;
+
+	/* Open and configure interfaces */
+	for (i = 0; i < num_pktio; i++) {
+		name  = test_global->opt.pktio_name[i];
+		pktio = odp_pktio_open(name, pool, &pktio_param);
+
+		if (pktio == ODP_PKTIO_INVALID) {
+			printf("Error (%s): Pktio open failed.\n", name);
+			return -1;
+		}
+
+		test_global->pktio[i].pktio = pktio;
+		test_global->pktio[i].pktio_index = odp_pktio_index(pktio);
+
+		ret = odp_pktio_mac_addr(pktio,
+					 test_global->pktio[i].my_addr.addr,
+					 ODPH_ETHADDR_LEN);
+		if (ret != ODPH_ETHADDR_LEN) {
+			printf("Error (%s): Bad MAC address len.\n", name);
+			return -1;
+		}
+
+		odp_pktio_print(pktio);
+
+		if (odp_pktio_capability(pktio, &pktio_capa)) {
+			printf("Error (%s): Pktio capa failed.\n", name);
+			return -1;
+		}
+
+		if (num_queue > pktio_capa.max_input_queues) {
+			printf("Error (%s): Too many input queues: %u\n",
+			       name, num_queue);
+			return -1;
+		}
+
+		if (num_queue > pktio_capa.max_output_queues) {
+			printf("Error (%s): Too many output queues: %u\n",
+			       name, num_queue);
+			return -1;
+		}
+
+		/* Packet parsing not needed by this test */
+		odp_pktio_config_init(&pktio_config);
+		pktio_config.parser.layer = ODP_PROTO_LAYER_NONE;
+
+		odp_pktio_config(pktio, &pktio_config);
+
+		odp_pktin_queue_param_init(&pktin_param);
+
+		pktin_param.queue_param.sched.prio  = odp_schedule_default_prio();
+		pktin_param.queue_param.sched.sync  = sched_sync;
+		pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+		/* Spread flows over queues by IPv4/UDP hash */
+		if (num_queue > 1) {
+			pktin_param.hash_enable = 1;
+			pktin_param.hash_proto.proto.ipv4_udp = 1;
+		}
+
+		pktin_param.num_queues = num_queue;
+
+		if (odp_pktin_queue_config(pktio, &pktin_param)) {
+			printf("Error (%s): Pktin config failed.\n", name);
+			return -1;
+		}
+
+		if (odp_pktin_event_queue(pktio,
+					  test_global->pktio[i].input_queue,
+					  num_queue) != (int)num_queue) {
+			printf("Error (%s): Input queue query failed.\n", name);
+			return -1;
+		}
+
+		/* Attach a pktin_queue_context_t to each input queue */
+		for (j = 0; j < num_queue; j++) {
+			odp_queue_t queue;
+			void *ctx;
+			uint32_t len = sizeof(pktin_queue_context_t);
+
+			queue = test_global->pktio[i].input_queue[j];
+			ctx = &test_global->pktio[i].queue_context[j];
+
+			if (odp_queue_context_set(queue, ctx, len)) {
+				printf("Error (%s): Queue ctx set failed.\n",
+				       name);
+				return -1;
+			}
+		}
+
+		/* Output queues are MT unsafe in the direct/timer modes
+		 * (one worker per queue); pipeline mode needs MT safe. */
+		odp_pktout_queue_param_init(&pktout_param);
+		pktout_param.num_queues = num_queue;
+		pktout_param.op_mode    = ODP_PKTIO_OP_MT_UNSAFE;
+
+		if (test_global->opt.pipe_stages)
+			pktout_param.op_mode = ODP_PKTIO_OP_MT;
+
+		if (odp_pktout_queue_config(pktio, &pktout_param)) {
+			printf("Error (%s): Pktout config failed.\n", name);
+			return -1;
+		}
+
+		if (odp_pktout_queue(pktio,
+				     test_global->pktio[i].pktout,
+				     num_queue) != (int)num_queue) {
+			printf("Error (%s): Output queue query failed.\n",
+			       name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Build the static forwarding table: map each input interface/queue to an
+ * output pktio queue. With a single interface traffic is looped back;
+ * otherwise packets are forwarded to the next interface (round robin). */
+static void link_pktios(test_global_t *test_global)
+{
+	int i, num_pktio, input, output;
+	int num_queue;
+	odp_pktout_queue_t pktout;
+	pktin_queue_context_t *ctx;
+
+	num_pktio = test_global->opt.num_pktio;
+	num_queue = test_global->opt.num_pktio_queue;
+
+	printf("Forwarding table (pktio indexes)\n");
+
+	/* If single interface loopback, otherwise forward to the next
+	 * interface. */
+	for (input = 0; input < num_pktio; input++) {
+		output = (input + 1) % num_pktio;
+		printf(" input %i, output %i\n", input, output);
+
+		/* Queue i of the input maps to queue i of the output */
+		for (i = 0; i < num_queue; i++) {
+			ctx = &test_global->pktio[input].queue_context[i];
+			pktout = test_global->pktio[output].pktout[i];
+			/* Packet input is pipeline stage 0 */
+			ctx->stage = 0;
+			ctx->queue_idx = i;
+			ctx->dst_pktout = pktout;
+			ctx->dst_pktio = output;
+			ctx->dst_queue = i;
+			ctx->src_pktio = input;
+			ctx->src_queue = i;
+		}
+	}
+
+	printf("\n");
+}
+
+/* Start packet I/O on all configured interfaces. Marks each interface as
+ * started so that stop_pktios() only stops interfaces that actually run.
+ * Returns 0 on success, -1 on the first failure. */
+static int start_pktios(test_global_t *test_global)
+{
+	int i;
+
+	for (i = 0; i < test_global->opt.num_pktio; i++) {
+		if (odp_pktio_start(test_global->pktio[i].pktio)) {
+			printf("Error (%s): Pktio start failed.\n",
+			       test_global->opt.pktio_name[i]);
+
+			return -1;
+		}
+
+		test_global->pktio[i].started = 1;
+	}
+
+	return 0;
+}
+
+/* Stop all interfaces that were opened and started. Continues past
+ * individual failures; returns 0 on success, -1 if any stop failed. */
+static int stop_pktios(test_global_t *test_global)
+{
+	odp_pktio_t pktio;
+	int i, ret = 0;
+
+	for (i = 0; i < test_global->opt.num_pktio; i++) {
+		pktio = test_global->pktio[i].pktio;
+
+		/* Skip interfaces that were never opened or never started */
+		if (pktio == ODP_PKTIO_INVALID ||
+		    test_global->pktio[i].started == 0)
+			continue;
+
+		if (odp_pktio_stop(pktio)) {
+			printf("Error (%s): Pktio stop failed.\n",
+			       test_global->opt.pktio_name[i]);
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+/* Drain the scheduler: receive and free events from all scheduled queues
+ * until no event appears within wait_ns nanoseconds. */
+static void empty_queues(uint64_t wait_ns)
+{
+	odp_event_t ev;
+	uint64_t wait_time = odp_schedule_wait_time(wait_ns);
+
+	/* Drop all events from all queues */
+	while (1) {
+		ev = odp_schedule(NULL, wait_time);
+
+		if (ev == ODP_EVENT_INVALID)
+			break;
+
+		odp_event_free(ev);
+	}
+}
+
+/* Close all opened pktio interfaces and destroy the packet pool. Continues
+ * past individual close failures. Returns 0 on success, -1 otherwise. */
+static int close_pktios(test_global_t *test_global)
+{
+	odp_pktio_t pktio;
+	odp_pool_t pool;
+	int i, ret = 0;
+
+	for (i = 0; i < test_global->opt.num_pktio; i++) {
+		pktio = test_global->pktio[i].pktio;
+
+		/* Skip interfaces that were never opened */
+		if (pktio == ODP_PKTIO_INVALID)
+			continue;
+
+		if (odp_pktio_close(pktio)) {
+			printf("Error (%s): Pktio close failed.\n",
+			       test_global->opt.pktio_name[i]);
+			ret = -1;
+		}
+	}
+
+	pool = test_global->pool;
+
+	/* Pool create may have failed (or was never attempted) */
+	if (pool == ODP_POOL_INVALID)
+		return ret;
+
+	if (odp_pool_destroy(pool)) {
+		printf("Error: Pool destroy failed.\n");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/* Create the scheduled queues used as pipeline stages between packet input
+ * and output. One set of stage/queue contexts is shared by all pktios.
+ * Returns 0 on success, -1 on failure. */
+static int create_pipeline_queues(test_global_t *test_global)
+{
+	int i, j, k, num_pktio, stages, queues, ctx_size;
+	pipe_queue_context_t *ctx;
+	odp_queue_param_t queue_param;
+	odp_schedule_sync_t sched_sync;
+	int ret = 0;
+
+	num_pktio = test_global->opt.num_pktio;
+	stages = test_global->opt.pipe_stages;
+	queues = test_global->opt.pipe_queues;
+	sched_sync = sched_sync_mode(test_global);
+
+	odp_queue_param_init(&queue_param);
+	queue_param.type = ODP_QUEUE_TYPE_SCHED;
+	queue_param.sched.prio = odp_schedule_default_prio();
+	queue_param.sched.sync = sched_sync;
+	queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+	/* Requested queue size must fit the scheduler limit (0 == no limit) */
+	queue_param.size = test_global->opt.pipe_queue_size;
+	if (test_global->schedule_config.queue_size &&
+	    queue_param.size > test_global->schedule_config.queue_size) {
+		printf("Error: Pipeline queue max size is %u\n",
+		       test_global->schedule_config.queue_size);
+		return -1;
+	}
+
+	ctx_size = sizeof(pipe_queue_context_t);
+
+	/* Init contexts: indexed only by stage/queue, shared by all pktios */
+	for (i = 0; i < stages; i++) {
+		for (j = 0; j < queues; j++) {
+			ctx = &test_global->pipe_queue_ctx[i][j].ctx;
+
+			/* packet input is stage 0 */
+			ctx->stage = i + 1;
+			ctx->queue_idx = j;
+		}
+	}
+
+	for (k = 0; k < num_pktio; k++) {
+		for (i = 0; i < stages; i++) {
+			for (j = 0; j < queues; j++) {
+				odp_queue_t q;
+
+				q = odp_queue_create(NULL, &queue_param);
+				test_global->pipe_queue[k][i].queue[j] = q;
+
+				if (q == ODP_QUEUE_INVALID) {
+					printf("Error: Queue create failed [%i] %i/%i\n",
+					       k, i, j);
+					/* NOTE(review): break exits only the
+					 * innermost loop; creation continues
+					 * for remaining stages/pktios with
+					 * ret already set to -1 */
+					ret = -1;
+					break;
+				}
+
+				ctx = &test_global->pipe_queue_ctx[i][j].ctx;
+
+				if (odp_queue_context_set(q, ctx, ctx_size)) {
+					printf("Error: Queue ctx set failed [%i] %i/%i\n",
+					       k, i, j);
+					ret = -1;
+					break;
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+/* Destroy all pipeline queues created by create_pipeline_queues(). Stops at
+ * the first invalid handle or destroy failure (remaining queues are left as
+ * is; ODP termination will report any leaks). */
+static void destroy_pipeline_queues(test_global_t *test_global)
+{
+	int i, j, k, num_pktio, stages, queues;
+	odp_queue_t queue;
+
+	num_pktio = test_global->opt.num_pktio;
+	stages = test_global->opt.pipe_stages;
+	queues = test_global->opt.pipe_queues;
+
+	for (k = 0; k < num_pktio; k++) {
+		for (i = 0; i < stages; i++) {
+			for (j = 0; j < queues; j++) {
+				queue = test_global->pipe_queue[k][i].queue[j];
+
+				if (queue == ODP_QUEUE_INVALID) {
+					printf("Error: Bad queue handle [%i] %i/%i\n",
+					       k, i, j);
+					return;
+				}
+
+				if (odp_queue_destroy(queue)) {
+					printf("Error: Queue destroy failed [%i] %i/%i\n",
+					       k, i, j);
+					return;
+				}
+			}
+		}
+	}
+}
+
+/* Create a timer pool, one timer per pktio input queue, and a timeout event
+ * pool. No-op (returns 0) when timeouts are not used (timeout_us == 0).
+ * Returns 0 on success, -1 on failure. */
+static int create_timers(test_global_t *test_global)
+{
+	int num_timer, num_pktio, num_queue, i, j;
+	odp_pool_t pool;
+	odp_pool_param_t pool_param;
+	odp_timer_pool_t timer_pool;
+	odp_timer_pool_param_t timer_param;
+	odp_timer_capability_t timer_capa;
+	odp_timer_t timer;
+	odp_queue_t queue;
+	uint64_t res_ns, tick;
+	uint64_t timeout_ns = 1000 * test_global->opt.timeout_us;
+
+	num_pktio = test_global->opt.num_pktio;
+	num_queue = test_global->opt.num_pktio_queue;
+	num_timer = num_pktio * num_queue;
+
+	/* Always init globals for destroy calls */
+	test_global->timer.timer_pool = ODP_TIMER_POOL_INVALID;
+	test_global->timer.timeout_pool = ODP_POOL_INVALID;
+
+	for (i = 0; i < num_pktio; i++)
+		for (j = 0; j < num_queue; j++)
+			test_global->timer.timer[i][j] = ODP_TIMER_INVALID;
+
+	/* Timers not used */
+	if (test_global->opt.timeout_us == 0)
+		return 0;
+
+	if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
+		printf("Timer capa failed\n");
+		return -1;
+	}
+
+	/* Request a resolution of one tenth of the timeout */
+	res_ns = timeout_ns / 10;
+
+	if (timer_capa.highest_res_ns > res_ns) {
+		/* Min timeout is 10 * highest_res_ns nsec, which equals
+		 * highest_res_ns / 100 usec */
+		printf("Timeout too short. Min timeout %" PRIu64 " usec\n",
+		       timer_capa.highest_res_ns / 100);
+		return -1;
+	}
+
+	odp_timer_pool_param_init(&timer_param);
+	timer_param.res_ns = res_ns;
+	/* All timers use the same, fixed timeout */
+	timer_param.min_tmo = timeout_ns;
+	timer_param.max_tmo = timeout_ns;
+	timer_param.num_timers = num_timer;
+	timer_param.clk_src = ODP_CLOCK_DEFAULT;
+
+	timer_pool = odp_timer_pool_create("sched_pktio_timer", &timer_param);
+
+	if (timer_pool == ODP_TIMER_POOL_INVALID) {
+		printf("Timer pool create failed\n");
+		return -1;
+	}
+
+	if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+		ODPH_ERR("Timer pool start failed\n");
+		return -1;
+	}
+
+	test_global->timer.timer_pool = timer_pool;
+	tick = odp_timer_ns_to_tick(timer_pool, timeout_ns);
+	test_global->timer.timeout_tick = tick;
+
+	/* One timer per input queue; timeout events are delivered to the
+	 * queue itself */
+	for (i = 0; i < num_pktio; i++) {
+		for (j = 0; j < num_queue; j++) {
+			queue = test_global->pktio[i].input_queue[j];
+			timer = odp_timer_alloc(timer_pool, queue, NULL);
+
+			if (timer == ODP_TIMER_INVALID) {
+				printf("Timer alloc failed.\n");
+				return -1;
+			}
+
+			test_global->timer.timer[i][j] = timer;
+		}
+	}
+
+	odp_pool_param_init(&pool_param);
+	pool_param.type = ODP_POOL_TIMEOUT;
+	pool_param.tmo.num = num_timer;
+
+	pool = odp_pool_create("timeout pool", &pool_param);
+
+	if (pool == ODP_POOL_INVALID) {
+		printf("Timeout pool create failed.\n");
+		return -1;
+	}
+
+	test_global->timer.timeout_pool = pool;
+
+	return 0;
+}
+
+/* Arm every timer created by create_timers() with the same relative
+ * timeout, allocating one timeout event per timer. No-op (returns 0) when
+ * timeouts are not used. Returns 0 on success, -1 on failure. */
+static int start_timers(test_global_t *test_global)
+{
+	int i, j;
+	odp_timeout_t timeout;
+	odp_timer_t timer;
+	odp_timer_retval_t ret;
+	odp_timer_start_t start_param;
+	uint64_t timeout_tick = test_global->timer.timeout_tick;
+	int num_pktio = test_global->opt.num_pktio;
+	int num_queue = test_global->opt.num_pktio_queue;
+	odp_pool_t pool = test_global->timer.timeout_pool;
+
+	/* Timers not used */
+	if (test_global->opt.timeout_us == 0)
+		return 0;
+
+	/* Same relative expiration tick for every timer; only tmo_ev varies
+	 * per timer below */
+	start_param.tick_type = ODP_TIMER_TICK_REL;
+	start_param.tick = timeout_tick;
+
+	for (i = 0; i < num_pktio; i++) {
+		for (j = 0; j < num_queue; j++) {
+			timer = test_global->timer.timer[i][j];
+
+			timeout = odp_timeout_alloc(pool);
+			if (timeout == ODP_TIMEOUT_INVALID) {
+				printf("Timeout alloc failed\n");
+				return -1;
+			}
+
+			start_param.tmo_ev = odp_timeout_to_event(timeout);
+
+			ret = odp_timer_start(timer, &start_param);
+			if (ret != ODP_TIMER_SUCCESS) {
+				printf("Timer set failed\n");
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Free all timers and destroy the timeout and timer pools. Safe to call
+ * when timers were never created (timer_pool is ODP_TIMER_POOL_INVALID). */
+static void destroy_timers(test_global_t *test_global)
+{
+	int i, j;
+	odp_timer_t timer;
+	int num_pktio = test_global->opt.num_pktio;
+	int num_queue = test_global->opt.num_pktio_queue;
+	odp_timer_pool_t timer_pool = test_global->timer.timer_pool;
+	odp_pool_t pool = test_global->timer.timeout_pool;
+
+	if (timer_pool == ODP_TIMER_POOL_INVALID)
+		return;
+
+	/* Wait for any remaining timers to expire and drop their events.
+	 * 2000 * usec == 2 * timeout in nsec. */
+	empty_queues(2000 * test_global->opt.timeout_us);
+
+	for (i = 0; i < num_pktio; i++) {
+		for (j = 0; j < num_queue; j++) {
+			timer = test_global->timer.timer[i][j];
+
+			/* Timers were allocated in order, so the first
+			 * invalid handle ends this row */
+			if (timer == ODP_TIMER_INVALID)
+				break;
+
+			if (odp_timer_free(timer))
+				printf("Timer free failed: %i, %i\n", i, j);
+		}
+	}
+
+	if (pool != ODP_POOL_INVALID) {
+		if (odp_pool_destroy(pool))
+			printf("Timeout pool destroy failed\n");
+	}
+
+	odp_timer_pool_destroy(timer_pool);
+}
+
+/* Launch the worker threads, pinned to the CPUs selected in config setup.
+ * All workers run the same entry function, chosen once by test mode:
+ * timers > pipeline > direct forwarding. */
+static void start_workers(odph_thread_t thread[],
+			  test_global_t *test_global)
+{
+	int i;
+	odp_cpumask_t cpumask;
+	odph_thread_common_param_t thr_common;
+	odph_thread_param_t thr_param[MAX_WORKERS];
+	int num = test_global->opt.num_worker;
+
+	odp_cpumask_zero(&cpumask);
+
+	odph_thread_common_param_init(&thr_common);
+	thr_common.instance = test_global->instance;
+	thr_common.cpumask = &cpumask;
+
+	for (i = 0; i < num; i++) {
+		odp_cpumask_set(&cpumask, test_global->worker_cpu[i]);
+		test_global->worker_arg[i].worker_id = i;
+		test_global->worker_arg[i].test_global_ptr = test_global;
+
+		odph_thread_param_init(&thr_param[i]);
+
+		if (!i) {
+			/* Select the worker function by test mode */
+			if (test_global->opt.timeout_us)
+				thr_param[0].start = worker_thread_timers;
+			else if (test_global->opt.pipe_stages)
+				thr_param[0].start = worker_thread_pipeline;
+			else
+				thr_param[0].start = worker_thread_direct;
+		} else {
+			/* Remaining workers reuse the first selection */
+			thr_param[i].start = thr_param[0].start;
+		}
+
+		thr_param[i].arg = &test_global->worker_arg[i];
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+	}
+
+	memset(thread, 0, num * sizeof(odph_thread_t));
+	odph_thread_create(thread, &thr_common, thr_param, num);
+}
+
+/* Block until all worker threads have exited */
+static void wait_workers(odph_thread_t thread[], test_global_t *test_global)
+{
+	odph_thread_join(thread, test_global->opt.num_worker);
+}
+
+/* Test entry point: init ODP, set up pools/pktios/queues/timers, run the
+ * workers, then tear everything down. With -s (collect_stat) the exit code
+ * encodes rx/tx success bits for the wrapper script. */
+int main(int argc, char *argv[])
+{
+	odp_instance_t instance;
+	odp_init_t init;
+	odp_shm_t shm;
+	odp_time_t t1 = ODP_TIME_NULL, t2 = ODP_TIME_NULL;
+	odph_helper_options_t helper_options;
+	odph_thread_t thread[MAX_WORKERS];
+	test_options_t test_options;
+	int ret = 0;
+
+	/* Let helper collect its own arguments (e.g. --odph_proc) */
+	argc = odph_parse_options(argc, argv);
+	if (odph_options(&helper_options)) {
+		printf("Error: reading ODP helper options failed.\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* SIGINT requests a stop (handler defined elsewhere in this file) */
+	signal(SIGINT, sig_handler);
+
+	if (parse_options(argc, argv, &test_options))
+		return -1;
+
+	/* List features not to be used (may optimize performance) */
+	odp_init_param_init(&init);
+	init.not_used.feat.cls = 1;
+	init.not_used.feat.compress = 1;
+	init.not_used.feat.crypto = 1;
+	init.not_used.feat.ipsec = 1;
+	init.not_used.feat.tm = 1;
+	init.not_used.feat.timer = 1;
+
+	/* Timer feature is needed only in timeout mode */
+	if (test_options.timeout_us)
+		init.not_used.feat.timer = 0;
+
+	init.mem_model = helper_options.mem_model;
+
+	/* Init ODP before calling anything else */
+	if (odp_init_global(&instance, &init, NULL)) {
+		printf("Error: Global init failed.\n");
+		return -1;
+	}
+
+	/* Init this thread */
+	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+		printf("Error: Local init failed.\n");
+		return -1;
+	}
+
+	/* Reserve memory for args from shared mem */
+	shm = odp_shm_reserve("test_global", sizeof(test_global_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	if (shm == ODP_SHM_INVALID) {
+		printf("Error: shm reserve failed.\n");
+		return -1;
+	}
+
+	test_global = odp_shm_addr(shm);
+	memset(test_global, 0, sizeof(test_global_t));
+
+	test_global->instance = instance;
+	test_global->pool = ODP_POOL_INVALID;
+
+	memcpy(&test_global->opt, &test_options, sizeof(test_options_t));
+
+	odp_sys_info_print();
+
+	if (config_setup(test_global))
+		goto quit;
+
+	print_config(test_global);
+
+	if (open_pktios(test_global))
+		goto quit;
+
+	link_pktios(test_global);
+
+	if (create_pipeline_queues(test_global))
+		goto quit;
+
+	if (create_timers(test_global))
+		goto quit;
+
+	if (start_pktios(test_global))
+		goto quit;
+
+	/* +1: this (control) thread also waits on the barrier */
+	odp_barrier_init(&test_global->worker_start,
+			 test_global->opt.num_worker + 1);
+
+	start_workers(thread, test_global);
+
+	/* If timers cannot be started, tell workers to stop immediately */
+	if (start_timers(test_global)) {
+		test_global->stop_workers = 1;
+		odp_mb_full();
+	}
+
+	/* Synchronize pktio configuration with workers. Worker are now ready
+	 * to process packets. */
+	odp_barrier_wait(&test_global->worker_start);
+
+	t1 = odp_time_local();
+
+	wait_workers(thread, test_global);
+
+	t2 = odp_time_local();
+
+quit:
+	/* Teardown in reverse order; each step tolerates earlier failures */
+	stop_pktios(test_global);
+	empty_queues(ODP_TIME_SEC_IN_NS / 2);
+	close_pktios(test_global);
+	destroy_pipeline_queues(test_global);
+	destroy_timers(test_global);
+
+	if (test_global->opt.collect_stat) {
+		print_stat(test_global, odp_time_diff_ns(t2, t1));
+
+		/* Encode return value for validation test usage:
+		 * bit 0 = enough packets received,
+		 * bit 1 = enough packets transmitted (3 == both passed) */
+		if (test_global->rx_pkt_sum > TEST_PASSED_LIMIT)
+			ret += 1;
+
+		if (test_global->tx_pkt_sum > TEST_PASSED_LIMIT)
+			ret += 2;
+	}
+	test_global = NULL;
+	odp_mb_full();
+
+	if (odp_shm_free(shm)) {
+		printf("Error: shm free failed.\n");
+		ret = -1;
+	}
+
+	if (odp_term_local()) {
+		printf("Error: term local failed.\n");
+		ret = -1;
+	}
+
+	if (odp_term_global(instance)) {
+		printf("Error: term global failed.\n");
+		ret = -1;
+	}
+
+	return ret;
+}
diff --git a/test/performance/odp_sched_pktio_run.sh b/test/performance/odp_sched_pktio_run.sh
new file mode 100755
index 000000000..dd332c191
--- /dev/null
+++ b/test/performance/odp_sched_pktio_run.sh
@@ -0,0 +1,109 @@
+#!/bin/sh
+#
+# Copyright (c) 2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# directory where test binaries have been built
+TEST_DIR="${TEST_DIR:-$PWD}"
+# directory where test sources are, including scripts
+TEST_SRC_DIR=$(dirname $0)
+
+# odp_generator is expected either installed or in the example build dir
+PATH=$TEST_DIR:$TEST_DIR/../../example/generator:$PATH
+
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+VALIDATION_TESTDIR=platform/$ODP_PLATFORM/test/validation
+PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$VALIDATION_TESTDIR
+
+# passed to odp_generator --interval; 0 presumably means send with no
+# inter-packet delay (flood) -- confirm against odp_generator usage
+FLOOD_MODE=0
+
+# Use installed pktio env or for make check take it from platform directory
+if [ -f "./pktio_env" ]; then
+	. ./pktio_env
+elif [ "$ODP_PLATFORM" = "" ]; then
+	echo "$0: error: ODP_PLATFORM must be defined"
+	# not skipped as this should never happen via "make check"
+	exit 1
+elif [ -f ${PLATFORM_VALIDATION}/api/pktio/pktio_env ]; then
+	. ${PLATFORM_VALIDATION}/api/pktio/pktio_env
+else
+	echo "BUG: unable to find pktio_env!"
+	echo "pktio_env has to be in current directory or "
+	echo "in platform/\$ODP_PLATFORM/test."
+	echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
+	exit 1
+fi
+
+run_sched_pktio()
+{
+ setup_pktio_env clean # install trap to call cleanup_pktio_env
+
+ if [ $? -ne 0 ]; then
+ echo "setup_pktio_env error $?"
+ exit $TEST_SKIPPED
+ fi
+
+ type odp_generator > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "odp_generator not installed. Aborting."
+ cleanup_pktio_env
+ exit 1
+ fi
+
+ # 1 worker
+ export ODP_PLATFORM_PARAMS="-m 512 --file-prefix="sched" \
+--proc-type auto --no-pci --vdev net_pcap1,iface=$IF1 \
+--vdev net_pcap2,iface=$IF2"
+
+ odp_sched_pktio${EXEEXT} -i 0,1 -c 1 -s &
+
+ TEST_PID=$!
+
+ sleep 1
+
+ # Run generator with one worker
+ export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="gen" \
+--proc-type auto --no-pci \
+--vdev net_pcap0,iface=$IF0"
+
+ (odp_generator${EXEEXT} --interval $FLOOD_MODE -I 0 \
+ --srcip 192.168.0.1 --dstip 192.168.0.2 \
+ -m u -w 1 2>&1 > /dev/null) \
+ 2>&1 > /dev/null &
+
+ GEN_PID=$!
+
+ # Run test for 5 sec
+ sleep 5
+
+ kill -2 ${GEN_PID}
+ wait ${GEN_PID}
+
+ # Kill with SIGINT to output statistics
+ kill -2 ${TEST_PID}
+ wait ${TEST_PID}
+
+ ret=$?
+
+ if [ $ret -eq 3 ]; then
+ echo "PASS: received and transmitted over 5000 packets"
+ ret=0
+ else
+ echo "FAIL: less than thousand rx or tx packets $ret"
+ ret=1
+ fi
+
+ cleanup_pktio_env
+
+ exit $ret
+}
+
+# Allow the script to run only environment setup or cleanup; any other
+# (or no) argument runs the full test
+case "$1" in
+	setup) setup_pktio_env ;;
+	cleanup) cleanup_pktio_env ;;
+	*) run_sched_pktio ;;
+esac
diff --git a/test/common_plat/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index c74a07133..c9f3eb89f 100644
--- a/test/common_plat/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -1,21 +1,20 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2018 Linaro Limited
+ * Copyright (c) 2019-2023 Nokia
*/
/**
- * @file
+ * @example odp_scheduling.c
+ *
+ * Performance test application for miscellaneous scheduling operations
*
- * @example odp_example.c ODP example application
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
*/
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
-#include <test_debug.h>
-
/* ODP main header */
#include <odp_api.h>
@@ -28,7 +27,7 @@
/* GNU lib C */
#include <getopt.h>
-#define NUM_MSG (512 * 1024) /**< Number of msg in pool */
+#define MAX_BUF (512 * 1024) /**< Maximum pool size */
#define MAX_ALLOCS 32 /**< Alloc burst size */
#define QUEUES_PER_PRIO 64 /**< Queue per priority */
#define NUM_PRIOS 2 /**< Number of tested priorities */
@@ -49,16 +48,14 @@ typedef struct {
/** Test arguments */
typedef struct {
- int cpu_count; /**< CPU count */
+ double test_sec; /**< CPU frequency test duration in seconds */
+ unsigned int cpu_count; /**< CPU count */
int fairness; /**< Check fairness */
} test_args_t;
-typedef struct {
+typedef struct ODP_ALIGNED_CACHE {
uint64_t num_ev;
-
- /* Round up the struct size to cache line size */
- uint8_t pad[ODP_CACHE_LINE_SIZE - sizeof(uint64_t)];
-} queue_context_t ODP_ALIGNED_CACHE;
+} queue_context_t;
/** Test global variables */
typedef struct {
@@ -66,6 +63,7 @@ typedef struct {
odp_spinlock_t lock;
odp_pool_t pool;
int first_thr;
+ int queues_per_prio;
test_args_t args;
odp_queue_t queue[NUM_PRIOS][QUEUES_PER_PRIO];
queue_context_t queue_ctx[NUM_PRIOS][QUEUES_PER_PRIO];
@@ -76,14 +74,14 @@ static void print_stats(int prio, test_globals_t *globals)
{
int i, j, k;
- if (prio == ODP_SCHED_PRIO_HIGHEST)
+ if (prio == odp_schedule_max_prio())
i = 0;
else
i = 1;
printf("\nQueue fairness\n-----+--------\n");
- for (j = 0; j < QUEUES_PER_PRIO;) {
+ for (j = 0; j < globals->queues_per_prio;) {
printf(" %2i | ", j);
for (k = 0; k < STATS_PER_LINE - 1; k++) {
@@ -136,7 +134,7 @@ static int enqueue_events(int thr, int prio, int num_queues, int num_events,
odp_queue_t queue;
int i, j, k, ret;
- if (prio == ODP_SCHED_PRIO_HIGHEST)
+ if (prio == odp_schedule_max_prio())
i = 0;
else
i = 1;
@@ -147,14 +145,15 @@ static int enqueue_events(int thr, int prio, int num_queues, int num_events,
ret = odp_buffer_alloc_multi(globals->pool, buf, num_events);
if (ret != num_events) {
- LOG_ERR(" [%i] buffer alloc failed\n", thr);
+ ODPH_ERR(" [%i] buffer alloc failed\n", thr);
ret = ret < 0 ? 0 : ret;
+ ret = ret > num_events ? num_events : ret; /* GCC-9 -O3 workaround */
odp_buffer_free_multi(buf, ret);
return -1;
}
for (k = 0; k < num_events; k++) {
if (!odp_buffer_is_valid(buf[k])) {
- LOG_ERR(" [%i] buffer alloc failed\n", thr);
+ ODPH_ERR(" [%i] buffer alloc failed\n", thr);
odp_buffer_free_multi(buf, num_events);
return -1;
}
@@ -163,7 +162,7 @@ static int enqueue_events(int thr, int prio, int num_queues, int num_events,
ret = odp_queue_enq_multi(queue, ev, num_events);
if (ret != num_events) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
ret = ret < 0 ? 0 : ret;
odp_buffer_free_multi(&buf[ret], num_events - ret);
return -1;
@@ -193,7 +192,7 @@ static int test_alloc_single(int thr, test_globals_t *globals)
temp_buf = odp_buffer_alloc(globals->pool);
if (!odp_buffer_is_valid(temp_buf)) {
- LOG_ERR(" [%i] alloc_single failed\n", thr);
+ ODPH_ERR(" [%i] alloc_single failed\n", thr);
return -1;
}
@@ -221,34 +220,34 @@ static int test_alloc_single(int thr, test_globals_t *globals)
static int test_alloc_multi(int thr, test_globals_t *globals)
{
int i, j, ret;
- odp_buffer_t temp_buf[MAX_ALLOCS];
+ const int num_alloc = MAX_ALLOCS;
+ odp_buffer_t temp_buf[num_alloc];
uint64_t c1, c2, cycles;
c1 = odp_cpu_cycles();
for (i = 0; i < ALLOC_ROUNDS; i++) {
- ret = odp_buffer_alloc_multi(globals->pool, temp_buf,
- MAX_ALLOCS);
- if (ret != MAX_ALLOCS) {
- LOG_ERR(" [%i] buffer alloc failed\n", thr);
+ ret = odp_buffer_alloc_multi(globals->pool, temp_buf, num_alloc);
+ if (ret != num_alloc) {
+ ODPH_ERR(" [%i] buffer alloc failed\n", thr);
ret = ret < 0 ? 0 : ret;
odp_buffer_free_multi(temp_buf, ret);
return -1;
}
- for (j = 0; j < MAX_ALLOCS; j++) {
+ for (j = 0; j < num_alloc; j++) {
if (!odp_buffer_is_valid(temp_buf[j])) {
- LOG_ERR(" [%i] alloc_multi failed\n", thr);
- odp_buffer_free_multi(temp_buf, MAX_ALLOCS);
+ ODPH_ERR(" [%i] alloc_multi failed\n", thr);
+ odp_buffer_free_multi(temp_buf, num_alloc);
return -1;
}
}
- odp_buffer_free_multi(temp_buf, MAX_ALLOCS);
+ odp_buffer_free_multi(temp_buf, num_alloc);
}
c2 = odp_cpu_cycles();
cycles = odp_cpu_cycles_diff(c2, c1);
- cycles = cycles / (ALLOC_ROUNDS * MAX_ALLOCS);
+ cycles = cycles / (ALLOC_ROUNDS * num_alloc);
printf(" [%i] alloc_multi alloc+free %6" PRIu64 " CPU cycles\n",
thr, cycles);
@@ -273,13 +272,13 @@ static int test_plain_queue(int thr, test_globals_t *globals)
test_message_t *t_msg;
odp_queue_t queue;
uint64_t c1, c2, cycles;
- int i;
+ int i, j;
/* Alloc test message */
buf = odp_buffer_alloc(globals->pool);
if (!odp_buffer_is_valid(buf)) {
- LOG_ERR(" [%i] buffer alloc failed\n", thr);
+ ODPH_ERR(" [%i] buffer alloc failed\n", thr);
return -1;
}
@@ -302,17 +301,25 @@ static int test_plain_queue(int thr, test_globals_t *globals)
ev = odp_buffer_to_event(buf);
if (odp_queue_enq(queue, ev)) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
odp_buffer_free(buf);
return -1;
}
- ev = odp_queue_deq(queue);
+ /* When enqueue and dequeue are decoupled (e.g. not using a
+ * common lock), an enqueued event may not be immediately
+ * visible to dequeue. So we just try again for a while. */
+ for (j = 0; j < 100; j++) {
+ ev = odp_queue_deq(queue);
+ if (ev != ODP_EVENT_INVALID)
+ break;
+ odp_cpu_pause();
+ }
buf = odp_buffer_from_event(ev);
if (!odp_buffer_is_valid(buf)) {
- LOG_ERR(" [%i] Queue empty.\n", thr);
+ ODPH_ERR(" [%i] Queue empty.\n", thr);
return -1;
}
}
@@ -359,7 +366,7 @@ static int test_schedule_single(const char *str, int thr,
ev = odp_schedule(&queue, ODP_SCHED_WAIT);
if (odp_queue_enq(queue, ev)) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
@@ -379,18 +386,19 @@ static int test_schedule_single(const char *str, int thr,
tot++;
if (odp_queue_enq(queue, ev)) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
}
- odp_schedule_resume();
-
c2 = odp_cpu_cycles();
cycles = odp_cpu_cycles_diff(c2, c1);
odp_barrier_wait(&globals->barrier);
+
+ odp_schedule_resume();
+
clear_sched_queues();
cycles = cycles / tot;
@@ -422,7 +430,7 @@ static int test_schedule_many(const char *str, int thr,
uint32_t i;
uint32_t tot;
- if (enqueue_events(thr, prio, QUEUES_PER_PRIO, 1, globals))
+ if (enqueue_events(thr, prio, globals->queues_per_prio, 1, globals))
return -1;
/* Start sched-enq loop */
@@ -432,7 +440,7 @@ static int test_schedule_many(const char *str, int thr,
ev = odp_schedule(&queue, ODP_SCHED_WAIT);
if (odp_queue_enq(queue, ev)) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
@@ -452,18 +460,19 @@ static int test_schedule_many(const char *str, int thr,
tot++;
if (odp_queue_enq(queue, ev)) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
odp_event_free(ev);
return -1;
}
}
- odp_schedule_resume();
-
c2 = odp_cpu_cycles();
cycles = odp_cpu_cycles_diff(c2, c1);
odp_barrier_wait(&globals->barrier);
+
+ odp_schedule_resume();
+
clear_sched_queues();
cycles = cycles / tot;
@@ -493,7 +502,8 @@ static int test_schedule_multi(const char *str, int thr,
int num;
uint32_t tot = 0;
- if (enqueue_events(thr, prio, QUEUES_PER_PRIO, MULTI_BUFS_MAX, globals))
+ if (enqueue_events(thr, prio, globals->queues_per_prio, MULTI_BUFS_MAX,
+ globals))
return -1;
/* Start sched-enq loop */
@@ -514,7 +524,7 @@ static int test_schedule_multi(const char *str, int thr,
/* Assume we can enqueue all events */
if (odp_queue_enq_multi(queue, ev, num) != num) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
return -1;
}
}
@@ -540,17 +550,18 @@ static int test_schedule_multi(const char *str, int thr,
/* Assume we can enqueue all events */
if (odp_queue_enq_multi(queue, ev, num) != num) {
- LOG_ERR(" [%i] Queue enqueue failed.\n", thr);
+ ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
return -1;
}
}
- odp_schedule_resume();
-
c2 = odp_cpu_cycles();
cycles = odp_cpu_cycles_diff(c2, c1);
odp_barrier_wait(&globals->barrier);
+
+ odp_schedule_resume();
+
clear_sched_queues();
if (tot)
@@ -590,7 +601,7 @@ static int run_thread(void *arg ODP_UNUSED)
globals = odp_shm_addr(shm);
if (globals == NULL) {
- LOG_ERR("Shared mem lookup failed\n");
+ ODPH_ERR("Shared mem lookup failed\n");
return -1;
}
@@ -633,19 +644,19 @@ static int run_thread(void *arg ODP_UNUSED)
odp_barrier_wait(barrier);
if (test_schedule_single("sched_____s_lo", thr,
- ODP_SCHED_PRIO_LOWEST, globals))
+ odp_schedule_min_prio(), globals))
return -1;
odp_barrier_wait(barrier);
if (test_schedule_many("sched_____m_lo", thr,
- ODP_SCHED_PRIO_LOWEST, globals))
+ odp_schedule_min_prio(), globals))
return -1;
odp_barrier_wait(barrier);
if (test_schedule_multi("sched_multi_lo", thr,
- ODP_SCHED_PRIO_LOWEST, globals))
+ odp_schedule_min_prio(), globals))
return -1;
/* High prio */
@@ -653,19 +664,19 @@ static int run_thread(void *arg ODP_UNUSED)
odp_barrier_wait(barrier);
if (test_schedule_single("sched_____s_hi", thr,
- ODP_SCHED_PRIO_HIGHEST, globals))
+ odp_schedule_max_prio(), globals))
return -1;
odp_barrier_wait(barrier);
if (test_schedule_many("sched_____m_hi", thr,
- ODP_SCHED_PRIO_HIGHEST, globals))
+ odp_schedule_max_prio(), globals))
return -1;
odp_barrier_wait(barrier);
if (test_schedule_multi("sched_multi_hi", thr,
- ODP_SCHED_PRIO_HIGHEST, globals))
+ odp_schedule_max_prio(), globals))
return -1;
printf("Thread %i exits\n", thr);
@@ -676,17 +687,17 @@ static int run_thread(void *arg ODP_UNUSED)
/**
* @internal Test cycle counter frequency
*/
-static void test_cpu_freq(void)
+static void test_cpu_freq(double test_sec)
{
odp_time_t cur_time, test_time, start_time, end_time;
uint64_t c1, c2, cycles;
uint64_t nsec;
double diff_max_hz, max_cycles;
- printf("\nCPU cycle count frequency test (runs about %i sec)\n",
- TEST_SEC);
+ printf("\nCPU cycle count frequency test (runs about %f sec)\n",
+ test_sec);
- test_time = odp_time_local_from_ns(TEST_SEC * ODP_TIME_SEC_IN_NS);
+ test_time = odp_time_local_from_ns(test_sec * ODP_TIME_SEC_IN_NS);
start_time = odp_time_local();
end_time = odp_time_sum(start_time, test_time);
@@ -723,7 +734,8 @@ static void print_usage(void)
{
printf("\n\nUsage: ./odp_example [options]\n");
printf("Options:\n");
- printf(" -c, --count <number> CPU count, 0=all available, default=0\n");
+ printf(" -t, --time <number> test duration, default=%.1f\n", (double)TEST_SEC);
+ printf(" -c, --count <number> CPU count, 0=all available, default=1\n");
printf(" -h, --help this help\n");
printf(" -f, --fair collect fairness statistics\n");
printf("\n\n");
@@ -742,18 +754,18 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
int long_index;
static const struct option longopts[] = {
+ {"time", required_argument, NULL, 't'},
{"count", required_argument, NULL, 'c'},
{"fair", no_argument, NULL, 'f'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:fh";
+ static const char *shortopts = "+t:c:fh";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- odph_parse_options(argc, argv, shortopts, longopts);
+ args->cpu_count = 1; /* use one worker by default */
+ args->test_sec = TEST_SEC;
- opterr = 0; /* do not issue errors on helper options */
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -765,6 +777,10 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
args->fairness = 1;
break;
+ case 't':
+ args->test_sec = atof(optarg);
+ break;
+
case 'c':
args->cpu_count = atoi(optarg);
break;
@@ -785,7 +801,8 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
*/
int main(int argc, char *argv[])
{
- odph_odpthread_t *thread_tbl;
+ odph_helper_options_t helper_options;
+ odph_thread_t *thread_tbl;
test_args_t args;
int num_workers;
odp_cpumask_t cpumask;
@@ -798,16 +815,32 @@ int main(int argc, char *argv[])
odp_pool_param_t params;
int ret = 0;
odp_instance_t instance;
- odph_odpthread_params_t thr_params;
+ odp_init_t init_param;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_queue_capability_t capa;
+ odp_pool_capability_t pool_capa;
+ odp_schedule_config_t schedule_config;
+ uint32_t num_queues, num_buf;
printf("\nODP example starts\n\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&args, 0, sizeof(args));
parse_args(argc, argv, &args);
/* ODP global init */
- if (odp_init_global(&instance, NULL, NULL)) {
- LOG_ERR("ODP global init failed.\n");
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ ODPH_ERR("ODP global init failed.\n");
return -1;
}
@@ -816,22 +849,12 @@ int main(int argc, char *argv[])
* setting up resources for worker threads.
*/
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- LOG_ERR("ODP global init failed.\n");
+ ODPH_ERR("ODP local init failed.\n");
return -1;
}
printf("\n");
- printf("ODP system info\n");
- printf("---------------\n");
- printf("ODP API version: %s\n", odp_version_api_str());
- printf("ODP impl name: %s\n", odp_version_impl_name());
- printf("ODP impl details: %s\n", odp_version_impl_str());
- printf("CPU model: %s\n", odp_cpu_model_str());
- printf("CPU freq (hz): %" PRIu64 "\n", odp_cpu_hz_max());
- printf("Cache line size: %i\n", odp_sys_cache_line_size());
- printf("Max CPU count: %i\n", odp_cpu_count());
-
- printf("\n");
+ odp_sys_info_print();
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, args.cpu_count);
@@ -841,19 +864,19 @@ int main(int argc, char *argv[])
printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
printf("cpu mask: %s\n", cpumaskstr);
- thread_tbl = calloc(sizeof(odph_odpthread_t), num_workers);
+ thread_tbl = calloc(num_workers, sizeof(odph_thread_t));
if (!thread_tbl) {
- LOG_ERR("no memory for thread_tbl\n");
+ ODPH_ERR("no memory for thread_tbl\n");
return -1;
}
/* Test cycle count frequency */
- test_cpu_freq();
+ test_cpu_freq(args.test_sec);
shm = odp_shm_reserve("test_globals",
sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- LOG_ERR("Shared memory reserve failed.\n");
+ ODPH_ERR("Shared memory reserve failed.\n");
return -1;
}
@@ -864,34 +887,68 @@ int main(int argc, char *argv[])
/*
* Create message pool
*/
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capabilities failed.\n");
+ return -1;
+ }
+
+ num_buf = MAX_BUF;
+ if (pool_capa.buf.max_num && pool_capa.buf.max_num < MAX_BUF)
+ num_buf = pool_capa.buf.max_num;
odp_pool_param_init(&params);
params.buf.size = sizeof(test_message_t);
params.buf.align = 0;
- params.buf.num = NUM_MSG;
+ params.buf.num = num_buf;
params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("msg_pool", &params);
if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Pool create failed.\n");
+ ODPH_ERR("Pool create failed.\n");
return -1;
}
globals->pool = pool;
+ if (odp_queue_capability(&capa)) {
+ ODPH_ERR("Fetching queue capabilities failed.\n");
+ return -1;
+ }
+
+ odp_schedule_config_init(&schedule_config);
+ odp_schedule_config(&schedule_config);
+
+ globals->queues_per_prio = QUEUES_PER_PRIO;
+ num_queues = globals->queues_per_prio * NUM_PRIOS;
+ if (schedule_config.num_queues &&
+ num_queues > schedule_config.num_queues)
+ globals->queues_per_prio = schedule_config.num_queues /
+ NUM_PRIOS;
+
+ /* One plain queue is also used */
+ num_queues = (globals->queues_per_prio * NUM_PRIOS) + 1;
+ if (num_queues > capa.max_queues)
+ globals->queues_per_prio--;
+
+ if (globals->queues_per_prio <= 0) {
+ ODPH_ERR("Not enough queues. At least 1 plain and %d scheduled "
+ "queues required.\n", NUM_PRIOS);
+ return -1;
+ }
+
/*
* Create a queue for plain queue test
*/
plain_queue = odp_queue_create("plain_queue", NULL);
if (plain_queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Plain queue create failed.\n");
+ ODPH_ERR("Plain queue create failed.\n");
return -1;
}
/*
- * Create queues for schedule test. QUEUES_PER_PRIO per priority.
+ * Create queues for schedule test.
*/
for (i = 0; i < NUM_PRIOS; i++) {
char name[] = "sched_XX_YY";
@@ -900,9 +957,9 @@ int main(int argc, char *argv[])
int prio;
if (i == 0)
- prio = ODP_SCHED_PRIO_HIGHEST;
+ prio = odp_schedule_max_prio();
else
- prio = ODP_SCHED_PRIO_LOWEST;
+ prio = odp_schedule_min_prio();
name[6] = '0' + (prio / 10);
name[7] = '0' + prio - (10 * (prio / 10));
@@ -913,14 +970,14 @@ int main(int argc, char *argv[])
param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
param.sched.group = ODP_SCHED_GROUP_ALL;
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
+ for (j = 0; j < globals->queues_per_prio; j++) {
name[9] = '0' + j / 10;
name[10] = '0' + j - 10 * (j / 10);
queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Schedule queue create failed.\n");
+ ODPH_ERR("Schedule queue create failed.\n");
return -1;
}
@@ -930,7 +987,7 @@ int main(int argc, char *argv[])
&globals->queue_ctx[i][j],
sizeof(queue_context_t))
< 0) {
- LOG_ERR("Queue context set failed.\n");
+ ODPH_ERR("Queue context set failed.\n");
return -1;
}
}
@@ -947,15 +1004,21 @@ int main(int argc, char *argv[])
globals->first_thr = -1;
/* Create and launch worker threads */
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- thr_params.start = run_thread;
- thr_params.arg = NULL;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.thr_type = ODP_THREAD_WORKER;
+ thr_param.start = run_thread;
+ thr_param.arg = NULL;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* Wait for worker threads to terminate */
- odph_odpthreads_join(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
free(thread_tbl);
printf("ODP example complete\n\n");
@@ -963,7 +1026,7 @@ int main(int argc, char *argv[])
for (i = 0; i < NUM_PRIOS; i++) {
odp_queue_t queue;
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
+ for (j = 0; j < globals->queues_per_prio; j++) {
queue = globals->queue[i][j];
ret += odp_queue_destroy(queue);
}
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
new file mode 100755
index 000000000..4e004264e
--- /dev/null
+++ b/test/performance/odp_scheduling_run.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# Copyright (c) 2015-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script that passes command line arguments to odp_scheduling test when
+# launched by 'make check'
+
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+ALL=0
+
+run()
+{
+ echo odp_scheduling_run starts requesting $1 worker threads
+ echo ======================================================
+
+ if [ $(nproc) -lt $1 ]; then
+ echo "Not enough CPU cores. Skipping test."
+ else
+ $TEST_DIR/odp_scheduling${EXEEXT} -c $1 -t 0.1
+ RET_VAL=$?
+ if [ $RET_VAL -ne 0 ]; then
+ echo odp_scheduling FAILED
+ exit $RET_VAL
+ fi
+ fi
+}
+
+run 1
+run 5
+run 8
+run 11
+run $ALL
+
+exit 0
diff --git a/test/performance/odp_stash_perf.c b/test/performance/odp_stash_perf.c
new file mode 100644
index 000000000..cb223999e
--- /dev/null
+++ b/test/performance/odp_stash_perf.c
@@ -0,0 +1,523 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2021 Nokia
+ * Copyright (c) 2023 Arm
+ */
+
+/**
+ * @example odp_stash_perf.c
+ *
+ * Performance test application for stash APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_STASHES (32)
+
+typedef struct test_options_t {
+ uint32_t num_stash;
+ uint32_t num_round;
+ uint32_t max_burst;
+ uint32_t stash_size;
+ int strict;
+ int num_cpu;
+
+} test_options_t;
+
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t ops;
+ uint64_t nsec;
+ uint64_t cycles;
+ uint64_t num_retry;
+
+} test_stat_t;
+
+typedef struct test_global_t {
+ odp_barrier_t barrier;
+ test_options_t options;
+ odp_instance_t instance;
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_stash_t stash[MAX_STASHES];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+
+} test_global_t;
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Stash performance test\n"
+ "\n"
+ "Usage: odp_stash_perf [options]\n"
+ "\n"
+ " -c, --num_cpu <num> Number of worker threads. Default: 1\n"
+ " -n, --num_stash <num> Number of stashes. Default: 1\n"
+ " -b, --burst_size <num> Max number of objects per stash call. Default: 1\n"
+ " -s, --stash_size <num> Stash size. Default: 1000\n"
+ " -r, --num_round <num> Number of rounds. Default: 1000\n"
+ " -m, --strict Strict size stash\n"
+ " -h, --help This help\n"
+ "\n");
+}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ { "num_cpu", required_argument, NULL, 'c' },
+ { "num_stash", required_argument, NULL, 'n' },
+ { "burst_size", required_argument, NULL, 'b' },
+ { "stash_size", required_argument, NULL, 's' },
+ { "num_round", required_argument, NULL, 'r' },
+ { "strict", no_argument, NULL, 'm' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "+c:n:b:s:r:mh";
+
+ test_options->num_cpu = 1;
+ test_options->num_stash = 1;
+ test_options->max_burst = 1;
+ test_options->stash_size = 1000;
+ test_options->num_round = 1000;
+ test_options->strict = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'n':
+ test_options->num_stash = atoi(optarg);
+ break;
+ case 'b':
+ test_options->max_burst = atoi(optarg);
+ break;
+ case 's':
+ test_options->stash_size = atoi(optarg);
+ break;
+ case 'r':
+ test_options->num_round = atoi(optarg);
+ break;
+ case 'm':
+ test_options->strict = 1;
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->num_stash > MAX_STASHES) {
+ ODPH_ERR("Too many stashes %u. Test maximum %u.\n",
+ test_options->num_stash, MAX_STASHES);
+ return -1;
+ }
+
+ return ret;
+}
+
+static int create_stashes(test_global_t *global)
+{
+ uint32_t i;
+ uint32_t tmp = 0;
+ test_options_t *test_options = &global->options;
+
+ uint32_t num_stash = test_options->num_stash;
+ uint32_t num_round = test_options->num_round;
+ int num_stored;
+ uint32_t num_remain;
+ odp_stash_t *stash = global->stash;
+ odp_stash_capability_t stash_capa;
+
+ printf("\nTesting %s stashes\n",
+ test_options->strict == 0 ? "NORMAL" : "STRICT_SIZE");
+ printf(" num rounds %u\n", num_round);
+ printf(" num stashes %u\n", num_stash);
+ printf(" stash size %u\n", test_options->stash_size);
+ printf(" max burst size %u\n", test_options->max_burst);
+
+ if (odp_stash_capability(&stash_capa, ODP_STASH_TYPE_DEFAULT)) {
+ ODPH_ERR("Get stash capability failed\n");
+ return -1;
+ }
+
+ if (test_options->stash_size > stash_capa.max_num_obj) {
+ ODPH_ERR("Max stash size supported %" PRIu64 "\n",
+ stash_capa.max_num_obj);
+ return -1;
+ }
+
+ if (test_options->num_stash > stash_capa.max_stashes) {
+ ODPH_ERR("Max stash supported %u\n", stash_capa.max_stashes);
+ return -1;
+ }
+
+ for (i = 0; i < num_stash; i++) {
+ odp_stash_param_t stash_param;
+
+ odp_stash_param_init(&stash_param);
+ stash_param.num_obj = test_options->stash_size;
+ stash_param.obj_size = sizeof(uint32_t);
+ stash_param.strict_size = test_options->strict;
+
+ stash[i] = odp_stash_create("test_stash_u32", &stash_param);
+ if (stash[i] == ODP_STASH_INVALID) {
+ ODPH_ERR("Stash create failed\n");
+ return -1;
+ }
+
+ num_remain = test_options->stash_size;
+ do {
+ num_stored = odp_stash_put_u32(stash[i], &tmp, 1);
+ if (num_stored < 0) {
+ ODPH_ERR("Error: Stash put failed\n");
+ return -1;
+ }
+ num_remain -= num_stored;
+ } while (num_remain);
+ }
+
+ return 0;
+}
+
+static int destroy_stashes(test_global_t *global)
+{
+ odp_stash_t *stash = global->stash;
+ test_options_t *test_options = &global->options;
+ uint32_t num_stash = test_options->num_stash;
+ uint32_t tmp;
+ int num;
+
+ for (uint32_t i = 0; i < num_stash; i++) {
+ do {
+ num = odp_stash_get_u32(stash[i], &tmp, 1);
+ if (num < 0) {
+ ODPH_ERR("Error: Stash get failed %u\n", i);
+ return -1;
+ }
+ } while (num);
+
+ if (odp_stash_destroy(stash[i])) {
+ ODPH_ERR("Stash destroy failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int run_test(void *arg)
+{
+ uint64_t c1, c2, cycles, nsec;
+ odp_time_t t1, t2;
+ uint32_t rounds;
+ int num_stored;
+ int num_remain;
+ int num_obj;
+ test_stat_t *stat;
+ test_global_t *global = arg;
+ test_options_t *test_options = &global->options;
+ odp_stash_t stash;
+ uint64_t num_retry = 0;
+ uint64_t ops = 0;
+ uint32_t num_stash = test_options->num_stash;
+ uint32_t num_round = test_options->num_round;
+ int thr = odp_thread_id();
+ int ret = 0;
+ uint32_t i = 0;
+ uint32_t max_burst = test_options->max_burst;
+ uint32_t *tmp = malloc(sizeof(uint32_t) * max_burst);
+
+ if (tmp == NULL) {
+ ODPH_ERR("Error: malloc failed\n");
+ ret = -1;
+ goto error;
+ }
+
+ stat = &global->stat[thr];
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+ c1 = odp_cpu_cycles();
+
+ for (rounds = 0; rounds < num_round; rounds++) {
+ stash = global->stash[i++];
+
+ if (i == num_stash)
+ i = 0;
+
+ num_obj = odp_stash_get_u32(stash, tmp, max_burst);
+ if (num_obj == 0)
+ continue;
+
+ if (num_obj < 0) {
+ ODPH_ERR("Error: Stash get failed\n");
+ ret = -1;
+ goto error;
+ }
+ num_remain = num_obj;
+ do {
+ num_stored = odp_stash_put_u32(stash, tmp, num_remain);
+ if (num_stored < 0) {
+ ODPH_ERR("Error: Stash put failed\n");
+ ret = -1;
+ goto error;
+ }
+
+ if (num_stored != num_remain)
+ num_retry++;
+
+ num_remain -= num_stored;
+ } while (num_remain);
+ ops += num_obj;
+ }
+
+ c2 = odp_cpu_cycles();
+ t2 = odp_time_local();
+
+ nsec = odp_time_diff_ns(t2, t1);
+ cycles = odp_cpu_cycles_diff(c2, c1);
+
+ stat->rounds = rounds;
+ stat->ops = ops;
+ stat->nsec = nsec;
+ stat->cycles = cycles;
+ stat->num_retry = num_retry;
+error:
+ free(tmp);
+ return ret;
+}
+
+static int start_workers(test_global_t *global)
+{
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_cpumask_t cpumask;
+ int ret;
+ test_options_t *test_options = &global->options;
+ int num_cpu = test_options->num_cpu;
+
+ ret = odp_cpumask_default_worker(&cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+ ODPH_ERR("Error: Too many workers. Max supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ printf("  num workers       %i\n\n", num_cpu);
+
+ odp_barrier_init(&global->barrier, num_cpu);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = global->instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_test;
+ thr_param.arg = global;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ if (odph_thread_create(global->thread_tbl, &thr_common, &thr_param,
+ num_cpu) != num_cpu)
+ return -1;
+
+ return 0;
+}
+
+static void print_stat(test_global_t *global)
+{
+ int i, num;
+ double rounds_ave, ops_ave, nsec_ave, cycles_ave, retry_ave;
+ test_options_t *test_options = &global->options;
+ int num_cpu = test_options->num_cpu;
+ uint64_t rounds_sum = 0;
+ uint64_t ops_sum = 0;
+ uint64_t nsec_sum = 0;
+ uint64_t cycles_sum = 0;
+ uint64_t retry_sum = 0;
+
+ /* Averages */
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ rounds_sum += global->stat[i].rounds;
+ ops_sum += global->stat[i].ops;
+ nsec_sum += global->stat[i].nsec;
+ cycles_sum += global->stat[i].cycles;
+ retry_sum += global->stat[i].num_retry;
+ }
+
+ if (rounds_sum == 0) {
+ printf("No results.\n");
+ return;
+ }
+
+ rounds_ave = rounds_sum / num_cpu;
+ ops_ave = ops_sum / num_cpu;
+ nsec_ave = nsec_sum / num_cpu;
+ cycles_ave = cycles_sum / num_cpu;
+ retry_ave = retry_sum / num_cpu;
+ num = 0;
+
+ printf("RESULTS - per thread (Million ops per sec):\n");
+ printf("----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (1000.0 * global->stat[i].ops) /
+ global->stat[i].nsec);
+ num++;
+ }
+ }
+ printf("\n\n");
+
+ printf("RESULTS - per thread average (%i threads):\n", num_cpu);
+ printf("------------------------------------------\n");
+ printf(" duration: %.3f msec\n", nsec_ave / 1000000);
+ printf(" num cycles: %.3f M\n", cycles_ave / 1000000);
+ printf(" ops per get: %.3f\n", ops_ave / rounds_ave);
+ printf(" cycles per ops: %.3f\n", cycles_ave / ops_ave);
+ printf(" retries per sec: %.3f k\n",
+ (1000000.0 * retry_ave) / nsec_ave);
+ printf(" ops per sec: %.3f M\n\n",
+ (1000.0 * ops_ave) / nsec_ave);
+
+ printf("TOTAL ops per sec: %.3f M\n\n",
+ (1000.0 * ops_sum) / nsec_ave);
+}
+
+int main(int argc, char **argv)
+{
+ odph_helper_options_t helper_options;
+ odp_instance_t instance;
+ odp_init_t init;
+ odp_shm_t shm;
+ test_global_t *global;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.schedule = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Error: Global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_WORKER)) {
+ ODPH_ERR("Error: Local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ shm = odp_shm_reserve("stash_perf_global", sizeof(test_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Error: Shared mem reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ ODPH_ERR("Error: Shared mem alloc failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(global, 0, sizeof(test_global_t));
+
+ if (parse_options(argc, argv, &global->options))
+ exit(EXIT_FAILURE);
+
+ odp_sys_info_print();
+
+ global->instance = instance;
+
+ if (create_stashes(global)) {
+ ODPH_ERR("Error: Create stashes failed.\n");
+ goto destroy;
+ }
+
+ if (start_workers(global)) {
+ ODPH_ERR("Error: Test start failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Wait workers to exit */
+ odph_thread_join(global->thread_tbl, global->options.num_cpu);
+
+ print_stat(global);
+
+destroy:
+ if (destroy_stashes(global)) {
+ ODPH_ERR("Error: Destroy stashes failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Error: Shared mem free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: term local failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: term global failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
diff --git a/test/performance/odp_stress.c b/test/performance/odp_stress.c
new file mode 100644
index 000000000..3ec01df33
--- /dev/null
+++ b/test/performance/odp_stress.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_stress.c
+ *
+ * Test application that can be used to stress CPU, memory, and HW accelerators.
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint64_t period_ns;
+ uint64_t rounds;
+ uint64_t mem_size;
+ int mode;
+ int group_mode;
+
+} test_options_t;
+
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t tot_nsec;
+ uint64_t work_nsec;
+
+} test_stat_t;
+
+typedef struct test_stat_sum_t {
+ uint64_t rounds;
+ uint64_t tot_nsec;
+ uint64_t work_nsec;
+
+} test_stat_sum_t;
+
+typedef struct thread_arg_t {
+ void *global;
+ int worker_idx;
+
+} thread_arg_t;
+
+typedef struct test_global_t {
+ test_options_t test_options;
+ odp_atomic_u32_t exit_test;
+ odp_barrier_t barrier;
+ odp_cpumask_t cpumask;
+ odp_timer_pool_t timer_pool;
+ odp_pool_t tmo_pool;
+ uint64_t period_ticks;
+ uint8_t *worker_mem;
+ odp_timer_t timer[ODP_THREAD_COUNT_MAX];
+ odp_queue_t tmo_queue[ODP_THREAD_COUNT_MAX];
+ odp_schedule_group_t group[ODP_THREAD_COUNT_MAX];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+ thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ test_stat_sum_t stat_sum;
+ odp_atomic_u64_t tot_rounds;
+
+} test_global_t;
+
+test_global_t *test_global;
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Stress test options:\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n"
+ " -p, --period_ns Timeout period in nsec. Default: 100 ms\n"
+ " -r, --rounds Number of timeout rounds. Default: 2\n"
+ " -m, --mode Select test mode. Default: 1\n"
+ " 0: No stress, just wait for timeouts\n"
+ " 1: Memcpy\n"
+ " -s, --mem_size Memory size per worker in bytes. Default: 2048\n"
+ " -g, --group_mode Select schedule group mode: Default: 1\n"
+ " 0: Use GROUP_ALL group. Scheduler load balances timeout events.\n"
+ " 1: Create a group per CPU. Dedicated timeout event per CPU.\n"
+ " -h, --help This help\n"
+ "\n");
+}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"period_ns", required_argument, NULL, 'p'},
+ {"rounds", required_argument, NULL, 'r'},
+ {"mode", required_argument, NULL, 'm'},
+ {"mem_size", required_argument, NULL, 's'},
+ {"group_mode", required_argument, NULL, 'g'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:p:r:m:s:g:h";
+
+ test_options->num_cpu = 1;
+ test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
+ test_options->rounds = 2;
+ test_options->mode = 1;
+ test_options->mem_size = 2048;
+ test_options->group_mode = 1;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'p':
+ test_options->period_ns = atoll(optarg);
+ break;
+ case 'r':
+ test_options->rounds = atoll(optarg);
+ break;
+ case 'm':
+ test_options->mode = atoi(optarg);
+ break;
+ case 's':
+ test_options->mem_size = atoll(optarg);
+ break;
+ case 'g':
+ test_options->group_mode = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->mode) {
+ if (test_options->mem_size < 2) {
+ ODPH_ERR("Too small memory size\n");
+ return -1;
+ }
+ }
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+ /* One thread used for the main thread */
+ if (num_cpu < 0 || num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ ODPH_ERR("Bad number of workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+ ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ odp_barrier_init(&global->barrier, num_cpu + 1);
+
+ return 0;
+}
+
+static int join_group(test_global_t *global, int worker_idx, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+ group = global->group[worker_idx];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ ODPH_ERR("Thread %i failed to join group %i\n", thr, worker_idx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int worker_thread(void *arg)
+{
+ int thr, timer_ret;
+ uint32_t exit_test;
+ odp_event_t ev;
+ odp_timeout_t tmo;
+ odp_timer_t timer;
+ uint64_t tot_nsec, work_sum, max_nsec;
+ odp_timer_start_t start_param;
+ odp_time_t t1, t2, max_time;
+ odp_time_t work_t1, work_t2;
+ uint8_t *src = NULL, *dst = NULL;
+ thread_arg_t *thread_arg = arg;
+ int worker_idx = thread_arg->worker_idx;
+ test_global_t *global = thread_arg->global;
+ test_options_t *test_options = &global->test_options;
+ int mode = test_options->mode;
+ int group_mode = test_options->group_mode;
+ uint64_t mem_size = test_options->mem_size;
+ uint64_t copy_size = mem_size / 2;
+ uint64_t rounds = 0;
+ int ret = 0;
+ uint32_t done = 0;
+ uint64_t wait = ODP_SCHED_WAIT;
+ uint64_t tot_rounds = test_options->rounds * test_options->num_cpu;
+
+ thr = odp_thread_id();
+ max_nsec = 2 * test_options->rounds * test_options->period_ns;
+ max_time = odp_time_local_from_ns(max_nsec);
+ printf("Thread %i starting on CPU %i\n", thr, odp_cpu_id());
+
+ if (group_mode == 0) {
+ /* Timeout events are load balanced. Using this
+ * period to poll exit status. */
+ wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ } else {
+ if (join_group(global, worker_idx, thr)) {
+ /* Join failed, exit after barrier */
+ wait = ODP_SCHED_NO_WAIT;
+ done = 1;
+ }
+ }
+
+ if (mode) {
+ src = global->worker_mem + worker_idx * mem_size;
+ dst = src + copy_size;
+ }
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = global->period_ticks;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ work_sum = 0;
+ t1 = odp_time_local();
+ max_time = odp_time_sum(t1, max_time);
+
+ while (1) {
+ ev = odp_schedule(NULL, wait);
+
+ exit_test = odp_atomic_load_u32(&global->exit_test);
+ exit_test += done;
+
+ if (ev == ODP_EVENT_INVALID) {
+ odp_time_t cur_time = odp_time_local();
+
+ if (odp_time_cmp(cur_time, max_time) > 0)
+ exit_test += 1;
+
+ if (exit_test) {
+ /* Exit loop without schedule context */
+ break;
+ }
+
+ continue;
+ }
+
+ rounds++;
+
+ if (group_mode) {
+ if (rounds >= test_options->rounds)
+ done = 1;
+ } else {
+ if (odp_atomic_fetch_inc_u64(&global->tot_rounds) >= (tot_rounds - 1))
+ done = 1;
+ }
+
+ if (done == 0) {
+ tmo = odp_timeout_from_event(ev);
+ timer = odp_timeout_timer(tmo);
+ start_param.tmo_ev = ev;
+
+ timer_ret = odp_timer_start(timer, &start_param);
+
+ if (timer_ret != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer start failed (%" PRIu64 ")\n", rounds);
+ done = 1;
+ }
+ }
+
+ /* Do work */
+ if (mode) {
+ work_t1 = odp_time_local();
+
+ memcpy(dst, src, copy_size);
+
+ work_t2 = odp_time_local();
+ work_sum += odp_time_diff_ns(work_t2, work_t1);
+ }
+
+ if (done) {
+ /* Stop timer and do not wait events */
+ wait = ODP_SCHED_NO_WAIT;
+ odp_event_free(ev);
+ }
+ }
+
+ t2 = odp_time_local();
+ tot_nsec = odp_time_diff_ns(t2, t1);
+
+ /* Update stats */
+ global->stat[thr].rounds = rounds;
+ global->stat[thr].tot_nsec = tot_nsec;
+ global->stat[thr].work_nsec = work_sum;
+
+ return ret;
+}
+
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t thr_common;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+ odph_thread_common_param_init(&thr_common);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = worker_thread;
+ thr_param[i].arg = &global->thread_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param, num_cpu);
+
+ if (ret != num_cpu) {
+ ODPH_ERR("Thread create failed %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int create_timers(test_global_t *global)
+{
+ odp_timer_capability_t timer_capa;
+ odp_timer_res_capability_t timer_res_capa;
+ odp_timer_pool_param_t timer_pool_param;
+ odp_timer_pool_t tp;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ double duration;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ uint64_t period_ns = test_options->period_ns;
+ uint64_t res_ns = period_ns / 1000;
+
+ if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
+ ODPH_ERR("Timer capability failed\n");
+ return -1;
+ }
+
+ if (timer_capa.queue_type_sched == 0) {
+ ODPH_ERR("Timer does not support sched queues\n");
+ return -1;
+ }
+
+ memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t));
+ timer_res_capa.max_tmo = 2 * period_ns;
+ if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &timer_res_capa)) {
+ ODPH_ERR("Timer resolution capability failed. Too long period.\n");
+ return -1;
+ }
+
+ if (res_ns < timer_res_capa.res_ns)
+ res_ns = timer_res_capa.res_ns;
+
+ duration = test_options->rounds * (double)period_ns / ODP_TIME_SEC_IN_NS;
+
+ printf(" num timers %u\n", num_cpu);
+ printf(" resolution %" PRIu64 " nsec\n", res_ns);
+ printf(" period %" PRIu64 " nsec\n", period_ns);
+ printf(" test duration %.2f sec\n", duration);
+ if (test_options->group_mode == 0)
+ printf(" force stop after %.2f sec\n", 2 * duration);
+ printf("\n");
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = num_cpu;
+
+ pool = odp_pool_create("Timeout pool", &pool_param);
+ global->tmo_pool = pool;
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Pool create failed\n");
+ return -1;
+ }
+
+ odp_timer_pool_param_init(&timer_pool_param);
+ timer_pool_param.res_ns = res_ns;
+ timer_pool_param.min_tmo = period_ns / 2;
+ timer_pool_param.max_tmo = 2 * period_ns;
+ timer_pool_param.num_timers = 2 * num_cpu; /* extra for stop events */
+ timer_pool_param.clk_src = ODP_CLOCK_DEFAULT;
+
+ tp = odp_timer_pool_create("Stress timers", &timer_pool_param);
+ global->timer_pool = tp;
+ if (tp == ODP_TIMER_POOL_INVALID) {
+ ODPH_ERR("Timer pool create failed\n");
+ return -1;
+ }
+
+ if (odp_timer_pool_start_multi(&tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
+
+ global->period_ticks = odp_timer_ns_to_tick(tp, period_ns);
+
+ return 0;
+}
+
+static int create_queues(test_global_t *global)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t thrmask;
+ odp_queue_param_t queue_param;
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ return -1;
+ }
+
+ if (test_options->group_mode) {
+ if ((sched_capa.max_groups - 1) < num_cpu) {
+ ODPH_ERR("Too many workers. Not enough schedule groups.\n");
+ return -1;
+ }
+
+ odp_thrmask_zero(&thrmask);
+
+ /* A group per worker thread */
+ for (i = 0; i < num_cpu; i++) {
+ global->group[i] = odp_schedule_group_create(NULL, &thrmask);
+
+ if (global->group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Schedule group create failed (%u)\n", i);
+ return -1;
+ }
+ }
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ for (i = 0; i < num_cpu; i++) {
+ if (test_options->group_mode)
+ queue_param.sched.group = global->group[i];
+
+ global->tmo_queue[i] = odp_queue_create(NULL, &queue_param);
+
+ if (global->tmo_queue[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Timeout dest queue create failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_timers(test_global_t *global)
+{
+ odp_timer_start_t start_param;
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ odp_timeout_t tmo[num_cpu];
+ odp_timer_t timer[num_cpu];
+
+ for (i = 0; i < num_cpu; i++) {
+ tmo[i] = odp_timeout_alloc(global->tmo_pool);
+
+ if (tmo[i] == ODP_TIMEOUT_INVALID) {
+ ODPH_ERR("Timeout alloc failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < num_cpu; i++) {
+ timer[i] = odp_timer_alloc(global->timer_pool, global->tmo_queue[i], NULL);
+
+ if (timer[i] == ODP_TIMER_INVALID) {
+ ODPH_ERR("Timer alloc failed (%u)\n", i);
+ return -1;
+ }
+
+ global->timer[i] = timer[i];
+ }
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = global->period_ticks;
+
+ for (i = 0; i < num_cpu; i++) {
+ start_param.tmo_ev = odp_timeout_to_event(tmo[i]);
+
+ if (odp_timer_start(timer[i], &start_param) != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer start failed (%u)\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void destroy_timers(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ for (i = 0; i < num_cpu; i++) {
+ odp_timer_t timer = global->timer[i];
+
+ if (timer == ODP_TIMER_INVALID)
+ continue;
+
+ if (odp_timer_free(timer))
+ ODPH_ERR("Timer free failed (%u)\n", i);
+ }
+
+ if (global->timer_pool != ODP_TIMER_POOL_INVALID)
+ odp_timer_pool_destroy(global->timer_pool);
+
+ for (i = 0; i < num_cpu; i++) {
+ odp_queue_t queue = global->tmo_queue[i];
+
+ if (queue == ODP_QUEUE_INVALID)
+ continue;
+
+ if (odp_queue_destroy(queue))
+ ODPH_ERR("Queue destroy failed (%u)\n", i);
+ }
+
+ if (test_options->group_mode) {
+ for (i = 0; i < num_cpu; i++) {
+ odp_schedule_group_t group = global->group[i];
+
+ if (group == ODP_SCHED_GROUP_INVALID)
+ continue;
+
+ if (odp_schedule_group_destroy(group))
+ ODPH_ERR("Schedule group destroy failed (%u)\n", i);
+ }
+ }
+
+ if (global->tmo_pool != ODP_POOL_INVALID)
+ odp_pool_destroy(global->tmo_pool);
+}
+
+static void sig_handler(int signo)
+{
+ (void)signo;
+
+ if (test_global == NULL)
+ return;
+
+ odp_atomic_add_u32(&test_global->exit_test, 1);
+}
+
+static void stop_workers(test_global_t *global)
+{
+ uint32_t i;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ odp_queue_t queue;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+
+ odp_atomic_add_u32(&test_global->exit_test, 1);
+
+ for (i = 0; i < num_cpu; i++) {
+ queue = global->tmo_queue[i];
+ if (queue == ODP_QUEUE_INVALID)
+ continue;
+
+ tmo = odp_timeout_alloc(global->tmo_pool);
+
+ if (tmo == ODP_TIMEOUT_INVALID)
+ continue;
+
+ ev = odp_timeout_to_event(tmo);
+ if (odp_queue_enq(queue, ev)) {
+ ODPH_ERR("Enqueue failed %u\n", i);
+ odp_event_free(ev);
+ }
+ }
+}
+
+static void sum_stat(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ test_stat_sum_t *sum = &global->stat_sum;
+
+ memset(sum, 0, sizeof(test_stat_sum_t));
+
+ for (i = 1; i < num_cpu + 1 ; i++) {
+ sum->rounds += global->stat[i].rounds;
+ sum->tot_nsec += global->stat[i].tot_nsec;
+ sum->work_nsec += global->stat[i].work_nsec;
+ }
+}
+
+static void print_stat(test_global_t *global)
+{
+ uint32_t i;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ int mode = test_options->mode;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double sec_ave, work_ave, perc;
+ double round_ave = 0.0;
+ double copy_ave = 0.0;
+ double copy_tot = 0.0;
+ double cpu_load = 0.0;
+ const double mega = 1000000.0;
+ const double giga = 1000000000.0;
+ uint32_t num = 0;
+
+ if (num_cpu == 0)
+ return;
+
+ sec_ave = (sum->tot_nsec / giga) / num_cpu;
+ work_ave = (sum->work_nsec / giga) / num_cpu;
+
+ printf("\n");
+ printf("CPU load from work (percent) per thread:\n");
+ printf("----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 1; i < num_cpu + 1; i++) {
+ if (global->stat[i].tot_nsec == 0)
+ continue;
+
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ perc = 100.0 * ((double)global->stat[i].work_nsec) / global->stat[i].tot_nsec;
+
+ printf("%6.2f ", perc);
+ num++;
+ }
+
+ if (sec_ave != 0.0) {
+ round_ave = (double)sum->rounds / num_cpu;
+ cpu_load = 100.0 * (work_ave / sec_ave);
+
+ if (mode) {
+ uint64_t copy_bytes = sum->rounds * test_options->mem_size / 2;
+
+ copy_ave = copy_bytes / (sum->work_nsec / giga);
+ copy_tot = copy_ave * num_cpu;
+ }
+ }
+
+ printf("\n\n");
+ printf("TOTAL (%i workers)\n", num_cpu);
+ printf(" ave time: %.2f sec\n", sec_ave);
+ printf(" ave work: %.2f sec\n", work_ave);
+ printf(" ave CPU load: %.2f\n", cpu_load);
+ printf(" ave rounds per sec: %.2f\n", round_ave / sec_ave);
+ printf(" ave copy speed: %.2f MB/sec\n", copy_ave / mega);
+ printf(" total copy speed: %.2f MB/sec\n", copy_tot / mega);
+ printf("\n");
+}
+
/* Application entry point.
 *
 * Initializes ODP (global + control-thread local), reserves shared memory
 * for the global test state (and optional per-worker work memory in mode 1),
 * parses options, configures the scheduler, creates timers and per-worker
 * queues, launches workers, then collects and prints statistics before
 * tearing everything down in reverse order. Exits with EXIT_FAILURE on any
 * setup/teardown error. */
int main(int argc, char **argv)
{
	odph_helper_options_t helper_options;
	odp_instance_t instance;
	odp_init_t init;
	odp_shm_t shm, shm_global;
	odp_schedule_config_t sched_config;
	test_global_t *global;
	test_options_t *test_options;
	int i, mode;
	uint32_t num_cpu;
	uint64_t mem_size;
	odp_shm_t shm_work = ODP_SHM_INVALID;

	/* SIGINT requests test stop via the exit_test counter */
	signal(SIGINT, sig_handler);

	/* Let helper collect its own arguments (e.g. --odph_proc) */
	argc = odph_parse_options(argc, argv);
	if (odph_options(&helper_options)) {
		ODPH_ERR("Reading ODP helper options failed.\n");
		exit(EXIT_FAILURE);
	}

	odp_init_param_init(&init);
	init.mem_model = helper_options.mem_model;

	if (odp_init_global(&instance, &init, NULL)) {
		ODPH_ERR("Global init failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
		ODPH_ERR("Local init failed.\n");
		exit(EXIT_FAILURE);
	}

	/* Global state lives in shared memory so all worker threads see it */
	shm = odp_shm_reserve("Stress global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
	shm_global = shm;
	if (shm == ODP_SHM_INVALID) {
		ODPH_ERR("SHM reserve failed.\n");
		exit(EXIT_FAILURE);
	}

	global = odp_shm_addr(shm);
	if (global == NULL) {
		ODPH_ERR("SHM addr failed\n");
		exit(EXIT_FAILURE);
	}
	/* Publish pointer for sig_handler() */
	test_global = global;

	memset(global, 0, sizeof(test_global_t));
	odp_atomic_init_u32(&global->exit_test, 0);
	odp_atomic_init_u64(&global->tot_rounds, 0);

	global->timer_pool = ODP_TIMER_POOL_INVALID;
	global->tmo_pool = ODP_POOL_INVALID;

	/* Mark all per-thread handles unused so cleanup can skip them */
	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
		global->timer[i] = ODP_TIMER_INVALID;
		global->tmo_queue[i] = ODP_QUEUE_INVALID;
		global->group[i] = ODP_SCHED_GROUP_INVALID;

		global->thread_arg[i].global = global;
		global->thread_arg[i].worker_idx = i;
	}

	if (parse_options(argc, argv, &global->test_options))
		exit(EXIT_FAILURE);

	test_options = &global->test_options;
	mode = test_options->mode;

	odp_sys_info_print();

	/* Use only the ALL schedule group; disable default worker/control groups */
	odp_schedule_config_init(&sched_config);
	sched_config.sched_group.all = 1;
	sched_config.sched_group.control = 0;
	sched_config.sched_group.worker = 0;

	odp_schedule_config(&sched_config);

	if (set_num_cpu(global))
		exit(EXIT_FAILURE);

	num_cpu = test_options->num_cpu;

	/* Memory for workers */
	if (mode) {
		mem_size = test_options->mem_size * num_cpu;

		shm = odp_shm_reserve("Test memory", mem_size, ODP_CACHE_LINE_SIZE, 0);
		shm_work = shm;
		if (shm == ODP_SHM_INVALID) {
			ODPH_ERR("SHM reserve failed.\n");
			exit(EXIT_FAILURE);
		}

		global->worker_mem = odp_shm_addr(shm);
		if (global->worker_mem == NULL) {
			ODPH_ERR("SHM addr failed\n");
			exit(EXIT_FAILURE);
		}

		memset(global->worker_mem, 0, mem_size);
	}

	printf("\n");
	printf("Test parameters\n");
	printf(" num workers %u\n", num_cpu);
	printf(" mode %i\n", mode);
	printf(" group mode %i\n", test_options->group_mode);
	printf(" mem size per worker %" PRIu64 " bytes\n", test_options->mem_size);

	if (create_timers(global))
		exit(EXIT_FAILURE);

	if (create_queues(global))
		exit(EXIT_FAILURE);

	/* Start worker threads */
	start_workers(global, instance);

	/* Wait until all workers are ready */
	odp_barrier_wait(&global->barrier);

	if (start_timers(global)) {
		/* Stop all workers, if some timer did not start */
		ODPH_ERR("Timers did not start. Stopping workers.\n");
		stop_workers(global);
	}

	/* Wait workers to exit */
	odph_thread_join(global->thread_tbl, num_cpu);

	sum_stat(global);

	print_stat(global);

	destroy_timers(global);

	if (mode) {
		if (odp_shm_free(shm_work)) {
			ODPH_ERR("SHM free failed.\n");
			exit(EXIT_FAILURE);
		}
	}

	if (odp_shm_free(shm_global)) {
		ODPH_ERR("SHM free failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_local()) {
		ODPH_ERR("Term local failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_term_global(instance)) {
		ODPH_ERR("Term global failed.\n");
		exit(EXIT_FAILURE);
	}

	return 0;
}
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
new file mode 100644
index 000000000..918267a1b
--- /dev/null
+++ b/test/performance/odp_timer_perf.c
@@ -0,0 +1,1402 @@
+/* Copyright (c) 2019-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @example odp_timer_perf.c
+ *
+ * Performance test application for timer APIs
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
/* Test modes (-m option) */
#define MODE_SCHED_OVERH 0	/* measure odp_schedule() overhead with timers running */
#define MODE_START_CANCEL 1	/* measure timer set + cancel performance */
#define MODE_START_EXPIRE 2	/* measure start overhead while restarting expiring timers */
#define MAX_TIMER_POOLS 32	/* upper limit for -n (number of timer pools) */
#define MAX_TIMERS 10000	/* upper limit for -t (timers per pool) */
#define START_NS (100 * ODP_TIME_MSEC_IN_NS)	/* offset of the first timeout */

/* Parsed command line options */
typedef struct test_options_t {
	uint32_t num_cpu;	/* -c: worker threads (0 = all available) */
	uint32_t num_tp;	/* -n: number of timer pools */
	uint32_t num_timer;	/* -t: timers per pool */
	uint64_t res_ns;	/* -r: timer resolution in nsec */
	uint64_t period_ns;	/* -p: timeout period in nsec */
	int shared;		/* -s: 1 = shared pools, 0 = private (single CPU only) */
	int mode;		/* -m: one of the MODE_* values above */
	uint64_t test_rounds;	/* -R: rounds in set + cancel test */

} test_options_t;

/* Accumulated early/late arrival statistics (nsec relative to target time) */
typedef struct time_stat_t {
	uint64_t num;		/* number of samples */
	uint64_t sum_ns;	/* sum of deviations */
	uint64_t max_ns;	/* worst-case deviation */

} time_stat_t;

/* Per-thread measurement results */
typedef struct test_stat_t {
	uint64_t rounds;	/* schedule loop iterations / test rounds */
	uint64_t events;	/* timeout events received */
	uint64_t nsec;		/* wall-clock test duration */
	uint64_t cycles_0;	/* mode-specific cycle counter (e.g. schedule or cancel) */
	uint64_t cycles_1;	/* mode-specific cycle counter (e.g. timer start) */

	uint64_t cancels;	/* odp_timer_cancel() calls */
	uint64_t sets;		/* odp_timer_start() calls */

	time_stat_t before;	/* timeouts delivered before target time */
	time_stat_t after;	/* timeouts delivered after target time */

} test_stat_t;

/* Sum of all per-thread results, filled by the main thread */
typedef struct test_stat_sum_t {
	uint64_t rounds;
	uint64_t events;
	uint64_t nsec;
	uint64_t cycles_0;
	uint64_t cycles_1;

	uint64_t cancels;
	uint64_t sets;

	time_stat_t before;
	time_stat_t after;

	double time_ave;	/* average test duration in seconds */
	uint32_t num;		/* number of threads summed */

} test_stat_sum_t;

/* Per-worker startup argument */
typedef struct thread_arg_t {
	void *global;		/* points to test_global_t */
	int worker_idx;		/* 0-based worker index */

} thread_arg_t;

/* User context attached to each timer (via odp_timer_alloc) */
typedef struct timer_ctx_t {
	uint64_t target_ns;	/* expected expiration time (global time) */
	uint64_t target_tick;	/* expected expiration tick */
	uint32_t tp_idx;	/* owning timer pool index */
	uint32_t timer_idx;	/* timer index within the pool */
	int last;		/* set on the timer that expires last */

} timer_ctx_t;

/* Per-pool handles and precomputed tick values */
typedef struct timer_pool_t {
	odp_timer_pool_t tp;
	uint64_t start_tick;	/* START_NS converted to ticks */
	uint64_t period_tick;	/* period_ns converted to ticks */

} timer_pool_t;

/* All shared test state; lives in shared memory */
typedef struct test_global_t {
	test_options_t test_options;
	odp_atomic_u32_t exit_test;	/* incremented to request test stop */
	odp_atomic_u32_t timers_started;	/* release-set once timers are armed */
	odp_barrier_t barrier;		/* start-of-test synchronization */
	odp_cpumask_t cpumask;		/* worker CPU mask */
	timer_pool_t timer_pool[MAX_TIMER_POOLS];
	odp_pool_t pool[MAX_TIMER_POOLS];	/* timeout event pools */
	odp_queue_t queue[MAX_TIMER_POOLS];	/* scheduled destination queues */
	odp_timer_t timer[MAX_TIMER_POOLS][MAX_TIMERS];
	timer_ctx_t timer_ctx[MAX_TIMER_POOLS][MAX_TIMERS];
	odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
	test_stat_t stat[ODP_THREAD_COUNT_MAX];	/* indexed by thread id */
	thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
	test_stat_sum_t stat_sum;

} test_global_t;

/* Global pointer for the signal handler; set up in main() */
test_global_t *test_global;
+
/* Print command line help text (shown on -h or on any option error) */
static void print_usage(void)
{
	printf("\n"
	       "Timer performance test\n"
	       "\n"
	       "Usage: odp_timer_perf [options]\n"
	       "\n"
	       " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n"
	       " -n, --num_tp Number of timer pools. Default: 1\n"
	       " -t, --num_timer Number of timers per timer pool. Default: 10\n"
	       " -r, --res_ns Resolution in nsec. Default: 10000000\n"
	       " -p, --period_ns Timeout period in nsec. Default: 100000000\n"
	       " -s, --shared Shared vs private timer pool. Currently, private pools can be\n"
	       " tested only with single CPU. Default: 1\n"
	       " 0: Private timer pools\n"
	       " 1: Shared timer pools\n"
	       " -m, --mode Select test mode. Default: 0\n"
	       " 0: Measure odp_schedule() overhead when using timers\n"
	       " 1: Measure timer set + cancel performance\n"
	       " 2: Measure schedule and timer start overhead while continuously\n"
	       " restarting expiring timers\n"
	       " -R, --rounds Number of test rounds in timer set + cancel test.\n"
	       " Default: 100000\n"
	       " -h, --help This help\n"
	       "\n");
}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_tp ", required_argument, NULL, 'n'},
+ {"num_timer", required_argument, NULL, 't'},
+ {"res_ns", required_argument, NULL, 'r'},
+ {"period_ns", required_argument, NULL, 'p'},
+ {"shared", required_argument, NULL, 's'},
+ {"mode", required_argument, NULL, 'm'},
+ {"rounds", required_argument, NULL, 'R'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+
+ static const char *shortopts = "+c:n:t:r:p:s:m:R:h";
+
+ test_options->num_cpu = 1;
+ test_options->num_tp = 1;
+ test_options->num_timer = 10;
+ test_options->res_ns = 10 * ODP_TIME_MSEC_IN_NS;
+ test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
+ test_options->shared = 1;
+ test_options->mode = 0;
+ test_options->test_rounds = 100000;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 'n':
+ test_options->num_tp = atoi(optarg);
+ break;
+ case 't':
+ test_options->num_timer = atoi(optarg);
+ break;
+ case 'r':
+ test_options->res_ns = atoll(optarg);
+ break;
+ case 'p':
+ test_options->period_ns = atoll(optarg);
+ break;
+ case 's':
+ test_options->shared = atoi(optarg);
+ break;
+ case 'm':
+ test_options->mode = atoi(optarg);
+ break;
+ case 'R':
+ test_options->test_rounds = atoll(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->num_timer > MAX_TIMERS) {
+ ODPH_ERR("Too many timers. Max %u\n", MAX_TIMERS);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ int shared = test_options->shared;
+
+ /* One thread used for the main thread */
+ if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+ ODPH_ERR("Too many workers. Max supported %i\n.", ret);
+ return -1;
+ }
+
+ if (shared == 0 && num_cpu != 1) {
+ ODPH_ERR("Private pool test supports only single CPU\n.");
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ if (shared) /* Main thread + all workers */
+ odp_barrier_init(&global->barrier, num_cpu + 1);
+ else /* Only the main thread */
+ odp_barrier_init(&global->barrier, 1);
+
+ return 0;
+}
+
/* Create all timer pools with their timeout event pools and scheduled
 * destination queues, after validating the request against timer
 * capabilities. Also precomputes per-pool start/period tick values.
 * Returns 0 on success, -1 on error (partially created resources are
 * released later by destroy_timer_pool()). */
static int create_timer_pools(test_global_t *global)
{
	odp_timer_capability_t timer_capa;
	odp_timer_res_capability_t timer_res_capa;
	odp_timer_pool_param_t timer_pool_param;
	odp_timer_pool_t tp;
	odp_queue_param_t queue_param;
	odp_queue_t queue;
	odp_pool_param_t pool_param;
	odp_pool_t pool;
	uint64_t max_tmo_ns, min_tmo_ns;
	uint32_t i, j;
	uint32_t max_timers;
	int priv;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;
	uint32_t num_tp = test_options->num_tp;
	uint32_t num_timer = test_options->num_timer;
	uint64_t res_ns = test_options->res_ns;
	uint64_t period_ns = test_options->period_ns;
	int mode = test_options->mode;
	char tp_name[] = "timer_pool_00";

	/* Default bounds: timers are staggered one period apart after START_NS */
	max_tmo_ns = START_NS + (num_timer * period_ns);
	min_tmo_ns = START_NS / 2;

	if (test_options->mode == MODE_START_EXPIRE) {
		/*
		 * Timers are set to 1-2 periods from current time. Add an
		 * arbitrary margin of one period, resulting in maximum of
		 * three periods.
		 */
		max_tmo_ns = period_ns * 3;
		min_tmo_ns = test_options->res_ns / 2;
	}

	priv = 0;
	if (test_options->shared == 0)
		priv = 1;

	printf("\nTimer performance test\n");
	printf(" mode %i\n", mode);
	printf(" num cpu %u\n", num_cpu);
	printf(" private pool %i\n", priv);
	printf(" num timer pool %u\n", num_tp);
	printf(" num timer %u\n", num_timer);
	printf(" resolution %" PRIu64 " nsec\n", res_ns);
	printf(" period %" PRIu64 " nsec\n", period_ns);
	printf(" max timeout %" PRIu64 " nsec\n", max_tmo_ns);
	printf(" min timeout %" PRIu64 " nsec\n", min_tmo_ns);
	printf(" first timer at %.2f sec\n", (double)START_NS / ODP_TIME_SEC_IN_NS);
	if (mode == MODE_SCHED_OVERH)
		printf(" test duration %.2f sec\n", (double)max_tmo_ns / ODP_TIME_SEC_IN_NS);
	else
		printf(" test rounds %" PRIu64 "\n", test_options->test_rounds);

	/* Initialize all handles to INVALID so cleanup can skip unused slots */
	for (i = 0; i < MAX_TIMER_POOLS; i++) {
		global->timer_pool[i].tp = ODP_TIMER_POOL_INVALID;
		global->pool[i] = ODP_POOL_INVALID;
		global->queue[i] = ODP_QUEUE_INVALID;

		for (j = 0; j < MAX_TIMERS; j++)
			global->timer[i][j] = ODP_TIMER_INVALID;
	}

	if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
		ODPH_ERR("Timer capability failed\n");
		return -1;
	}

	/* Query min/max timeout limits at the requested resolution */
	memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t));
	timer_res_capa.res_ns = res_ns;
	if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &timer_res_capa)) {
		ODPH_ERR("Timer resolution capability failed\n");
		return -1;
	}

	/* Smaller res_ns means finer resolution */
	if (res_ns < timer_capa.max_res.res_ns) {
		ODPH_ERR("Too high resolution\n");
		return -1;
	}

	if (min_tmo_ns < timer_res_capa.min_tmo) {
		ODPH_ERR("Too short min timeout\n");
		return -1;
	}

	if (max_tmo_ns > timer_res_capa.max_tmo) {
		ODPH_ERR("Too long max timeout\n");
		return -1;
	}

	/* max_timers == 0 means no implementation limit */
	max_timers = timer_capa.max_timers;
	if (max_timers && num_timer > max_timers) {
		ODPH_ERR("Too many timers (max %u)\n", max_timers);
		return -1;
	}

	if (num_tp > timer_capa.max_pools) {
		ODPH_ERR("Too many timer pools (max %u)\n", timer_capa.max_pools);
		return -1;
	}

	odp_timer_pool_param_init(&timer_pool_param);
	timer_pool_param.res_ns = res_ns;
	timer_pool_param.min_tmo = min_tmo_ns;
	timer_pool_param.max_tmo = max_tmo_ns;
	timer_pool_param.num_timers = num_timer;
	timer_pool_param.priv = priv;
	timer_pool_param.clk_src = ODP_CLOCK_DEFAULT;

	odp_pool_param_init(&pool_param);
	pool_param.type = ODP_POOL_TIMEOUT;
	pool_param.tmo.num = num_timer;

	odp_queue_param_init(&queue_param);
	queue_param.type = ODP_QUEUE_TYPE_SCHED;
	queue_param.sched.prio = odp_schedule_default_prio();
	queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
	queue_param.sched.group = ODP_SCHED_GROUP_ALL;

	for (i = 0; i < num_tp; i++) {
		/* Patch the two-digit suffix of "timer_pool_00" in place */
		if (num_tp < 100) {
			tp_name[11] = '0' + i / 10;
			tp_name[12] = '0' + i % 10;
		}

		tp = odp_timer_pool_create(tp_name, &timer_pool_param);
		global->timer_pool[i].tp = tp;
		if (tp == ODP_TIMER_POOL_INVALID) {
			ODPH_ERR("Timer pool create failed (%u)\n", i);
			return -1;
		}

		if (odp_timer_pool_start_multi(&tp, 1) != 1) {
			ODPH_ERR("Timer pool start failed (%u)\n", i);
			return -1;
		}

		pool = odp_pool_create(tp_name, &pool_param);
		global->pool[i] = pool;
		if (pool == ODP_POOL_INVALID) {
			ODPH_ERR("Pool create failed (%u)\n", i);
			return -1;
		}

		queue = odp_queue_create(tp_name, &queue_param);
		global->queue[i] = queue;
		if (queue == ODP_QUEUE_INVALID) {
			ODPH_ERR("Queue create failed (%u)\n", i);
			return -1;
		}

		/* Cache tick conversions used by the worker loops */
		global->timer_pool[i].period_tick = odp_timer_ns_to_tick(tp,
									 test_options->period_ns);
		global->timer_pool[i].start_tick = odp_timer_ns_to_tick(tp, START_NS);
	}

	printf(" start %" PRIu64 " tick\n", global->timer_pool[0].start_tick);
	printf(" period %" PRIu64 " ticks\n", global->timer_pool[0].period_tick);
	printf("\n");

	return 0;
}
+
+static int set_timers(test_global_t *global)
+{
+ odp_timer_pool_info_t timer_pool_info;
+ odp_timer_pool_t tp;
+ odp_timer_t timer;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_time_t time;
+ uint64_t tick_cur, nsec, time_ns;
+ uint64_t max_tmo_ns;
+ uint32_t i, j;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_tp = test_options->num_tp;
+ uint32_t num_timer = test_options->num_timer;
+ uint64_t period_ns = test_options->period_ns;
+
+ max_tmo_ns = START_NS + (num_timer * period_ns);
+
+ for (i = 0; i < num_tp; i++) {
+ tp = global->timer_pool[i].tp;
+ pool = global->pool[i];
+ queue = global->queue[i];
+
+ nsec = max_tmo_ns;
+ tick_cur = odp_timer_current_tick(tp);
+ time = odp_time_global();
+ time_ns = odp_time_to_ns(time);
+
+ for (j = 0; j < num_timer; j++) {
+ uint64_t tick_ns;
+ odp_timeout_t timeout;
+ odp_event_t ev;
+ int status;
+ timer_ctx_t *ctx = &global->timer_ctx[i][j];
+ odp_timer_start_t start_param;
+
+ /* Set timers backwards, the last timer is set first */
+ if (j == 0)
+ ctx->last = 1;
+
+ ctx->target_ns = time_ns + nsec;
+ ctx->tp_idx = i;
+ ctx->timer_idx = j;
+
+ timeout = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(timeout);
+
+ timer = odp_timer_alloc(tp, queue, ctx);
+ global->timer[i][j] = timer;
+
+ tick_ns = odp_timer_ns_to_tick(tp, nsec);
+ nsec = nsec - period_ns;
+
+ start_param.tick_type = ODP_TIMER_TICK_ABS;
+ start_param.tick = tick_cur + tick_ns;
+ start_param.tmo_ev = ev;
+
+ if (test_options->mode == MODE_START_EXPIRE) {
+ uint64_t offset_ns = period_ns + j * period_ns / num_timer;
+
+ ctx->target_ns = time_ns + offset_ns;
+ ctx->target_tick = tick_cur + odp_timer_ns_to_tick(tp, offset_ns);
+ start_param.tick = ctx->target_tick;
+ }
+
+ status = odp_timer_start(timer, &start_param);
+ if (status != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer set %i/%i (ret %i)\n", i, j, status);
+ return -1;
+ }
+ }
+
+ if (odp_timer_pool_info(tp, &timer_pool_info)) {
+ ODPH_ERR("Timer pool info failed\n");
+ return -1;
+ }
+
+ printf("Timer pool info [%i]:\n", i);
+ printf(" cur_timers %u\n", timer_pool_info.cur_timers);
+ printf(" hwm_timers %u\n", timer_pool_info.hwm_timers);
+ printf("\n");
+ }
+
+ return 0;
+}
+
+static int destroy_timer_pool(test_global_t *global)
+{
+ odp_timer_pool_t tp;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_timer_t timer;
+ uint32_t i, j;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_timer = test_options->num_timer;
+ uint32_t num_tp = test_options->num_tp;
+
+ for (i = 0; i < num_tp; i++) {
+ for (j = 0; j < num_timer; j++) {
+ timer = global->timer[i][j];
+
+ if (timer == ODP_TIMER_INVALID)
+ break;
+
+ if (odp_timer_free(timer))
+ printf("Timer free failed: %i/%i\n", i, j);
+ }
+
+ queue = global->queue[i];
+ if (queue != ODP_QUEUE_INVALID)
+ odp_queue_destroy(queue);
+
+ pool = global->pool[i];
+ if (pool != ODP_POOL_INVALID)
+ odp_pool_destroy(pool);
+
+ tp = global->timer_pool[i].tp;
+ if (tp != ODP_TIMER_POOL_INVALID)
+ odp_timer_pool_destroy(tp);
+ }
+
+ return 0;
+}
+
/* Worker loop for MODE_SCHED_OVERH: spin in odp_schedule() and accumulate
 * CPU cycles spent in empty schedule rounds, plus early/late statistics for
 * each received timeout. The loop exits once every pool's last timeout has
 * been seen (exit_test reaches num_tp) and the scheduler runs dry. */
static int sched_mode_worker(void *arg)
{
	int thr;
	uint32_t exit_test;
	odp_event_t ev;
	odp_timeout_t tmo;
	uint64_t c2, diff, nsec, time_ns, target_ns;
	odp_time_t t1, t2, time;
	time_stat_t before, after;
	timer_ctx_t *ctx;
	thread_arg_t *thread_arg = arg;
	test_global_t *global = thread_arg->global;
	test_options_t *test_options = &global->test_options;
	uint32_t num_tp = test_options->num_tp;
	uint64_t cycles = 0;
	uint64_t events = 0;
	uint64_t rounds = 0;
	uint64_t c1 = 0;
	int meas = 1;	/* restart cycle measurement on next round */
	int ret = 0;

	memset(&before, 0, sizeof(time_stat_t));
	memset(&after, 0, sizeof(time_stat_t));

	thr = odp_thread_id();

	/* Start all workers at the same time */
	odp_barrier_wait(&global->barrier);

	t1 = odp_time_local();

	while (1) {
		if (meas) {
			c1 = odp_cpu_cycles();
			meas = 0;
		}

		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
		rounds++;

		/* Fast path: empty round while the test is still running */
		exit_test = odp_atomic_load_u32(&global->exit_test);
		if (odp_likely(ev == ODP_EVENT_INVALID && exit_test < num_tp))
			continue;

		/* Account cycles spent since measurement start */
		c2 = odp_cpu_cycles();
		diff = odp_cpu_cycles_diff(c2, c1);
		cycles += diff;

		/* All pools finished and nothing left to schedule: done */
		if (ev == ODP_EVENT_INVALID && exit_test >= num_tp)
			break;

		time = odp_time_global();
		time_ns = odp_time_to_ns(time);
		events++;
		meas = 1;

		tmo = odp_timeout_from_event(ev);
		ctx = odp_timeout_user_ptr(tmo);
		odp_timeout_free(tmo);

		/* Record how early/late the timeout arrived vs its target */
		target_ns = ctx->target_ns;
		if (time_ns < target_ns) {
			diff = target_ns - time_ns;
			before.num++;
			before.sum_ns += diff;
			if (diff > before.max_ns)
				before.max_ns = diff;

			ODPH_DBG("before %" PRIu64 "\n", diff);
		} else {
			diff = time_ns - target_ns;
			after.num++;
			after.sum_ns += diff;
			if (diff > after.max_ns)
				after.max_ns = diff;

			ODPH_DBG("after %" PRIu64 "\n", time_ns - target_ns);
		}

		/* The last-expiring timer of a pool signals pool completion */
		if (ctx->last)
			odp_atomic_inc_u32(&global->exit_test);
	}

	t2 = odp_time_local();
	nsec = odp_time_diff_ns(t2, t1);

	/* Update stats*/
	global->stat[thr].events = events;
	global->stat[thr].cycles_0 = cycles;
	global->stat[thr].rounds = rounds;
	global->stat[thr].nsec = nsec;
	global->stat[thr].before = before;
	global->stat[thr].after = after;

	return ret;
}
+
+static int cancel_timers(test_global_t *global, uint32_t worker_idx)
+{
+ uint32_t i, j;
+ int r;
+ odp_timer_t timer;
+ odp_event_t ev;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_tp = test_options->num_tp;
+ uint32_t num_timer = test_options->num_timer;
+ uint32_t num_worker = test_options->num_cpu;
+ int ret = 0;
+
+ for (i = 0; i < num_tp; i++) {
+ for (j = worker_idx; j < num_timer; j += num_worker) {
+ timer = global->timer[i][j];
+ if (timer == ODP_TIMER_INVALID)
+ continue;
+
+ r = odp_timer_cancel(timer, &ev);
+
+ if (r == ODP_TIMER_SUCCESS) {
+ odp_event_free(ev);
+ } else if (r == ODP_TIMER_TOO_NEAR) {
+ ret = 1;
+ } else {
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* Worker loop for MODE_START_CANCEL: repeatedly cancel and re-arm this
 * worker's share of timers, measuring CPU cycles spent in cancel and start
 * calls separately. Timeout events that slip through (timer expired before
 * cancel) are re-armed from the scheduler path. Runs test_rounds rounds or
 * until exit_test is raised. */
static int set_cancel_mode_worker(void *arg)
{
	uint64_t tick, start_tick, period_tick, nsec;
	uint64_t c1, c2;
	int thr, status;
	uint32_t i, j, worker_idx;
	odp_event_t ev;
	odp_time_t t1, t2;
	odp_timer_t timer;
	odp_timer_pool_t tp;
	odp_timeout_t tmo;
	odp_timer_start_t start_param;
	thread_arg_t *thread_arg = arg;
	test_global_t *global = thread_arg->global;
	test_options_t *test_options = &global->test_options;
	uint32_t num_tp = test_options->num_tp;
	uint32_t num_timer = test_options->num_timer;
	uint32_t num_worker = test_options->num_cpu;
	int ret = 0;
	int started = 0;	/* measurement starts once timers are armed */
	uint64_t test_rounds = test_options->test_rounds;
	uint64_t num_tmo = 0;
	uint64_t num_cancel = 0;
	uint64_t num_set = 0;
	uint64_t cancel_cycles = 0, start_cycles = 0;
	odp_event_t ev_tbl[MAX_TIMERS];

	thr = odp_thread_id();
	worker_idx = thread_arg->worker_idx;
	t1 = ODP_TIME_NULL;

	/* Start all workers at the same time */
	odp_barrier_wait(&global->barrier);

	while (1) {
		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (odp_unlikely(ev != ODP_EVENT_INVALID)) {
			/* Timeout, set timer again. When start_tick is large enough, this should
			 * not happen. */
			timer_ctx_t *ctx;

			tmo = odp_timeout_from_event(ev);
			ctx = odp_timeout_user_ptr(tmo);
			i = ctx->tp_idx;
			j = ctx->timer_idx;
			timer = global->timer[i][j];
			start_tick = global->timer_pool[i].start_tick;
			period_tick = global->timer_pool[i].period_tick;
			tick = start_tick + j * period_tick;

			start_param.tick_type = ODP_TIMER_TICK_REL;
			start_param.tick = tick;
			start_param.tmo_ev = ev;

			status = odp_timer_start(timer, &start_param);
			num_tmo++;
			num_set++;

			if (status != ODP_TIMER_SUCCESS) {
				ODPH_ERR("Timer set (tmo) failed (ret %i)\n", status);
				ret = -1;
				break;
			}

			continue;
		}

		if (odp_unlikely(odp_atomic_load_u32(&global->exit_test)))
			break;

		if (odp_unlikely(started == 0)) {
			/* Run schedule loop while waiting for timers to be created */
			if (odp_atomic_load_acq_u32(&global->timers_started) == 0)
				continue;

			/* Start measurements */
			started = 1;
			t1 = odp_time_local();
		}

		/* Cancel and set timers again */
		for (i = 0; i < num_tp; i++) {
			tp = global->timer_pool[i].tp;
			if (tp == ODP_TIMER_POOL_INVALID)
				continue;

			start_tick = global->timer_pool[i].start_tick;
			period_tick = global->timer_pool[i].period_tick;

			/* Common absolute base tick for all restarts below */
			tick = odp_timer_current_tick(tp) + start_tick;
			c1 = odp_cpu_cycles();

			/* Cancel this worker's timers; keep the returned
			 * events for the restart pass below */
			for (j = worker_idx; j < num_timer; j += num_worker) {
				ev_tbl[j] = ODP_EVENT_INVALID;

				timer = global->timer[i][j];
				if (timer == ODP_TIMER_INVALID)
					continue;

				status = odp_timer_cancel(timer, &ev_tbl[j]);
				num_cancel++;

				if (odp_unlikely(status == ODP_TIMER_TOO_NEAR)) {
					/* Expired already: event arrives via queue */
					continue;
				} else if (odp_unlikely(status != ODP_TIMER_SUCCESS)) {
					ODPH_ERR("Timer (%u/%u) cancel failed (ret %i)\n", i, j,
						 status);
					ret = -1;
					break;
				}
			}

			c2 = odp_cpu_cycles();
			cancel_cycles += odp_cpu_cycles_diff(c2, c1);
			c1 = c2;

			/* Restart every successfully cancelled timer */
			for (j = worker_idx; j < num_timer; j += num_worker) {
				if (ev_tbl[j] == ODP_EVENT_INVALID)
					continue;

				timer = global->timer[i][j];
				if (timer == ODP_TIMER_INVALID)
					continue;

				start_param.tick_type = ODP_TIMER_TICK_ABS;
				start_param.tick = tick + j * period_tick;
				start_param.tmo_ev = ev_tbl[j];

				status = odp_timer_start(timer, &start_param);
				num_set++;

				if (status != ODP_TIMER_SUCCESS) {
					ODPH_ERR("Timer (%u/%u) set failed (ret %i)\n", i, j,
						 status);
					ret = -1;
					break;
				}
			}

			c2 = odp_cpu_cycles();
			start_cycles += odp_cpu_cycles_diff(c2, c1);
		}

		if (test_rounds) {
			test_rounds--;
			if (test_rounds == 0)
				break;
		}
	}

	t2 = odp_time_local();
	nsec = odp_time_diff_ns(t2, t1);

	/* Cancel all timers that belong to this thread */
	if (cancel_timers(global, worker_idx))
		ODPH_ERR("Timer cancel failed\n");

	/* Update stats */
	global->stat[thr].events = num_tmo;
	global->stat[thr].rounds = test_options->test_rounds - test_rounds;
	global->stat[thr].nsec = nsec;
	global->stat[thr].cycles_0 = cancel_cycles;
	global->stat[thr].cycles_1 = start_cycles;

	global->stat[thr].cancels = num_cancel;
	global->stat[thr].sets = num_set;

	return ret;
}
+
/* Worker loop for MODE_START_EXPIRE: receive expired timeouts, record
 * early/late arrival statistics, and immediately restart each timer one
 * period further (absolute tick). Schedule cycles and timer-start cycles
 * are measured separately. Runs until this worker has handled its share of
 * test_rounds * num_timer events, or until all pools signal completion. */
static int set_expire_mode_worker(void *arg)
{
	int status, thr;
	uint32_t i, j, exit_test;
	odp_event_t ev;
	odp_timeout_t tmo;
	uint64_t c2, c3, c4, diff, nsec, time_ns, target_ns, period_tick, wait;
	odp_timer_t timer;
	odp_timer_start_t start_param;
	odp_time_t t1, t2;
	time_stat_t before, after;
	timer_ctx_t *ctx;
	thread_arg_t *thread_arg = arg;
	test_global_t *global = thread_arg->global;
	test_options_t *opt = &global->test_options;
	uint32_t num_tp = opt->num_tp;
	uint64_t sched_cycles = 0;
	uint64_t start_cycles = 0;
	uint64_t events = 0;
	uint64_t rounds = 0;
	uint64_t c1 = 0;
	int meas = 1;	/* restart cycle measurement on next round */
	int ret = 0;

	memset(&before, 0, sizeof(time_stat_t));
	memset(&after, 0, sizeof(time_stat_t));

	thr = odp_thread_id();

	/* Start all workers at the same time */
	odp_barrier_wait(&global->barrier);

	t1 = odp_time_local();

	/* Each worker handles its share of the total event budget */
	while (events < opt->test_rounds * opt->num_timer / opt->num_cpu) {
		if (meas) {
			c1 = odp_cpu_cycles();
			meas = 0;
		}

		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
		rounds++;

		/* Fast path: empty round while the test is still running */
		exit_test = odp_atomic_load_u32(&global->exit_test);
		if (odp_likely(ev == ODP_EVENT_INVALID && exit_test < num_tp))
			continue;

		c2 = odp_cpu_cycles();
		diff = odp_cpu_cycles_diff(c2, c1);
		sched_cycles += diff;

		if (ev == ODP_EVENT_INVALID && exit_test >= num_tp)
			break;

		events++;
		meas = 1;
		tmo = odp_timeout_from_event(ev);
		ctx = odp_timeout_user_ptr(tmo);
		i = ctx->tp_idx;
		j = ctx->timer_idx;
		timer = global->timer[i][j];
		period_tick = global->timer_pool[i].period_tick;
		time_ns = odp_time_global_ns();
		target_ns = ctx->target_ns;

		/* Record how early/late the timeout arrived vs its target */
		if (time_ns < target_ns) {
			diff = target_ns - time_ns;
			before.num++;
			before.sum_ns += diff;
			if (diff > before.max_ns)
				before.max_ns = diff;

			ODPH_DBG("before %" PRIu64 "\n", diff);
		} else {
			diff = time_ns - target_ns;
			after.num++;
			after.sum_ns += diff;
			if (diff > after.max_ns)
				after.max_ns = diff;

			ODPH_DBG("after %" PRIu64 "\n", diff);
		}

		/* Start the timer again */
		start_param.tick_type = ODP_TIMER_TICK_ABS;
		ctx->target_ns += opt->period_ns;
		ctx->target_tick += period_tick;
		start_param.tick = ctx->target_tick;
		start_param.tmo_ev = ev;
		c3 = odp_cpu_cycles();

		status = odp_timer_start(timer, &start_param);

		c4 = odp_cpu_cycles();
		diff = odp_cpu_cycles_diff(c4, c3);
		start_cycles += diff;

		if (status != ODP_TIMER_SUCCESS) {
			ODPH_ERR("Timer set (tmo) failed (ret %i)\n", status);
			ret = -1;
			break;
		}
	}

	t2 = odp_time_local();
	nsec = odp_time_diff_ns(t2, t1);

	/* Cancel all timers that belong to this thread */
	status = cancel_timers(global, thread_arg->worker_idx);

	/* If some timer was TOO_NEAR to cancel, wait up to one period for its
	 * event to arrive before draining */
	wait = ODP_SCHED_NO_WAIT;
	if (status > 0)
		wait = odp_schedule_wait_time(opt->period_ns);

	/* Wait and free remaining events */
	while (1) {
		ev = odp_schedule(NULL, wait);
		if (ev == ODP_EVENT_INVALID)
			break;
		odp_event_free(ev);
	}

	/* Update stats*/
	global->stat[thr].events = events;
	global->stat[thr].cycles_0 = sched_cycles;
	global->stat[thr].cycles_1 = start_cycles;
	global->stat[thr].rounds = rounds;
	global->stat[thr].nsec = nsec;
	global->stat[thr].before = before;
	global->stat[thr].after = after;

	return ret;
}
+
+/*
+ * Create one worker thread per requested CPU. The worker entry point is
+ * selected by the test mode. Returns 0 on success, -1 if fewer than
+ * num_cpu threads could be created.
+ */
+static int start_workers(test_global_t *global, odp_instance_t instance)
+{
+ odph_thread_common_param_t thr_common;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
+ odph_thread_common_param_init(&thr_common);
+
+ thr_common.instance = instance;
+ thr_common.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ odph_thread_param_init(&thr_param[i]);
+
+ /* Select worker function based on test mode */
+ if (test_options->mode == MODE_SCHED_OVERH)
+ thr_param[i].start = sched_mode_worker;
+ else if (test_options->mode == MODE_START_CANCEL)
+ thr_param[i].start = set_cancel_mode_worker;
+ else
+ thr_param[i].start = set_expire_mode_worker;
+
+ thr_param[i].arg = &global->thread_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param,
+ num_cpu);
+
+ if (ret != num_cpu) {
+ ODPH_ERR("Thread create failed %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Aggregate per-thread statistics into global->stat_sum. Threads with
+ * zero rounds (never ran) are excluded. Also computes the average test
+ * run time in seconds over the participating threads.
+ */
+static void sum_stat(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+
+ memset(sum, 0, sizeof(test_stat_sum_t));
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds == 0)
+ continue;
+
+ sum->num++;
+ sum->events += global->stat[i].events;
+ sum->rounds += global->stat[i].rounds;
+ sum->cycles_0 += global->stat[i].cycles_0;
+ sum->cycles_1 += global->stat[i].cycles_1;
+ sum->nsec += global->stat[i].nsec;
+ sum->cancels += global->stat[i].cancels;
+ sum->sets += global->stat[i].sets;
+
+ sum->before.num += global->stat[i].before.num;
+ sum->before.sum_ns += global->stat[i].before.sum_ns;
+ sum->after.num += global->stat[i].after.num;
+ sum->after.sum_ns += global->stat[i].after.sum_ns;
+
+ /* Track the worst-case early/late values across all threads */
+ if (global->stat[i].before.max_ns > sum->before.max_ns)
+ sum->before.max_ns = global->stat[i].before.max_ns;
+
+ if (global->stat[i].after.max_ns > sum->after.max_ns)
+ sum->after.max_ns = global->stat[i].after.max_ns;
+ }
+
+ if (sum->num)
+ sum->time_ave = ((double)sum->nsec / sum->num) / ODP_TIME_SEC_IN_NS;
+}
+
+/*
+ * Print results for scheduler overhead mode: per-thread average
+ * odp_schedule() cycles (10 columns per row) followed by totals.
+ * NOTE(review): totals divide by sum->time_ave, which is 0.0 when no
+ * thread recorded rounds — prints inf in that (unlikely) case.
+ */
+static void print_stat_sched_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double round_ave = 0.0;
+ double before_ave = 0.0;
+ double after_ave = 0.0;
+ int num = 0;
+
+ printf("\n");
+ printf("RESULTS - schedule() cycles per thread:\n");
+ printf("----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ /* Start a new row every 10 values */
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles_0 / global->stat[i].rounds);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ if (sum->num)
+ round_ave = (double)sum->rounds / sum->num;
+
+ if (sum->before.num)
+ before_ave = (double)sum->before.sum_ns / sum->before.num;
+
+ if (sum->after.num)
+ after_ave = (double)sum->after.sum_ns / sum->after.num;
+
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" events: %" PRIu64 "\n", sum->events);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" ave rounds per sec: %.2fM\n", (round_ave / sum->time_ave) / 1000000.0);
+ printf(" num before: %" PRIu64 "\n", sum->before.num);
+ printf(" ave before: %.1f nsec\n", before_ave);
+ printf(" max before: %" PRIu64 " nsec\n", sum->before.max_ns);
+ printf(" num after: %" PRIu64 "\n", sum->after.num);
+ printf(" ave after: %.1f nsec\n", after_ave);
+ printf(" max after: %" PRIu64 " nsec\n", sum->after.max_ns);
+ printf("\n");
+}
+
+/*
+ * Print results for timer start+cancel mode: per-thread average cycles
+ * for odp_timer_cancel() and odp_timer_start(), then totals. A cancel
+ * that fails (timeout already expired) is not followed by a set, hence
+ * "cancels failed" is reported as cancels - sets.
+ */
+static void print_stat_set_cancel_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double set_ave = 0.0;
+ int num = 0;
+
+ printf("\n");
+ printf("RESULTS\n");
+ printf("odp_timer_cancel() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ const test_stat_t *si = &global->stat[i];
+
+ if (si->cancels) {
+ /* Start a new row every 10 values */
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)si->cycles_0 / si->cancels);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ num = 0;
+ printf("odp_timer_start() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ const test_stat_t *si = &global->stat[i];
+
+ if (si->sets) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)si->cycles_1 / si->sets);
+ num++;
+ }
+ }
+
+ if (sum->num)
+ set_ave = (double)sum->sets / sum->num;
+
+ printf("\n\n");
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" rounds: %" PRIu64 "\n", sum->rounds);
+ printf(" timeouts: %" PRIu64 "\n", sum->events);
+ printf(" timer cancels: %" PRIu64 "\n", sum->cancels);
+ printf(" cancels failed: %" PRIu64 "\n", sum->cancels - sum->sets);
+ printf(" timer sets: %" PRIu64 "\n", sum->sets);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" cancel+set per cpu: %.2fM per sec\n", (set_ave / sum->time_ave) / 1000000.0);
+ printf("\n");
+}
+
+/*
+ * Print results for expiration accuracy mode: per-thread average cycles
+ * for odp_schedule() and odp_timer_start(), plus early/late (before/
+ * after target time) expiration statistics.
+ */
+static void print_stat_expire_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double round_ave = 0.0;
+ double before_ave = 0.0;
+ double after_ave = 0.0;
+ int num = 0;
+
+ printf("\n");
+ printf("RESULTS\n");
+ printf("odp_schedule() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ /* Start a new row every 10 values */
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles_0 / global->stat[i].rounds);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ num = 0;
+ printf("odp_timer_start() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].events) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles_1 / global->stat[i].events);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ if (sum->num)
+ round_ave = (double)sum->rounds / sum->num;
+
+ if (sum->before.num)
+ before_ave = (double)sum->before.sum_ns / sum->before.num;
+
+ if (sum->after.num)
+ after_ave = (double)sum->after.sum_ns / sum->after.num;
+
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" events: %" PRIu64 "\n", sum->events);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" ave rounds per sec: %.2fM\n", (round_ave / sum->time_ave) / 1000000.0);
+ printf(" num before: %" PRIu64 "\n", sum->before.num);
+ printf(" ave before: %.1f nsec\n", before_ave);
+ printf(" max before: %" PRIu64 " nsec\n", sum->before.max_ns);
+ printf(" num after: %" PRIu64 "\n", sum->after.num);
+ printf(" ave after: %.1f nsec\n", after_ave);
+ printf(" max after: %" PRIu64 " nsec\n", sum->after.max_ns);
+ printf("\n");
+}
+
+/*
+ * SIGINT handler. Adding MAX_TIMER_POOLS pushes exit_test to at least
+ * num_tp, which makes every worker loop terminate.
+ * NOTE(review): odp_atomic_add_u32() is presumably async-signal-safe in
+ * this implementation — confirm against the platform's atomics.
+ */
+static void sig_handler(int signo)
+{
+ (void)signo;
+
+ if (test_global == NULL)
+ return;
+ odp_atomic_add_u32(&test_global->exit_test, MAX_TIMER_POOLS);
+}
+
+/*
+ * Program entry: initialize ODP, reserve shared state, create timer
+ * pools and timers, then run the selected test mode either on worker
+ * threads (shared pools) or on this control thread (private pools).
+ * Finally aggregate and print statistics and tear everything down.
+ */
+int main(int argc, char **argv)
+{
+ odph_helper_options_t helper_options;
+ odp_instance_t instance;
+ odp_init_t init;
+ odp_shm_t shm;
+ test_global_t *global;
+ test_options_t *test_options;
+ int i, shared, mode;
+
+ signal(SIGINT, sig_handler);
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ return -1;
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ return -1;
+ }
+
+ shm = odp_shm_reserve("timer_perf_global", sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared mem reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ ODPH_ERR("Shared mem alloc failed\n");
+ exit(EXIT_FAILURE);
+ }
+ /* Expose global state to the signal handler */
+ test_global = global;
+
+ memset(global, 0, sizeof(test_global_t));
+ odp_atomic_init_u32(&global->exit_test, 0);
+ odp_atomic_init_u32(&global->timers_started, 0);
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ global->thread_arg[i].global = global;
+ global->thread_arg[i].worker_idx = i;
+ }
+
+ if (parse_options(argc, argv, &global->test_options))
+ return -1;
+
+ test_options = &global->test_options;
+ shared = test_options->shared;
+ mode = test_options->mode;
+
+ odp_sys_info_print();
+
+ odp_schedule_config(NULL);
+
+ if (set_num_cpu(global))
+ return -1;
+
+ if (create_timer_pools(global))
+ return -1;
+
+ if (shared) {
+ /* Start worker threads */
+ start_workers(global, instance);
+
+ /* Wait until workers have started.
+ * Scheduler calls from workers may be needed to run timer
+ * pools in a software implementation. Wait 1 msec to ensure
+ * that timer pools are running before setting timers. */
+ odp_barrier_wait(&global->barrier);
+ odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+ }
+
+ /* Set timers. Force workers to exit on failure. */
+ if (set_timers(global))
+ odp_atomic_add_u32(&global->exit_test, MAX_TIMER_POOLS);
+ else
+ odp_atomic_store_rel_u32(&global->timers_started, 1);
+
+ if (!shared) {
+ /* Test private pools on the master thread */
+ if (mode == MODE_SCHED_OVERH) {
+ if (sched_mode_worker(&global->thread_arg[0])) {
+ ODPH_ERR("Sched_mode_worker failed\n");
+ return -1;
+ }
+ } else if (mode == MODE_START_CANCEL) {
+ if (set_cancel_mode_worker(&global->thread_arg[0])) {
+ ODPH_ERR("Set_cancel_mode_worker failed\n");
+ return -1;
+ }
+ } else {
+ if (set_expire_mode_worker(&global->thread_arg[0])) {
+ ODPH_ERR("Set_expire_mode_worker failed\n");
+ return -1;
+ }
+ }
+ } else {
+ /* Wait workers to exit */
+ odph_thread_join(global->thread_tbl,
+ global->test_options.num_cpu);
+ }
+
+ sum_stat(global);
+
+ /* Print results for the tested mode */
+ if (mode == MODE_SCHED_OVERH)
+ print_stat_sched_mode(global);
+ else if (mode == MODE_START_CANCEL)
+ print_stat_set_cancel_mode(global);
+ else
+ print_stat_expire_mode(global);
+
+ destroy_timer_pool(global);
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shared mem free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Term local failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Term global failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/performance/odp_timer_perf_run.sh b/test/performance/odp_timer_perf_run.sh
new file mode 100755
index 000000000..7738ca91b
--- /dev/null
+++ b/test/performance/odp_timer_perf_run.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Run odp_timer_perf in two modes with a short configuration suitable
+# for CI. Exits with the test binary's status on the first failure.
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+echo odp_timer_perf: odp_schedule overhead mode
+echo ===============================================
+
+$TEST_DIR/odp_timer_perf${EXEEXT} -m 0 -c 1
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_timer_perf -m 0: FAILED
+ exit $RET_VAL
+fi
+
+echo odp_timer_perf: timer set + cancel mode
+echo ===============================================
+
+# Mode 1: 10 timers, 50 rounds
+$TEST_DIR/odp_timer_perf${EXEEXT} -m 1 -c 1 -t 10 -R 50
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_timer_perf -m 1: FAILED
+ exit $RET_VAL
+fi
+
+exit 0
diff --git a/test/common_plat/performance/udp64.pcap b/test/performance/udp64.pcap
index 45f9d6e63..45f9d6e63 100644
--- a/test/common_plat/performance/udp64.pcap
+++ b/test/performance/udp64.pcap
Binary files differ
diff --git a/test/test_debug.h b/test/test_debug.h
deleted file mode 100644
index aec0977d1..000000000
--- a/test/test_debug.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-/**
- * @file
- *
- * test debug
- */
-
-#ifndef TEST_DEBUG_H_
-#define TEST_DEBUG_H_
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef TEST_DEBUG_PRINT
-#define TEST_DEBUG_PRINT 1
-#endif
-
-/**
- * log level.
- */
-typedef enum test_log_level {
- TEST_LOG_DBG,
- TEST_LOG_ERR,
- TEST_LOG_ABORT
-} test_log_level_e;
-
-/**
- * default LOG macro.
- */
-#define TEST_LOG(level, fmt, ...) \
-do { \
- switch (level) { \
- case TEST_LOG_ERR: \
- fprintf(stderr, "%s:%d:%s():" fmt, __FILE__, \
- __LINE__, __func__, ##__VA_ARGS__); \
- break; \
- case TEST_LOG_DBG: \
- if (TEST_DEBUG_PRINT == 1) \
- fprintf(stderr, "%s:%d:%s():" fmt, __FILE__, \
- __LINE__, __func__, ##__VA_ARGS__); \
- break; \
- case TEST_LOG_ABORT: \
- fprintf(stderr, "%s:%d:%s(): " fmt, __FILE__, \
- __LINE__, __func__, ##__VA_ARGS__); \
- abort(); \
- break; \
- default: \
- fprintf(stderr, "Unknown LOG level"); \
- break;\
- } \
-} while (0)
-
-/**
- * Debug printing macro, which prints output when DEBUG flag is set.
- */
-#define LOG_DBG(fmt, ...) \
- TEST_LOG(TEST_LOG_DBG, fmt, ##__VA_ARGS__)
-
-/**
- * Print output to stderr (file, line and function).
- */
-#define LOG_ERR(fmt, ...) \
- TEST_LOG(TEST_LOG_ERR, fmt, ##__VA_ARGS__)
-
-/**
- * Print output to stderr (file, line and function),
- * then abort.
- */
-#define LOG_ABORT(fmt, ...) \
- TEST_LOG(TEST_LOG_ABORT, fmt, ##__VA_ARGS__)
-
-/**
- * @}
- */
-
-/**
- * Mark intentionally unused argument for functions
- */
-#define TEST_UNUSED __attribute__((__unused__))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/test/common_plat/validation/Makefile.am b/test/validation/Makefile.am
index 5d525fba4..328543780 100644
--- a/test/common_plat/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -1,3 +1,3 @@
-if cunit_support
+if test_vald
SUBDIRS = api
endif
diff --git a/test/linux-dpdk/.gitignore b/test/validation/api/.gitignore
index 5dabf91c1..5dabf91c1 100644
--- a/test/linux-dpdk/.gitignore
+++ b/test/validation/api/.gitignore
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
new file mode 100644
index 000000000..5a3c0216b
--- /dev/null
+++ b/test/validation/api/Makefile.am
@@ -0,0 +1,100 @@
+ODP_MODULES = align \
+ atomic \
+ barrier \
+ buffer \
+ byteorder \
+ chksum \
+ classification \
+ comp \
+ cpumask \
+ crypto \
+ dma \
+ errno \
+ event \
+ hash \
+ hints \
+ init \
+ ipsec \
+ lock \
+ ml \
+ queue \
+ packet \
+ pktio \
+ pool \
+ random \
+ scheduler \
+ stash \
+ std \
+ thread \
+ time \
+ timer \
+ traffic_mngr \
+ shmem \
+ system
+
+SUBDIRS = $(ODP_MODULES)
+
+include $(top_srcdir)/test/Makefile.inc
+TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation/api
+
+TESTS = \
+ align/align_main$(EXEEXT) \
+ atomic/atomic_main$(EXEEXT) \
+ barrier/barrier_main$(EXEEXT) \
+ buffer/buffer_main$(EXEEXT) \
+ byteorder/byteorder_main$(EXEEXT) \
+ chksum/chksum_main$(EXEEXT) \
+ classification/classification_main$(EXEEXT) \
+ comp/comp_main$(EXEEXT) \
+ cpumask/cpumask_main$(EXEEXT) \
+ crypto/crypto_main$(EXEEXT) \
+ dma/dma_main$(EXEEXT) \
+ errno/errno_main$(EXEEXT) \
+ event/event_main$(EXEEXT) \
+ hash/hash_main$(EXEEXT) \
+ hints/hints_main$(EXEEXT) \
+ init/init_defaults.sh \
+ init/init_abort.sh \
+ init/init_log.sh \
+ init/init_log_thread.sh \
+ init/init_num_thr.sh \
+ init/init_feature_enabled.sh \
+ init/init_feature_disabled.sh \
+ init/init_test_param_init.sh \
+ init/init_test_term_abnormal.sh \
+ ipsec/ipsec_sync.sh \
+ ipsec/ipsec_async.sh \
+ ipsec/ipsec_inline_in.sh \
+ ipsec/ipsec_inline_out.sh \
+ lock/lock_main$(EXEEXT) \
+ ml/ml_main$(EXEEXT) \
+ packet/packet_main$(EXEEXT) \
+ pktio/pktio_main$(EXEEXT) \
+ pool/pool_main$(EXEEXT) \
+ queue/queue_main$(EXEEXT) \
+ random/random_main$(EXEEXT) \
+ scheduler/scheduler_main$(EXEEXT) \
+ scheduler/scheduler_no_predef_groups$(EXEEXT) \
+ stash/stash_main$(EXEEXT) \
+ std/std_main$(EXEEXT) \
+ thread/thread_main$(EXEEXT) \
+ time/time_main$(EXEEXT) \
+ timer/timer_main$(EXEEXT) \
+ traffic_mngr/traffic_mngr_main$(EXEEXT) \
+ shmem/shmem_main$(EXEEXT) \
+ system/system_main$(EXEEXT)
+
+TESTNAME = validation
+
+TESTENV = tests-$(TESTNAME).env
+
+test_DATA = $(TESTENV)
+
+DISTCLEANFILES = $(TESTENV)
+.PHONY: $(TESTENV)
+$(TESTENV):
+ echo "TESTS=\"$(TESTS)\"" > $@
+ echo "$(TESTS_ENVIRONMENT)" >> $@
+ echo "$(LOG_COMPILER)" >> $@
+
+.NOTPARALLEL:
diff --git a/test/validation/api/Makefile.inc b/test/validation/api/Makefile.inc
new file mode 100644
index 000000000..8610b5687
--- /dev/null
+++ b/test/validation/api/Makefile.inc
@@ -0,0 +1,3 @@
+include $(top_srcdir)/test/Makefile.inc
+
+PRELDADD += $(LIBCUNIT_COMMON)
diff --git a/test/common_plat/validation/api/README b/test/validation/api/README
index 1baebaafc..7ee903478 100644
--- a/test/common_plat/validation/api/README
+++ b/test/validation/api/README
@@ -1,4 +1,4 @@
-Copyright (c) 2015, Linaro Limited
+Copyright (c) 2015-2018, Linaro Limited
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
diff --git a/test/validation/api/align/.gitignore b/test/validation/api/align/.gitignore
new file mode 100644
index 000000000..3031151a1
--- /dev/null
+++ b/test/validation/api/align/.gitignore
@@ -0,0 +1 @@
+align_main
diff --git a/test/validation/api/align/Makefile.am b/test/validation/api/align/Makefile.am
new file mode 100644
index 000000000..dd6e36bcd
--- /dev/null
+++ b/test/validation/api/align/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = align_main
+align_main_SOURCES = align.c
diff --git a/test/validation/api/align/align.c b/test/validation/api/align/align.c
new file mode 100644
index 000000000..ddbd1a2fe
--- /dev/null
+++ b/test/validation/api/align/align.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Test struct without gaps: members already naturally aligned, so
+ * ODP_PACKED must not change the layout. */
+typedef struct ODP_PACKED {
+ uint8_t a;
+ uint8_t b;
+ uint16_t c;
+ uint32_t d;
+} test_type_t;
+
+/* Test struct with gaps: without ODP_PACKED the compiler would insert
+ * padding before b and d; packing removes it. */
+typedef struct ODP_PACKED {
+ uint8_t a;
+ uint16_t b;
+ uint8_t c;
+ uint32_t d;
+} test_type_2_t;
+
+/* Verify that ODP_ALIGNED(n) aligns stack variables to n bytes */
+static void test_aligned(void)
+{
+ uint8_t align_2 ODP_ALIGNED(2);
+ uint16_t align_4 ODP_ALIGNED(4);
+ uint32_t align_8 ODP_ALIGNED(8);
+ uint64_t align_16 ODP_ALIGNED(16);
+
+ CU_ASSERT((uintptr_t)&align_2 % 2 == 0);
+ CU_ASSERT((uintptr_t)&align_4 % 4 == 0);
+ CU_ASSERT((uintptr_t)&align_8 % 8 == 0);
+ CU_ASSERT((uintptr_t)&align_16 % 16 == 0);
+}
+
+/* Verify that ODP_PACKED structs have consecutive member offsets with
+ * no padding, for both the naturally-aligned and the gap-prone layout */
+static void test_packed(void)
+{
+ uint32_t offset;
+
+ offset = 0;
+ CU_ASSERT(offsetof(test_type_t, a) == offset);
+
+ offset += sizeof(uint8_t);
+ CU_ASSERT(offsetof(test_type_t, b) == offset);
+
+ offset += sizeof(uint8_t);
+ CU_ASSERT(offsetof(test_type_t, c) == offset);
+
+ offset += sizeof(uint16_t);
+ CU_ASSERT(offsetof(test_type_t, d) == offset);
+
+ offset = 0;
+ CU_ASSERT(offsetof(test_type_2_t, a) == offset);
+
+ offset += sizeof(uint8_t);
+ CU_ASSERT(offsetof(test_type_2_t, b) == offset);
+
+ offset += sizeof(uint16_t);
+ CU_ASSERT(offsetof(test_type_2_t, c) == offset);
+
+ offset += sizeof(uint8_t);
+ CU_ASSERT(offsetof(test_type_2_t, d) == offset);
+}
+
+/* ODP_OFFSETOF must agree with the standard offsetof for all members */
+static void test_offsetof(void)
+{
+ CU_ASSERT(ODP_OFFSETOF(test_type_t, a) == offsetof(test_type_t, a));
+ CU_ASSERT(ODP_OFFSETOF(test_type_t, b) == offsetof(test_type_t, b));
+ CU_ASSERT(ODP_OFFSETOF(test_type_t, c) == offsetof(test_type_t, c));
+ CU_ASSERT(ODP_OFFSETOF(test_type_t, d) == offsetof(test_type_t, d));
+ CU_ASSERT(ODP_OFFSETOF(test_type_2_t, a) == offsetof(test_type_2_t, a));
+ CU_ASSERT(ODP_OFFSETOF(test_type_2_t, b) == offsetof(test_type_2_t, b));
+ CU_ASSERT(ODP_OFFSETOF(test_type_2_t, c) == offsetof(test_type_2_t, c));
+ CU_ASSERT(ODP_OFFSETOF(test_type_2_t, d) == offsetof(test_type_2_t, d));
+}
+
+/* ODP_FIELD_SIZEOF must agree with sizeof on an actual instance */
+static void test_field_sizeof(void)
+{
+ test_type_t tt;
+
+ CU_ASSERT(ODP_FIELD_SIZEOF(test_type_t, a) == sizeof(tt.a));
+ CU_ASSERT(ODP_FIELD_SIZEOF(test_type_t, b) == sizeof(tt.b));
+ CU_ASSERT(ODP_FIELD_SIZEOF(test_type_t, c) == sizeof(tt.c));
+ CU_ASSERT(ODP_FIELD_SIZEOF(test_type_t, d) == sizeof(tt.d));
+}
+
+/* ODP_CACHE_LINE_SIZE must be positive and even.
+ * NOTE(review): only evenness is checked here, not power-of-two. */
+static void test_cache_line_size(void)
+{
+ CU_ASSERT(ODP_CACHE_LINE_SIZE > 0);
+ CU_ASSERT(ODP_CACHE_LINE_SIZE % 2 == 0);
+}
+
+/* ODP_PAGE_SIZE must be positive and even */
+static void test_page_size(void)
+{
+ CU_ASSERT(ODP_PAGE_SIZE > 0);
+ CU_ASSERT(ODP_PAGE_SIZE % 2 == 0);
+}
+
+/* ODP_ALIGNED_CACHE must cache-line align a stack array */
+static void test_aligned_cache(void)
+{
+ uint8_t arr[123] ODP_ALIGNED_CACHE;
+
+ CU_ASSERT((uintptr_t)arr % ODP_CACHE_LINE_SIZE == 0);
+}
+
+/* ODP_ALIGNED_PAGE must page align a stack array */
+static void test_aligned_page(void)
+{
+ uint8_t arr[123] ODP_ALIGNED_PAGE;
+
+ CU_ASSERT((uintptr_t)arr % ODP_PAGE_SIZE == 0);
+}
+
+/* ODP_CACHE_LINE_ROUNDUP must round up to a cache line multiple, keep
+ * exact multiples unchanged, and map zero to zero */
+static void test_cache_line_roundup(void)
+{
+ CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(123) % ODP_CACHE_LINE_SIZE == 0);
+ CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(ODP_CACHE_LINE_SIZE) == ODP_CACHE_LINE_SIZE);
+ CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(0) == 0);
+}
+
+/* Test case registration table for the align suite */
+odp_testinfo_t align_suite[] = {
+ ODP_TEST_INFO(test_aligned),
+ ODP_TEST_INFO(test_packed),
+ ODP_TEST_INFO(test_offsetof),
+ ODP_TEST_INFO(test_field_sizeof),
+ ODP_TEST_INFO(test_cache_line_size),
+ ODP_TEST_INFO(test_page_size),
+ ODP_TEST_INFO(test_aligned_cache),
+ ODP_TEST_INFO(test_aligned_page),
+ ODP_TEST_INFO(test_cache_line_roundup),
+ ODP_TEST_INFO_NULL,
+};
+
+/* Suite registration table: single suite, no init/term callbacks */
+odp_suiteinfo_t align_suites[] = {
+ {"align", NULL, NULL, align_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+/* Entry point: parse common CUnit options, register and run the suite */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* Parse common options */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(align_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/atomic/.gitignore b/test/validation/api/atomic/.gitignore
index 610ffeab0..610ffeab0 100644
--- a/test/common_plat/validation/api/atomic/.gitignore
+++ b/test/validation/api/atomic/.gitignore
diff --git a/test/validation/api/atomic/Makefile.am b/test/validation/api/atomic/Makefile.am
new file mode 100644
index 000000000..41ad2e6b6
--- /dev/null
+++ b/test/validation/api/atomic/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = atomic_main
+atomic_main_SOURCES = atomic.c
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
new file mode 100644
index 000000000..fab982462
--- /dev/null
+++ b/test/validation/api/atomic/atomic.c
@@ -0,0 +1,1717 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2022 Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+
+#define MAX_WORKERS 32
+
+/* Amount added/subtracted per operation in add/sub tests */
+#define ADD_SUB_CNT 5
+
+/* Iterations per thread for each atomic operation test */
+#define CNT 100000ULL
+#define U32_INIT_VAL (1UL << 31)
+#define U64_INIT_VAL (1ULL << 63)
+#define U32_MAGIC 0xa23f65b2
+#define U64_MAGIC 0xf2e1c5430cb6a52e
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+/* State shared by all worker threads via odp shm */
+typedef struct {
+ odp_atomic_u128_t a128u;
+ odp_atomic_u64_t a64u;
+ odp_atomic_u64_t a64u_min;
+ odp_atomic_u64_t a64u_max;
+ odp_atomic_u64_t a64u_xchg;
+ odp_atomic_u32_t a32u;
+ odp_atomic_u32_t a32u_min;
+ odp_atomic_u32_t a32u_max;
+ odp_atomic_u32_t a32u_xchg;
+
+ uint32_t g_num_threads;
+
+ /* Synchronizes all threads to start an operation loop together */
+ odp_barrier_t global_barrier;
+} global_shared_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/* Initialise per-thread memory.
+ * NOTE(review): the locals intentionally shadow the file-scope
+ * global_shm/global_mem — this only validates that the shm lookup
+ * succeeds per thread; it does not (re)assign the file-scope pointers. */
+static void thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ odp_shm_t global_shm;
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+}
+
+/* Increment/decrement stress workers: after the start barrier each
+ * thread performs CNT atomic ops on the shared counter; the caller
+ * checks the final counter value. */
+static void test_atomic_inc_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u32(&global_mem->a32u);
+}
+
+static void test_atomic_inc_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u64(&global_mem->a64u);
+}
+
+static void test_atomic_dec_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u32(&global_mem->a32u);
+}
+
+static void test_atomic_dec_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u64(&global_mem->a64u);
+}
+
+/* Same as inc/dec stress above, but using the fetch-and-modify variants
+ * (returned old value is discarded here) */
+static void test_atomic_fetch_inc_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u32(&global_mem->a32u);
+}
+
+static void test_atomic_fetch_inc_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u64(&global_mem->a64u);
+}
+
+static void test_atomic_fetch_dec_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u32(&global_mem->a32u);
+}
+
+static void test_atomic_fetch_dec_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u64(&global_mem->a64u);
+}
+
+/* Add/sub stress workers: CNT atomic additions/subtractions of
+ * ADD_SUB_CNT per thread on the shared counter */
+static void test_atomic_add_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u32(&global_mem->a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_add_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u64(&global_mem->a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
+}
+
+/* Same as add/sub stress above, but using the fetch-and-modify variants */
+static void test_atomic_fetch_add_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u32(&global_mem->a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u64(&global_mem->a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_32(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_64(void)
+{
+ uint64_t i;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
+}
+
+/* Min/max stress workers: decrement (resp. increment) the shared
+ * counter and feed each observed value into the min (resp. max)
+ * tracker, so the tracker ends at the extreme value */
+static void test_atomic_min_32(void)
+{
+ uint64_t i;
+ uint32_t tmp;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u32(&global_mem->a32u);
+ odp_atomic_min_u32(&global_mem->a32u_min, tmp);
+ }
+}
+
+static void test_atomic_min_64(void)
+{
+ uint64_t i, tmp;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u64(&global_mem->a64u);
+ odp_atomic_min_u64(&global_mem->a64u_min, tmp);
+ }
+}
+
+static void test_atomic_max_32(void)
+{
+ uint64_t i;
+ uint32_t tmp;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u32(&global_mem->a32u);
+ odp_atomic_max_u32(&global_mem->a32u_max, tmp);
+ }
+}
+
+static void test_atomic_max_64(void)
+{
+ uint64_t i, tmp;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u64(&global_mem->a64u);
+ odp_atomic_max_u64(&global_mem->a64u_max, tmp);
+ }
+}
+
+static void test_atomic_cas_inc_32(void)
+{
+ uint64_t i, old_mismatch = 0;
+ uint32_t old, old_old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(a32u);
+ old_old = old;
+
+ while (odp_atomic_cas_u32(a32u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_acq_inc_32(void)
+{
+ uint64_t i, old_mismatch = 0;
+ uint32_t old, old_old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(a32u);
+ old_old = old;
+
+ while (odp_atomic_cas_acq_u32(a32u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_rel_inc_32(void)
+{
+ uint64_t i, old_mismatch = 0;
+ uint32_t old, old_old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(a32u);
+ old_old = old;
+
+ while (odp_atomic_cas_rel_u32(a32u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_acq_rel_inc_32(void)
+{
+ uint64_t i, old_mismatch = 0;
+ uint32_t old, old_old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(a32u);
+ old_old = old;
+
+ while (odp_atomic_cas_acq_rel_u32(a32u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_dec_32(void)
+{
+ uint64_t i;
+ uint32_t old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(a32u);
+
+ while (odp_atomic_cas_u32(a32u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+/* 64-bit CAS stress workers, mirroring the 32-bit variants above:
+ * CAS-increment in a retry loop while verifying the failed-CAS
+ * old-value update contract (old_mismatch must stay zero). */
+static void test_atomic_cas_inc_64(void)
+{
+ uint64_t i, old, old_old, old_mismatch = 0;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(a64u);
+ old_old = old;
+
+ while (odp_atomic_cas_u64(a64u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_acq_inc_64(void)
+{
+ uint64_t i, old, old_old, old_mismatch = 0;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(a64u);
+ old_old = old;
+
+ while (odp_atomic_cas_acq_u64(a64u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_rel_inc_64(void)
+{
+ uint64_t i, old, old_old, old_mismatch = 0;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(a64u);
+ old_old = old;
+
+ while (odp_atomic_cas_rel_u64(a64u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_acq_rel_inc_64(void)
+{
+ uint64_t i, old, old_old, old_mismatch = 0;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(a64u);
+ old_old = old;
+
+ while (odp_atomic_cas_acq_rel_u64(a64u, &old, old + 1) == 0) {
+ if (old == old_old)
+ old_mismatch++;
+
+ old_old = old;
+ }
+
+ if (old != old_old)
+ old_mismatch++;
+ }
+
+ CU_ASSERT(old_mismatch == 0);
+}
+
+static void test_atomic_cas_dec_64(void)
+{
+ uint64_t i, old;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(a64u);
+
+ while (odp_atomic_cas_u64(a64u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+/* Size of the per-thread random data buffer; a power of two so that
+ * (i & (BUF_SIZE - 1)) below can be used as a cheap modulo. */
+#define BUF_SIZE (64 * 1024)
+
+/* Exchange random byte values into a32u_xchg CNT times per thread.
+ * Every value a thread swaps in (count_new) is eventually swapped back out
+ * by some thread (count_old), so after the final add/sub compensation the
+ * counter returns to its initial value across all threads; the validation
+ * step checks exactly that. */
+static void test_atomic_xchg_32(void)
+{
+ uint32_t old, new;
+ uint64_t i;
+ odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
+ uint8_t buf[BUF_SIZE];
+ uint64_t seed = odp_thread_id();
+ uint64_t count_old = 0, count_new = 0;
+
+ /* Per-thread deterministic pseudo-random input, seeded by thread id */
+ odp_random_test_data(buf, BUF_SIZE, &seed);
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ new = buf[i & (BUF_SIZE - 1)];
+ old = odp_atomic_xchg_u32(a32u_xchg, new);
+ count_old += old;
+ count_new += new;
+ }
+
+ /* Cancel out this thread's net contribution to the counter */
+ odp_atomic_add_u32(a32u_xchg, count_old);
+ odp_atomic_sub_u32(a32u_xchg, count_new);
+}
+
+/* 64-bit variant of test_atomic_xchg_32(): exchange random byte values into
+ * a64u_xchg, then compensate so the counter's net change over all threads
+ * is zero (checked by test_atomic_validate_xchg()). */
+static void test_atomic_xchg_64(void)
+{
+ uint64_t old, new;
+ uint64_t i;
+ odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
+ uint8_t buf[BUF_SIZE];
+ uint64_t seed = odp_thread_id();
+ uint64_t count_old = 0, count_new = 0;
+
+ /* Per-thread deterministic pseudo-random input, seeded by thread id */
+ odp_random_test_data(buf, BUF_SIZE, &seed);
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ new = buf[i & (BUF_SIZE - 1)];
+ old = odp_atomic_xchg_u64(a64u_xchg, new);
+ count_old += old;
+ count_new += new;
+ }
+
+ /* Cancel out this thread's net contribution to the counter */
+ odp_atomic_add_u64(a64u_xchg, count_old);
+ odp_atomic_sub_u64(a64u_xchg, count_new);
+}
+
+/* Exercise all non-relaxed (acquire/release ordered) 32-bit operations.
+ * Per round: a32u is loaded and stored back unchanged, a32u_max gains 1,
+ * a32u_min loses 1, and a32u_xchg gains 3 (one increment per CAS flavor).
+ * test_atomic_validate_non_relaxed() checks these per-round deltas. */
+static void test_atomic_non_relaxed_32(void)
+{
+ uint64_t i;
+ uint32_t tmp;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+ odp_atomic_u32_t *a32u_min = &global_mem->a32u_min;
+ odp_atomic_u32_t *a32u_max = &global_mem->a32u_max;
+ odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ /* Load-acquire / store-release round trip, value unchanged */
+ tmp = odp_atomic_load_acq_u32(a32u);
+ odp_atomic_store_rel_u32(a32u, tmp);
+
+ /* tmp from these loads is intentionally unused; the loads only
+ * exercise the acquire-ordered API */
+ tmp = odp_atomic_load_acq_u32(a32u_max);
+ odp_atomic_add_rel_u32(a32u_max, 1);
+
+ tmp = odp_atomic_load_acq_u32(a32u_min);
+ odp_atomic_sub_rel_u32(a32u_min, 1);
+
+ /* One increment per CAS memory-order flavor: acq, rel, acq_rel */
+ tmp = odp_atomic_load_u32(a32u_xchg);
+ while (odp_atomic_cas_acq_u32(a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(a32u_xchg);
+ while (odp_atomic_cas_rel_u32(a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(a32u_xchg);
+ while (odp_atomic_cas_acq_rel_u32(a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_non_relaxed_64(void)
+{
+ uint64_t i, tmp;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+ odp_atomic_u64_t *a64u_min = &global_mem->a64u_min;
+ odp_atomic_u64_t *a64u_max = &global_mem->a64u_max;
+ odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u64(a64u);
+ odp_atomic_store_rel_u64(a64u, tmp);
+
+ tmp = odp_atomic_load_acq_u64(a64u_max);
+ odp_atomic_add_rel_u64(a64u_max, 1);
+
+ tmp = odp_atomic_load_acq_u64(a64u_min);
+ odp_atomic_sub_rel_u64(a64u_min, 1);
+
+ tmp = odp_atomic_load_u64(a64u_xchg);
+ while (odp_atomic_cas_acq_u64(a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(a64u_xchg);
+ while (odp_atomic_cas_rel_u64(a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(a64u_xchg);
+ while (odp_atomic_cas_acq_rel_u64(a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+ }
+}
+
+/* Increment the 128-bit counter CNT times per thread with relaxed CAS.
+ * Each success adds 2 to u64[0] and 1 to u64[1], so u64[0] - u64[1] equals
+ * the total number of successful iterations; the validation functions use
+ * that difference to confirm both halves were updated atomically. */
+static void test_atomic_relaxed_128(void)
+{
+ int ret;
+ uint64_t i;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ /* On CAS failure 'old' holds the current value for the retry */
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_acq(void)
+{
+ int ret;
+ uint64_t i;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_acq_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_rel(void)
+{
+ int ret;
+ uint64_t i;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_rel_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_acq_rel(void)
+{
+ int ret;
+ uint64_t i;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_acq_rel_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_inc_dec_32(void)
+{
+ test_atomic_inc_32();
+ test_atomic_dec_32();
+}
+
+static void test_atomic_inc_dec_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_dec_64();
+}
+
+static void test_atomic_fetch_inc_dec_32(void)
+{
+ test_atomic_fetch_inc_32();
+ test_atomic_fetch_dec_32();
+}
+
+static void test_atomic_fetch_inc_dec_64(void)
+{
+ test_atomic_fetch_inc_64();
+ test_atomic_fetch_dec_64();
+}
+
+static void test_atomic_add_sub_32(void)
+{
+ test_atomic_add_32();
+ test_atomic_sub_32();
+}
+
+static void test_atomic_add_sub_64(void)
+{
+ test_atomic_add_64();
+ test_atomic_sub_64();
+}
+
+static void test_atomic_fetch_add_sub_32(void)
+{
+ test_atomic_fetch_add_32();
+ test_atomic_fetch_sub_32();
+}
+
+static void test_atomic_fetch_add_sub_64(void)
+{
+ test_atomic_fetch_add_64();
+ test_atomic_fetch_sub_64();
+}
+
+static void test_atomic_inc_add_32(void)
+{
+ test_atomic_inc_32();
+ test_atomic_fetch_inc_32();
+ test_atomic_add_32();
+ test_atomic_fetch_add_32();
+ test_atomic_cas_inc_32();
+}
+
+static void test_atomic_inc_add_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_fetch_inc_64();
+ test_atomic_add_64();
+ test_atomic_fetch_add_64();
+ test_atomic_cas_inc_64();
+}
+
+static void test_atomic_dec_sub_32(void)
+{
+ test_atomic_dec_32();
+ test_atomic_fetch_dec_32();
+ test_atomic_sub_32();
+ test_atomic_fetch_sub_32();
+ test_atomic_cas_dec_32();
+}
+
+static void test_atomic_dec_sub_64(void)
+{
+ test_atomic_dec_64();
+ test_atomic_fetch_dec_64();
+ test_atomic_sub_64();
+ test_atomic_fetch_sub_64();
+ test_atomic_cas_dec_64();
+}
+
+static void test_atomic_max_min_32(void)
+{
+ test_atomic_max_32();
+ test_atomic_min_32();
+}
+
+static void test_atomic_max_min_64(void)
+{
+ test_atomic_max_64();
+ test_atomic_min_64();
+}
+
+static void test_atomic_cas_inc_dec_32(void)
+{
+ test_atomic_cas_inc_32();
+ test_atomic_cas_dec_32();
+}
+
+static void test_atomic_cas_inc_dec_64(void)
+{
+ test_atomic_cas_inc_64();
+ test_atomic_cas_dec_64();
+}
+
+static void test_atomic_cas_inc_128(void)
+{
+ test_atomic_relaxed_128();
+ test_atomic_non_relaxed_128_acq();
+ test_atomic_non_relaxed_128_rel();
+ test_atomic_non_relaxed_128_acq_rel();
+}
+
+/* Initialize every shared atomic variable to zero. Run once (single
+ * threaded) before each functional test; test_atomic_store() then sets the
+ * actual start values. */
+static void test_atomic_init(void)
+{
+ odp_atomic_init_u32(&global_mem->a32u, 0);
+ odp_atomic_init_u64(&global_mem->a64u, 0);
+ odp_atomic_init_u32(&global_mem->a32u_min, 0);
+ odp_atomic_init_u32(&global_mem->a32u_max, 0);
+ odp_atomic_init_u64(&global_mem->a64u_min, 0);
+ odp_atomic_init_u64(&global_mem->a64u_max, 0);
+ odp_atomic_init_u32(&global_mem->a32u_xchg, 0);
+ odp_atomic_init_u64(&global_mem->a64u_xchg, 0);
+
+ odp_u128_t a128u_tmp;
+
+ a128u_tmp.u64[0] = 0;
+ a128u_tmp.u64[1] = 0;
+ odp_atomic_init_u128(&global_mem->a128u, a128u_tmp);
+}
+
+/* Store the well-known initial values (U32_INIT_VAL / U64_INIT_VAL) into
+ * every shared atomic variable. The validation functions compute their
+ * expected results relative to these start values. */
+static void test_atomic_store(void)
+{
+ odp_atomic_store_u32(&global_mem->a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u, U64_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_min, U32_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_max, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_min, U64_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_max, U64_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_xchg, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_xchg, U64_INIT_VAL);
+
+ odp_u128_t a128u_tmp;
+
+ a128u_tmp.u64[0] = U64_INIT_VAL;
+ a128u_tmp.u64[1] = U64_INIT_VAL;
+ odp_atomic_store_u128(&global_mem->a128u, a128u_tmp);
+}
+
+static void test_atomic_validate_init_val_32_64(void)
+{
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void test_atomic_validate_init_val_128(void)
+{
+ odp_u128_t a128u = odp_atomic_load_u128(&global_mem->a128u);
+
+ CU_ASSERT(U64_INIT_VAL == a128u.u64[0]);
+ CU_ASSERT(U64_INIT_VAL == a128u.u64[1]);
+}
+
+static void test_atomic_validate_init_val(void)
+{
+ test_atomic_validate_init_val_32_64();
+ test_atomic_validate_init_val_128();
+}
+
+static void test_atomic_validate_inc_add(void)
+{
+ test_atomic_validate_init_val_128();
+
+ /* Two increment tests, one cas increment test and two add tests. */
+ const uint64_t total_count = CNT * (3 + 2 * ADD_SUB_CNT) * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL + total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL + total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void test_atomic_validate_dec_sub(void)
+{
+ test_atomic_validate_init_val_128();
+
+ /* Two decrement tests, one cas decrement test and two sub tests. */
+ const uint64_t total_count = CNT * (3 + 2 * ADD_SUB_CNT) * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL - total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL - total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void test_atomic_validate_cas_inc_dec(void)
+{
+ test_atomic_validate_init_val_32_64();
+
+ odp_u128_t a128u = odp_atomic_load_u128(&global_mem->a128u);
+ const uint64_t iterations = a128u.u64[0] - a128u.u64[1];
+
+ CU_ASSERT(iterations == 4 * CNT * global_mem->g_num_threads);
+}
+
+/* Validate the combined max/min test: the untouched counters must still
+ * hold their initial values, while a32u_max/a64u_max reached the highest
+ * fetch_inc result and a32u_min/a64u_min the lowest. */
+static void test_atomic_validate_max_min(void)
+{
+ test_atomic_validate_init_val();
+
+ const uint64_t total_count = CNT * global_mem->g_num_threads;
+ /*
+ * Max is the result of fetch_inc, so the final max value is total_count - 1. In
+ * a long test, counter may overflow, in which case max is saturated at
+ * UINT32_MAX, and min at 0.
+ */
+ const uint32_t a32u_max = ODPH_MIN(U32_INIT_VAL + total_count - 1, UINT32_MAX);
+ const uint32_t a32u_min = U32_INIT_VAL + total_count - 1 > UINT32_MAX ? 0 : U32_INIT_VAL;
+
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_max) == a32u_max);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_min) == a32u_min);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_max) == U64_INIT_VAL + total_count - 1);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_min) == U64_INIT_VAL);
+}
+
+static void test_atomic_validate_xchg(void)
+{
+ test_atomic_validate_init_val();
+
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_xchg) == U32_INIT_VAL);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_xchg) == U64_INIT_VAL);
+}
+
+/* Validate test_atomic_non_relaxed_32/64: per round each worker added 3 to
+ * the xchg counter (one per CAS flavor), 1 to max, and subtracted 1 from
+ * min; a32u/a64u and the 128-bit value must be unchanged. */
+static void test_atomic_validate_non_relaxed(void)
+{
+ test_atomic_validate_init_val();
+
+ const uint64_t total_count = CNT * global_mem->g_num_threads;
+ /* 3 increments per round. */
+ const uint32_t a32u = U32_INIT_VAL + 3 * total_count;
+ /* 1 increment per round. */
+ const uint32_t a32u_max = U32_INIT_VAL + total_count;
+ const uint32_t a32u_min = U32_INIT_VAL - total_count;
+
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_xchg) == a32u);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_xchg) == U64_INIT_VAL + 3 * total_count);
+
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_max) == a32u_max);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_min) == a32u_min);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_max) == U64_INIT_VAL + total_count);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_min) == U64_INIT_VAL - total_count);
+}
+
+/* CUnit global init: bring up ODP, reserve the shared memory block holding
+ * all test state, and size the worker thread count to the available CPUs
+ * (capped at MAX_WORKERS). Returns 0 on success, -1 on failure.
+ *
+ * NOTE(review): on odp_init_local()/odp_shm_reserve() failure the earlier
+ * odp_init_global()/odp_init_local() calls are not torn down before
+ * returning -1 — presumably acceptable since the test binary exits on
+ * init failure; confirm against odp_cunit_common expectations. */
+static int atomic_init(odp_instance_t *inst)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ /* 64-byte alignment for the shared test state */
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64, 0);
+ if (ODP_SHM_INVALID == global_shm) {
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ /* Request the maximum; reduced below if fewer workers exist */
+ global_mem->g_num_threads = MAX_WORKERS;
+
+ workers_count = odp_cpumask_default_worker(NULL, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ /* Barrier releases once all worker threads have arrived */
+ odp_barrier_init(&global_mem->global_barrier, global_mem->g_num_threads);
+
+ return ret;
+}
+
+/* CUnit global term: free the shared memory and shut down ODP in reverse
+ * order of atomic_init(). Returns 0 on success, -1 on the first failure.
+ *
+ * NOTE(review): the odp_shm_lookup() result is passed to odp_shm_free()
+ * without checking for ODP_SHM_INVALID — behavior on a failed lookup is
+ * not visible here; confirm against the ODP shm API. */
+static int atomic_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Atomic tests */
+
+static int test_atomic_inc_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_inc_32();
+ test_atomic_inc_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_dec_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_dec_32();
+ test_atomic_dec_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_add_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_add_32();
+ test_atomic_add_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_sub_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_sub_32();
+ test_atomic_sub_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_inc_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_inc_32();
+ test_atomic_fetch_inc_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_dec_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_dec_32();
+ test_atomic_fetch_dec_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_add_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_add_32();
+ test_atomic_fetch_add_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_sub_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_sub_32();
+ test_atomic_fetch_sub_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_max_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_max_32();
+ test_atomic_max_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_min_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_min_32();
+ test_atomic_min_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_cas_inc_32();
+ test_atomic_cas_inc_64();
+ test_atomic_relaxed_128();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_acq_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_cas_acq_inc_32();
+ test_atomic_cas_acq_inc_64();
+ test_atomic_non_relaxed_128_acq();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_rel_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_cas_rel_inc_32();
+ test_atomic_cas_rel_inc_64();
+ test_atomic_non_relaxed_128_rel();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_acq_rel_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_cas_acq_rel_inc_32();
+ test_atomic_cas_acq_rel_inc_64();
+ test_atomic_non_relaxed_128_acq_rel();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_inc_dec_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_inc_dec_32();
+ test_atomic_inc_dec_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_add_sub_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_add_sub_32();
+ test_atomic_add_sub_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_inc_dec_32();
+ test_atomic_fetch_inc_dec_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_add_sub_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_fetch_add_sub_32();
+ test_atomic_fetch_add_sub_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_inc_add_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_inc_add_32();
+ test_atomic_inc_add_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_dec_sub_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_dec_sub_32();
+ test_atomic_dec_sub_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_max_min_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_max_min_32();
+ test_atomic_max_min_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_inc_dec_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_cas_inc_dec_32();
+ test_atomic_cas_inc_dec_64();
+ test_atomic_cas_inc_128();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_xchg_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_xchg_32();
+ test_atomic_xchg_64();
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_non_relaxed_thread(void *arg UNUSED)
+{
+ thread_init();
+ test_atomic_non_relaxed_32();
+ test_atomic_non_relaxed_64();
+
+ return CU_get_number_of_failures();
+}
+
+/* Common driver for the functional tests: reset the shared atomics to the
+ * known start values, run test_fn in g_num_threads worker threads, join
+ * them, then run validate_fn single threaded to check the end state. */
+static void test_atomic_functional(int test_fn(void *), void validate_fn(void))
+{
+ int num = global_mem->g_num_threads;
+
+ test_atomic_init();
+ test_atomic_store();
+ odp_cunit_thread_create(num, test_fn, NULL, 0, 0);
+ odp_cunit_thread_join(num);
+ validate_fn();
+}
+
+/* Sanity check the odp_atomic_op_t bit-field union: clearing all_bits must
+ * clear every op flag, and setting/clearing an individual flag must be
+ * reflected in both the flag and all_bits. */
+static void test_atomic_op_lock_free_set(void)
+{
+ odp_atomic_op_t atomic_op;
+
+ /* Start from all-ones to prove that all_bits = 0 really clears
+ * every field */
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ atomic_op.all_bits = 0;
+
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+
+ /* Test setting first, last and couple of other bits */
+ atomic_op.op.init = 1;
+ CU_ASSERT(atomic_op.op.init == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.init = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.xchg = 1;
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.xchg = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.add = 1;
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.add = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.dec = 1;
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.dec = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+}
+
+static void test_atomic_op_lock_free_64(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u64(&atomic_op);
+ ret_null = odp_atomic_lock_free_u64(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+ /* Init operation is not atomic by the spec. Call to
+ * odp_atomic_lock_free_u64() zeros it but never sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.fetch_add == 1);
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.op.fetch_sub == 1);
+ CU_ASSERT(atomic_op.op.sub == 1);
+ CU_ASSERT(atomic_op.op.fetch_inc == 1);
+ CU_ASSERT(atomic_op.op.inc == 1);
+ CU_ASSERT(atomic_op.op.fetch_dec == 1);
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.op.min == 1);
+ CU_ASSERT(atomic_op.op.max == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ }
+}
+
+static void test_atomic_op_lock_free_128(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u128(&atomic_op);
+ ret_null = odp_atomic_lock_free_u128(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+ /* Init operation is not atomic by the spec. Call to
+ * odp_atomic_lock_free_u128() zeros it but never sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ }
+}
+
+/* Single-threaded test: for each i in [0, CNT), initialize every atomic
+ * variable (32/64/128-bit) to i and verify that a plain load reads the
+ * same value back — i.e. init and load round-trip for all widths. */
+static void atomic_test_atomic_init(void)
+{
+ uint64_t i;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ for (i = 0; i < CNT; i++) {
+ odp_u128_t a128u_tmp;
+
+ odp_atomic_init_u32(&global_mem->a32u, i);
+ odp_atomic_init_u64(&global_mem->a64u, i);
+ odp_atomic_init_u32(&global_mem->a32u_min, i);
+ odp_atomic_init_u32(&global_mem->a32u_max, i);
+ odp_atomic_init_u64(&global_mem->a64u_min, i);
+ odp_atomic_init_u64(&global_mem->a64u_max, i);
+ odp_atomic_init_u32(&global_mem->a32u_xchg, i);
+ odp_atomic_init_u64(&global_mem->a64u_xchg, i);
+
+ a128u_tmp.u64[0] = i;
+ a128u_tmp.u64[1] = i;
+ odp_atomic_init_u128(&global_mem->a128u, a128u_tmp);
+
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u) == i);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u) == i);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_min) == i);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_max) == i);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_min) == i);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_max) == i);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_xchg) == i);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_xchg) == i);
+
+ a128u_tmp = odp_atomic_load_u128(a128u);
+ CU_ASSERT(a128u_tmp.u64[0] == i);
+ CU_ASSERT(a128u_tmp.u64[1] == i);
+ }
+}
+
+static void test_atomic_validate_inc(void)
+{
+ const uint64_t total_count = CNT * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL + total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL + total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void atomic_test_atomic_inc(void)
+{
+ test_atomic_functional(test_atomic_inc_thread, test_atomic_validate_inc);
+}
+
+static void test_atomic_validate_dec(void)
+{
+ const uint64_t total_count = CNT * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL - total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL - total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void atomic_test_atomic_dec(void)
+{
+ test_atomic_functional(test_atomic_dec_thread, test_atomic_validate_dec);
+}
+
+static void test_atomic_validate_add(void)
+{
+ const uint64_t total_count = CNT * ADD_SUB_CNT * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL + total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL + total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void atomic_test_atomic_add(void)
+{
+ test_atomic_functional(test_atomic_add_thread, test_atomic_validate_add);
+}
+
+static void test_atomic_validate_sub(void)
+{
+ const uint64_t total_count = CNT * ADD_SUB_CNT * global_mem->g_num_threads;
+ const uint32_t a32u = U32_INIT_VAL - total_count;
+
+ CU_ASSERT(a32u == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL - total_count == odp_atomic_load_u64(&global_mem->a64u));
+}
+
+static void atomic_test_atomic_sub(void)
+{
+ test_atomic_functional(test_atomic_sub_thread, test_atomic_validate_sub);
+}
+
+static void atomic_test_atomic_fetch_inc(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_thread, test_atomic_validate_inc);
+}
+
+static void atomic_test_atomic_fetch_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_dec_thread, test_atomic_validate_dec);
+}
+
+static void atomic_test_atomic_fetch_add(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_thread, test_atomic_validate_add);
+}
+
+static void atomic_test_atomic_fetch_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_sub_thread, test_atomic_validate_sub);
+}
+
+static void test_atomic_validate_max(void)
+{
+ const uint64_t total_count = CNT * global_mem->g_num_threads - 1;
+ /* In a long test, counter may overflow, in which case max is saturated at UINT32_MAX. */
+ const uint32_t a32u_max = ODPH_MIN(U32_INIT_VAL + total_count, UINT32_MAX);
+
+ CU_ASSERT(a32u_max == odp_atomic_load_u32(&global_mem->a32u_max));
+ CU_ASSERT(U64_INIT_VAL + total_count == odp_atomic_load_u64(&global_mem->a64u_max));
+}
+
+static void atomic_test_atomic_max(void)
+{
+ test_atomic_functional(test_atomic_max_thread, test_atomic_validate_max);
+}
+
+static void test_atomic_validate_min(void)
+{
+ const uint64_t total_count = CNT * global_mem->g_num_threads - 1;
+ /* In a long test, counter may underflow, in which case min is saturated at 0. */
+ const uint32_t a32u_min = ODPH_MAX((int64_t)U32_INIT_VAL - (int64_t)total_count, 0);
+
+ CU_ASSERT(a32u_min == odp_atomic_load_u32(&global_mem->a32u_min));
+ CU_ASSERT(U64_INIT_VAL - total_count == odp_atomic_load_u64(&global_mem->a64u_min));
+}
+
+static void atomic_test_atomic_min(void)
+{
+ test_atomic_functional(test_atomic_min_thread, test_atomic_validate_min);
+}
+
+static void test_atomic_validate_cas_128(void)
+{
+ odp_u128_t a128u = odp_atomic_load_u128(&global_mem->a128u);
+ const uint64_t iterations = a128u.u64[0] - a128u.u64[1];
+
+ CU_ASSERT(iterations == CNT * global_mem->g_num_threads);
+}
+
+static void test_atomic_validate_cas(void)
+{
+ test_atomic_validate_inc();
+ test_atomic_validate_cas_128();
+}
+
+static void atomic_test_atomic_cas(void)
+{
+ test_atomic_functional(test_atomic_cas_thread, test_atomic_validate_cas);
+}
+
+static void atomic_test_atomic_cas_acq(void)
+{
+ test_atomic_functional(test_atomic_cas_acq_thread, test_atomic_validate_cas);
+}
+
+static void atomic_test_atomic_cas_rel(void)
+{
+ test_atomic_functional(test_atomic_cas_rel_thread, test_atomic_validate_cas);
+}
+
+static void atomic_test_atomic_cas_acq_rel(void)
+{
+ test_atomic_functional(test_atomic_cas_acq_rel_thread, test_atomic_validate_cas);
+}
+
+static void atomic_test_atomic_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_inc_dec_thread, test_atomic_validate_init_val);
+}
+
+static void atomic_test_atomic_add_sub(void)
+{
+ test_atomic_functional(test_atomic_add_sub_thread, test_atomic_validate_init_val);
+}
+
+static void atomic_test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_dec_thread, test_atomic_validate_init_val);
+}
+
+static void atomic_test_atomic_fetch_add_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_sub_thread, test_atomic_validate_init_val);
+}
+
+static void atomic_test_atomic_inc_add(void)
+{
+ test_atomic_functional(test_atomic_inc_add_thread, test_atomic_validate_inc_add);
+}
+
+static void atomic_test_atomic_dec_sub(void)
+{
+ test_atomic_functional(test_atomic_dec_sub_thread, test_atomic_validate_dec_sub);
+}
+
+static void atomic_test_atomic_max_min(void)
+{
+ test_atomic_functional(test_atomic_max_min_thread, test_atomic_validate_max_min);
+}
+
+static void atomic_test_atomic_cas_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_cas_inc_dec_thread, test_atomic_validate_cas_inc_dec);
+}
+
+static void atomic_test_atomic_xchg(void)
+{
+ test_atomic_functional(test_atomic_xchg_thread, test_atomic_validate_xchg);
+}
+
+static void atomic_test_atomic_non_relaxed(void)
+{
+ test_atomic_functional(test_atomic_non_relaxed_thread,
+ test_atomic_validate_non_relaxed);
+}
+
+static void atomic_test_atomic_op_lock_free(void)
+{
+ test_atomic_op_lock_free_set();
+ test_atomic_op_lock_free_64();
+ test_atomic_op_lock_free_128();
+}
+
+odp_testinfo_t atomic_suite_atomic[] = {
+ ODP_TEST_INFO(atomic_test_atomic_init),
+ ODP_TEST_INFO(atomic_test_atomic_inc),
+ ODP_TEST_INFO(atomic_test_atomic_dec),
+ ODP_TEST_INFO(atomic_test_atomic_add),
+ ODP_TEST_INFO(atomic_test_atomic_sub),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_inc),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_dec),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_add),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_sub),
+ ODP_TEST_INFO(atomic_test_atomic_max),
+ ODP_TEST_INFO(atomic_test_atomic_min),
+ ODP_TEST_INFO(atomic_test_atomic_cas),
+ ODP_TEST_INFO(atomic_test_atomic_cas_acq),
+ ODP_TEST_INFO(atomic_test_atomic_cas_rel),
+ ODP_TEST_INFO(atomic_test_atomic_cas_acq_rel),
+ ODP_TEST_INFO(atomic_test_atomic_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_inc_add),
+ ODP_TEST_INFO(atomic_test_atomic_dec_sub),
+ ODP_TEST_INFO(atomic_test_atomic_max_min),
+ ODP_TEST_INFO(atomic_test_atomic_cas_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_xchg),
+ ODP_TEST_INFO(atomic_test_atomic_non_relaxed),
+ ODP_TEST_INFO(atomic_test_atomic_op_lock_free),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t atomic_suites[] = {
+ {"atomic", NULL, NULL,
+ atomic_suite_atomic},
+ ODP_SUITE_INFO_NULL
+};
+
+/* Test entry point: parse common CUnit options, register the init/term
+ * hooks and the atomic suites, then run them. Returns nonzero on failure
+ * at any stage. */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(atomic_init);
+ odp_cunit_register_global_term(atomic_term);
+
+ ret = odp_cunit_register(atomic_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/barrier/.gitignore b/test/validation/api/barrier/.gitignore
index 2e0ee7ade..2e0ee7ade 100644
--- a/test/common_plat/validation/api/barrier/.gitignore
+++ b/test/validation/api/barrier/.gitignore
diff --git a/test/validation/api/barrier/Makefile.am b/test/validation/api/barrier/Makefile.am
new file mode 100644
index 000000000..f5f751b88
--- /dev/null
+++ b/test/validation/api/barrier/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = barrier_main
+barrier_main_SOURCES = barrier.c
diff --git a/test/common_plat/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index 79ee82b3b..7dc9a44c6 100644
--- a/test/common_plat/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,12 +7,12 @@
#include <malloc.h>
#include <odp_api.h>
-#include <CUnit/Basic.h>
+#include <odp/helper/odph_api.h>
#include <odp_cunit_common.h>
#include <unistd.h>
-#include "barrier.h"
#define VERBOSE 0
+#define MAX_WORKERS 32
#define MAX_ITERATIONS 1000
#define BARRIER_ITERATIONS 64
@@ -276,12 +277,15 @@ static void barrier_test_init(void)
}
/* Barrier tests */
-void barrier_test_memory_barrier(void)
+static void barrier_test_memory_barrier(void)
{
volatile int a = 0;
volatile int b = 0;
volatile int c = 0;
volatile int d = 0;
+ volatile int e = 0;
+ volatile int f = 0;
+ volatile int g = 0;
/* Call all memory barriers to verify that those are implemented */
a = 1;
@@ -291,58 +295,45 @@ void barrier_test_memory_barrier(void)
c = 1;
odp_mb_full();
d = 1;
+ odp_mb_sync();
+ e = 1;
+ odp_mb_sync_load();
+ f = 1;
+ odp_mb_sync_store();
+ g = 1;
/* Avoid "variable set but not used" warning */
- temp_result = a + b + c + d;
+ temp_result = a + b + c + d + e + f + g;
}
-void barrier_test_no_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(no_barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-void barrier_test_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-odp_testinfo_t barrier_suite_barrier[] = {
- ODP_TEST_INFO(barrier_test_memory_barrier),
- ODP_TEST_INFO(barrier_test_no_barrier_functional),
- ODP_TEST_INFO(barrier_test_barrier_functional),
- ODP_TEST_INFO_NULL
-};
-
-int barrier_init(odp_instance_t *inst)
+static int barrier_init(odp_instance_t *inst)
{
uint32_t workers_count, max_threads;
int ret = 0;
- odp_cpumask_t mask;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (0 != odp_init_global(inst, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
return -1;
}
if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
+ ODPH_ERR("odp_init_local() failed\n");
return -1;
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
+ sizeof(global_shared_mem_t), 64, 0);
if (ODP_SHM_INVALID == global_shm) {
- fprintf(stderr, "Unable reserve memory for global_shm\n");
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
return -1;
}
@@ -353,7 +344,7 @@ int barrier_init(odp_instance_t *inst)
global_mem->g_iterations = MAX_ITERATIONS;
global_mem->g_verbose = VERBOSE;
- workers_count = odp_cpumask_default_worker(&mask, 0);
+ workers_count = odp_cpumask_default_worker(NULL, 0);
max_threads = (workers_count >= MAX_WORKERS) ?
MAX_WORKERS : workers_count;
@@ -372,41 +363,88 @@ int barrier_init(odp_instance_t *inst)
return ret;
}
-int barrier_term(odp_instance_t inst)
+static int barrier_term(odp_instance_t inst)
{
odp_shm_t shm;
shm = odp_shm_lookup(GLOBAL_SHM_NAME);
if (0 != odp_shm_free(shm)) {
- fprintf(stderr, "error: odp_shm_free() failed.\n");
+ ODPH_ERR("odp_shm_free() failed\n");
return -1;
}
if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
+ ODPH_ERR("odp_term_local() failed\n");
return -1;
}
if (0 != odp_term_global(inst)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
+ ODPH_ERR("odp_term_global() failed\n");
return -1;
}
return 0;
}
+static void barrier_single_thread(void)
+{
+ odp_barrier_t barrier;
+
+ odp_barrier_init(&barrier, 1);
+
+ printf(" Calling wait...");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 1");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 2");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 3. ");
+}
+
+static void barrier_test_no_barrier_functional(void)
+{
+ int num = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(num, no_barrier_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
+}
+
+static void barrier_test_barrier_functional(void)
+{
+ int num = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(num, barrier_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
+}
+
+odp_testinfo_t barrier_suite_barrier[] = {
+ ODP_TEST_INFO(barrier_test_memory_barrier),
+ ODP_TEST_INFO(barrier_single_thread),
+ ODP_TEST_INFO(barrier_test_no_barrier_functional),
+ ODP_TEST_INFO(barrier_test_barrier_functional),
+ ODP_TEST_INFO_NULL
+};
+
odp_suiteinfo_t barrier_suites[] = {
{"barrier", NULL, NULL,
barrier_suite_barrier},
ODP_SUITE_INFO_NULL
};
-int barrier_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
int ret;
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
odp_cunit_register_global_init(barrier_init);
diff --git a/test/common_plat/validation/api/buffer/.gitignore b/test/validation/api/buffer/.gitignore
index 0e8ac15c1..0e8ac15c1 100644
--- a/test/common_plat/validation/api/buffer/.gitignore
+++ b/test/validation/api/buffer/.gitignore
diff --git a/test/validation/api/buffer/Makefile.am b/test/validation/api/buffer/Makefile.am
new file mode 100644
index 000000000..f459010c3
--- /dev/null
+++ b/test/validation/api/buffer/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = buffer_main
+buffer_main_SOURCES = buffer.c
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
new file mode 100644
index 000000000..2a79ed27e
--- /dev/null
+++ b/test/validation/api/buffer/buffer.c
@@ -0,0 +1,610 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2022, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_debug.h>
+#include "odp_cunit_common.h"
+
+#define BUF_ALIGN ODP_CACHE_LINE_SIZE
+#define BUF_SIZE 1500
+#define BUF_NUM 100
+#define BURST 8
+
+static odp_pool_capability_t pool_capa;
+static odp_pool_param_t default_param;
+
+static int buffer_suite_init(void)
+{
+ uint32_t size, num, align;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capability failed\n");
+ return -1;
+ }
+
+ size = BUF_SIZE;
+ if (pool_capa.buf.max_size && size > pool_capa.buf.max_size)
+ size = pool_capa.buf.max_size;
+
+ num = BUF_NUM;
+ if (pool_capa.buf.max_num && num > pool_capa.buf.max_num)
+ num = pool_capa.buf.max_num;
+
+ align = BUF_ALIGN;
+ if (align > pool_capa.buf.max_align)
+ align = pool_capa.buf.max_align;
+
+ odp_pool_param_init(&default_param);
+ default_param.type = ODP_POOL_BUFFER;
+ default_param.buf.size = size;
+ default_param.buf.num = num;
+ default_param.buf.align = align;
+
+ printf("Default buffer pool\n");
+ printf(" size %u\n", size);
+ printf(" num %u\n", num);
+ printf(" align %u\n\n", align);
+
+ return 0;
+}
+
+static void test_pool_alloc_free(const odp_pool_param_t *param)
+{
+ odp_pool_t pool;
+ odp_event_t ev;
+ uint32_t i;
+ uint32_t num_buf = 0;
+ void *addr;
+ odp_event_subtype_t subtype;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
+ odp_buffer_t buffer[num];
+ odp_bool_t wrong_type = false, wrong_subtype = false;
+ odp_bool_t wrong_size = false, wrong_align = false;
+
+ pool = odp_pool_create("default pool", param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_pool_print(pool);
+
+ for (i = 0; i < num; i++) {
+ odp_buffer_t buf;
+
+ buffer[i] = odp_buffer_alloc(pool);
+
+ if (buffer[i] == ODP_BUFFER_INVALID)
+ break;
+ num_buf++;
+
+ CU_ASSERT(odp_buffer_is_valid(buffer[i]) == 1)
+
+ CU_ASSERT(odp_buffer_pool(buffer[i]) == pool);
+
+ ev = odp_buffer_to_event(buffer[i]);
+ CU_ASSERT(odp_buffer_from_event(ev) == buffer[i]);
+
+ odp_buffer_to_event_multi(&buffer[i], &ev, 1);
+ odp_buffer_from_event_multi(&buf, &ev, 1);
+ CU_ASSERT(buf == buffer[i]);
+
+ CU_ASSERT(odp_event_pool(ev) == pool);
+
+ if (odp_event_type(ev) != ODP_EVENT_BUFFER)
+ wrong_type = true;
+ if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+ if (odp_event_types(ev, &subtype) != ODP_EVENT_BUFFER)
+ wrong_type = true;
+ if (subtype != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+ if (odp_buffer_size(buffer[i]) < size)
+ wrong_size = true;
+
+ addr = odp_buffer_addr(buffer[i]);
+
+ if (((uintptr_t)addr % align) != 0)
+ wrong_align = true;
+
+ if (wrong_type || wrong_subtype || wrong_size || wrong_align) {
+ ODPH_ERR("Buffer has error\n");
+ odp_buffer_print(buffer[i]);
+ break;
+ }
+
+ /* Write buffer data */
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+ CU_ASSERT(!wrong_type);
+ CU_ASSERT(!wrong_subtype);
+ CU_ASSERT(!wrong_size);
+ CU_ASSERT(!wrong_align);
+
+ for (i = 0; i < num_buf; i++)
+ odp_buffer_free(buffer[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
+{
+ odp_pool_t pool;
+ uint32_t i, num_buf;
+ int ret;
+ odp_event_t ev;
+ void *addr;
+ odp_event_subtype_t subtype;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
+ odp_buffer_t buffer[num + BURST];
+ odp_bool_t wrong_type = false, wrong_subtype = false;
+ odp_bool_t wrong_size = false, wrong_align = false;
+
+ pool = odp_pool_create("default pool", param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ ret = 0;
+ for (i = 0; i < num; i += ret) {
+ odp_buffer_t buf[BURST];
+ odp_event_t event[BURST];
+
+ ret = odp_buffer_alloc_multi(pool, &buffer[i], BURST);
+ CU_ASSERT(ret >= 0);
+ CU_ASSERT(ret <= BURST);
+
+ if (ret <= 0)
+ break;
+
+ odp_buffer_to_event_multi(&buffer[i], event, ret);
+ odp_buffer_from_event_multi(buf, event, ret);
+ for (int j = 0; j < ret; j++)
+ CU_ASSERT(buf[j] == buffer[i + j]);
+ }
+
+ num_buf = i;
+ CU_ASSERT(num_buf == num);
+
+ for (i = 0; i < num_buf; i++) {
+ if (buffer[i] == ODP_BUFFER_INVALID)
+ break;
+
+ CU_ASSERT(odp_buffer_is_valid(buffer[i]) == 1)
+
+ CU_ASSERT(odp_buffer_pool(buffer[i]) == pool);
+
+ ev = odp_buffer_to_event(buffer[i]);
+ CU_ASSERT(odp_buffer_from_event(ev) == buffer[i]);
+
+ if (odp_event_type(ev) != ODP_EVENT_BUFFER)
+ wrong_type = true;
+ if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+ if (odp_event_types(ev, &subtype) != ODP_EVENT_BUFFER)
+ wrong_type = true;
+ if (subtype != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+ if (odp_buffer_size(buffer[i]) < size)
+ wrong_size = true;
+
+ addr = odp_buffer_addr(buffer[i]);
+
+ if (((uintptr_t)addr % align) != 0)
+ wrong_align = true;
+
+ if (wrong_type || wrong_subtype || wrong_size || wrong_align) {
+ ODPH_ERR("Buffer has error\n");
+ odp_buffer_print(buffer[i]);
+ break;
+ }
+
+ /* Write buffer data */
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num_buf);
+ CU_ASSERT(!wrong_type);
+ CU_ASSERT(!wrong_subtype);
+ CU_ASSERT(!wrong_size);
+ CU_ASSERT(!wrong_align);
+
+ if (num_buf)
+ odp_buffer_free_multi(buffer, num_buf);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void test_pool_single_pool(odp_pool_param_t *param)
+{
+ odp_pool_t pool;
+ odp_buffer_t buffer;
+
+ param->buf.num = 1;
+
+ pool = odp_pool_create("pool 0", param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_pool_print(pool);
+
+ /* Allocate the only buffer from the pool */
+ buffer = odp_buffer_alloc(pool);
+ CU_ASSERT(buffer != ODP_BUFFER_INVALID);
+
+ /* Pool should be empty */
+ CU_ASSERT(odp_buffer_alloc(pool) == ODP_BUFFER_INVALID)
+
+ if (buffer != ODP_BUFFER_INVALID) {
+ odp_event_t ev = odp_buffer_to_event(buffer);
+
+ CU_ASSERT(odp_buffer_to_u64(buffer) !=
+ odp_buffer_to_u64(ODP_BUFFER_INVALID));
+ CU_ASSERT(odp_event_to_u64(ev) !=
+ odp_event_to_u64(ODP_EVENT_INVALID));
+ CU_ASSERT(odp_buffer_pool(buffer) == pool);
+ odp_buffer_print(buffer);
+ odp_buffer_free(buffer);
+ }
+
+ /* Check that the buffer was returned back to the pool */
+ buffer = odp_buffer_alloc(pool);
+ CU_ASSERT(buffer != ODP_BUFFER_INVALID);
+
+ if (buffer != ODP_BUFFER_INVALID)
+ odp_buffer_free(buffer);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void test_pool_two_pools(odp_pool_param_t *param)
+{
+ odp_pool_t pool0, pool1;
+ odp_buffer_t buf, buffer[2];
+ int num = 0;
+
+ if (pool_capa.buf.max_pools < 2)
+ return;
+
+ param->buf.num = 1;
+
+ pool0 = odp_pool_create("pool 0", param);
+ CU_ASSERT_FATAL(pool0 != ODP_POOL_INVALID);
+
+ pool1 = odp_pool_create("pool 1", param);
+ CU_ASSERT_FATAL(pool1 != ODP_POOL_INVALID);
+
+ buffer[0] = odp_buffer_alloc(pool0);
+ CU_ASSERT(buffer[0] != ODP_BUFFER_INVALID);
+
+ buffer[1] = odp_buffer_alloc(pool1);
+ CU_ASSERT(buffer[1] != ODP_BUFFER_INVALID);
+
+ if (buffer[0] != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_pool(buffer[0]) == pool0);
+ num++;
+ }
+ if (buffer[1] != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_pool(buffer[1]) == pool1);
+ num++;
+ }
+
+ CU_ASSERT(odp_buffer_alloc(pool0) == ODP_BUFFER_INVALID);
+ CU_ASSERT(odp_buffer_alloc(pool1) == ODP_BUFFER_INVALID);
+
+ /* free buffers from two different pools */
+ if (num)
+ odp_buffer_free_multi(buffer, num);
+
+ /* Check that buffers were returned back into pools */
+ buf = odp_buffer_alloc(pool0);
+ CU_ASSERT(buf != ODP_BUFFER_INVALID);
+
+ if (buf != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_pool(buf) == pool0);
+ odp_buffer_free(buf);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool0) == 0);
+
+ buf = odp_buffer_alloc(pool1);
+ CU_ASSERT(buf != ODP_BUFFER_INVALID);
+
+ if (buf != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_pool(buf) == pool1);
+ odp_buffer_free(buf);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool1) == 0);
+}
+
+static void test_pool_max_pools(odp_pool_param_t *param)
+{
+ uint32_t i, num_pool, num_buf;
+ void *addr;
+ odp_event_t ev;
+ uint32_t max_pools = pool_capa.buf.max_pools;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+ odp_pool_t pool[max_pools];
+ odp_buffer_t buffer[max_pools];
+
+ CU_ASSERT_FATAL(max_pools != 0);
+
+ printf("\n Creating %u pools\n", max_pools);
+
+ param->buf.num = 1;
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ num_pool = i;
+
+ CU_ASSERT(num_pool == max_pools);
+ if (num_pool != max_pools)
+ ODPH_ERR("Created only %u pools\n", num_pool);
+
+ for (i = 0; i < num_pool; i++) {
+ buffer[i] = odp_buffer_alloc(pool[i]);
+
+ if (buffer[i] == ODP_BUFFER_INVALID)
+ break;
+
+ CU_ASSERT_FATAL(odp_buffer_pool(buffer[i]) == pool[i]);
+
+ ev = odp_buffer_to_event(buffer[i]);
+ CU_ASSERT(odp_buffer_from_event(ev) == buffer[i]);
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
+
+ addr = odp_buffer_addr(buffer[i]);
+ CU_ASSERT(((uintptr_t)addr % align) == 0);
+
+ /* Write buffer data */
+ memset(addr, 0, size);
+ }
+
+ num_buf = i;
+ CU_ASSERT(num_buf == num_pool);
+
+ if (num_buf)
+ odp_buffer_free_multi(buffer, num_buf);
+
+ for (i = 0; i < num_pool; i++)
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+}
+
+static void buffer_test_pool_alloc_free(void)
+{
+ test_pool_alloc_free(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi(void)
+{
+ test_pool_alloc_free_multi(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_multi_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_single_pool(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_two_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_max_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_user_area(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ void *addr;
+ void *prev = NULL;
+ uint32_t num_alloc = 0;
+ uint32_t size = 1024;
+ const uint32_t max_size = pool_capa.buf.max_uarea_size;
+
+ if (max_size == 0) {
+ ODPH_DBG("Buffer user area not supported\n");
+ return;
+ }
+
+ if (size > max_size)
+ size = max_size;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.uarea_size = size;
+
+ num = param.buf.num;
+
+ odp_buffer_t buffer[num];
+
+ pool = odp_pool_create("test_user_area", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ odp_event_t ev;
+ int flag = 0;
+
+ buffer[i] = odp_buffer_alloc(pool);
+
+ if (buffer[i] == ODP_BUFFER_INVALID)
+ break;
+ num_alloc++;
+
+ addr = odp_buffer_user_area(buffer[i]);
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ ev = odp_buffer_to_event(buffer[i]);
+ CU_ASSERT(odp_event_user_area(ev) == addr);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+ CU_ASSERT(flag < 0);
+
+ prev = addr;
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ if (num_alloc)
+ odp_buffer_free_multi(buffer, num_alloc);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+odp_testinfo_t buffer_suite[] = {
+ ODP_TEST_INFO(buffer_test_pool_alloc_free),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_max_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_max_cache),
+ ODP_TEST_INFO(buffer_test_pool_single_pool),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_max_cache),
+ ODP_TEST_INFO(buffer_test_pool_two_pools),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_max_cache),
+ ODP_TEST_INFO(buffer_test_pool_max_pools),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_max_cache),
+ ODP_TEST_INFO(buffer_test_user_area),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t buffer_suites[] = {
+ {"buffer tests", buffer_suite_init, NULL, buffer_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(buffer_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/byteorder/.gitignore b/test/validation/api/byteorder/.gitignore
new file mode 100644
index 000000000..0b0d91053
--- /dev/null
+++ b/test/validation/api/byteorder/.gitignore
@@ -0,0 +1 @@
+byteorder_main
diff --git a/test/validation/api/byteorder/Makefile.am b/test/validation/api/byteorder/Makefile.am
new file mode 100644
index 000000000..186f4ecbc
--- /dev/null
+++ b/test/validation/api/byteorder/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = byteorder_main
+byteorder_main_SOURCES = byteorder.c
diff --git a/test/validation/api/byteorder/byteorder.c b/test/validation/api/byteorder/byteorder.c
new file mode 100644
index 000000000..087dfce95
--- /dev/null
+++ b/test/validation/api/byteorder/byteorder.c
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#include <stdint.h>
+
+static void test_defines(void)
+{
+ /* Endianness */
+ CU_ASSERT(ODP_BIG_ENDIAN || ODP_LITTLE_ENDIAN);
+
+ if (ODP_BIG_ENDIAN) {
+ CU_ASSERT(ODP_BYTE_ORDER == ODP_BIG_ENDIAN);
+ CU_ASSERT(!ODP_LITTLE_ENDIAN);
+ }
+
+ if (ODP_LITTLE_ENDIAN) {
+ CU_ASSERT(ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN);
+ CU_ASSERT(!ODP_BIG_ENDIAN);
+ }
+
+ /* Bitfield endianness */
+ CU_ASSERT(ODP_BIG_ENDIAN_BITFIELD || ODP_LITTLE_ENDIAN_BITFIELD);
+
+ if (ODP_BIG_ENDIAN_BITFIELD) {
+ CU_ASSERT(ODP_BITFIELD_ORDER == ODP_BIG_ENDIAN_BITFIELD);
+ CU_ASSERT(!ODP_LITTLE_ENDIAN_BITFIELD);
+ }
+
+ if (ODP_LITTLE_ENDIAN_BITFIELD) {
+ CU_ASSERT(ODP_BITFIELD_ORDER == ODP_LITTLE_ENDIAN_BITFIELD);
+ CU_ASSERT(!ODP_BIG_ENDIAN_BITFIELD);
+ }
+}
+
+static void test_types(void)
+{
+ const uint16_t u16_val = 0x1234;
+ const uint32_t u32_val = 0x12345678;
+ const uint64_t u64_val = 0x1234567890123456;
+ const uint16_t u16_val_conv = 0x3412;
+ const uint32_t u32_val_conv = 0x78563412;
+ const uint64_t u64_val_conv = 0x5634129078563412;
+ odp_u16be_t be16 = odp_cpu_to_be_16(u16_val);
+ odp_u32be_t be32 = odp_cpu_to_be_32(u32_val);
+ odp_u64be_t be64 = odp_cpu_to_be_64(u64_val);
+ odp_u16le_t le16 = odp_cpu_to_le_16(u16_val);
+ odp_u32le_t le32 = odp_cpu_to_le_32(u32_val);
+ odp_u64le_t le64 = odp_cpu_to_le_64(u64_val);
+ odp_u16sum_t sum16 = u16_val;
+ odp_u32sum_t sum32 = u16_val;
+
+ CU_ASSERT(sum16 == sum32);
+
+ if (ODP_BIG_ENDIAN) {
+ CU_ASSERT(be16 == u16_val);
+ CU_ASSERT(be32 == u32_val);
+ CU_ASSERT(be64 == u64_val);
+ CU_ASSERT(le16 == u16_val_conv);
+ CU_ASSERT(le32 == u32_val_conv);
+ CU_ASSERT(le64 == u64_val_conv);
+ } else {
+ CU_ASSERT(le16 == u16_val);
+ CU_ASSERT(le32 == u32_val);
+ CU_ASSERT(le64 == u64_val);
+ CU_ASSERT(be16 == u16_val_conv);
+ CU_ASSERT(be32 == u32_val_conv);
+ CU_ASSERT(be64 == u64_val_conv);
+ }
+
+ CU_ASSERT(odp_be_to_cpu_16(be16) == u16_val);
+ CU_ASSERT(odp_be_to_cpu_32(be32) == u32_val);
+ CU_ASSERT(odp_be_to_cpu_64(be64) == u64_val);
+ CU_ASSERT(odp_le_to_cpu_16(le16) == u16_val);
+ CU_ASSERT(odp_le_to_cpu_32(le32) == u32_val);
+ CU_ASSERT(odp_le_to_cpu_64(le64) == u64_val);
+}
+
+odp_testinfo_t byteorder_suite[] = {
+ ODP_TEST_INFO(test_defines),
+ ODP_TEST_INFO(test_types),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t byteorder_suites[] = {
+ {"byteorder", NULL, NULL, byteorder_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* Parse common options */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(byteorder_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/chksum/.gitignore b/test/validation/api/chksum/.gitignore
new file mode 100644
index 000000000..c69e8c470
--- /dev/null
+++ b/test/validation/api/chksum/.gitignore
@@ -0,0 +1 @@
+chksum_main
diff --git a/test/validation/api/chksum/Makefile.am b/test/validation/api/chksum/Makefile.am
new file mode 100644
index 000000000..349fdd641
--- /dev/null
+++ b/test/validation/api/chksum/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = chksum_main
+chksum_main_SOURCES = chksum.c
diff --git a/test/validation/api/chksum/chksum.c b/test/validation/api/chksum/chksum.c
new file mode 100644
index 000000000..0be418f3a
--- /dev/null
+++ b/test/validation/api/chksum/chksum.c
@@ -0,0 +1,454 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
/* Number of IPv4 header test vectors and the fixed header length (no options) */
#define NUM_IP_HDR 5
#define IP_HDR_LEN 20

/* Number of UDP test vectors and the maximum pseudo packet length */
#define NUM_UDP 4
#define MAX_UDP_LEN 128

/* 20-byte IPv4 headers (no options) with a valid header checksum filled
 * in at bytes 10-11. The ones-complement sum over each whole header
 * therefore folds to 0xffff, i.e. ~sum == 0 (checked by the tests). */
static uint8_t ip_hdr_test_vect[NUM_IP_HDR][IP_HDR_LEN] ODP_ALIGNED(4) = {
	{ 0x45, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
	  0xAB, 0x33, 0xC0, 0xA8, 0x2C, 0xA2, 0xC0, 0xA8, 0x21, 0x99
	},
	{ 0x45, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
	  0xAB, 0xCA, 0xC0, 0xA8, 0x2C, 0x5E, 0xC0, 0xA8, 0x21, 0x46
	},
	{ 0x45, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
	  0xAB, 0x64, 0xC0, 0xA8, 0x2C, 0x20, 0xC0, 0xA8, 0x21, 0xEA
	},
	{ 0x45, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
	  0xAB, 0x59, 0xC0, 0xA8, 0x2C, 0xD2, 0xC0, 0xA8, 0x21, 0x43
	},
	{ 0x45, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
	  0xAC, 0x06, 0xC0, 0xA8, 0x2C, 0x5C, 0xC0, 0xA8, 0x21, 0x0C
	}
};
+
/* A UDP test packet: pseudo header plus UDP header and payload, with a
 * checksum already filled in so the ones-complement sum over 'len'
 * bytes folds to 0xffff (checked by the tests). */
struct udp_test_vect_s {
	/* Number of valid bytes in data[] */
	uint32_t len;

	uint8_t data[MAX_UDP_LEN];
};

static struct udp_test_vect_s udp_test_vect[NUM_UDP] ODP_ALIGNED(4) = {
	{.len = 38,
	 .data = { 0x00, 0x11, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1A, 0xFF, 0x3C,
		   0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
		   0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0xC7, 0xBF
	 } },

	{.len = 39,
	 .data = { 0x00, 0x11, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x20, 0x40, 0x09, 0x35, 0x00, 0x1B, 0xD6, 0x43,
		   0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
		   0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0xBF, 0xB7
	 } },

	{.len = 59,
	 .data = { 0x00, 0x11, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x32, 0x5F, 0x01, 0x71, 0x00, 0x2F, 0xCB, 0xC0,
		   0x09, 0x8B, 0x61, 0x3E, 0x3A, 0x7F, 0x30, 0x0F, 0x4D, 0xEE,
		   0x2D, 0x7D, 0x11, 0xBB, 0xBB, 0x34, 0x0E, 0x9E, 0xC5, 0x3D,
		   0xBB, 0x81, 0x9A, 0x7F, 0xF2, 0x2A, 0xFC, 0x85, 0xA0, 0x1B,
		   0x73, 0x81, 0xC1, 0xB6, 0xE8, 0x91, 0x8C, 0xD8, 0x7F
	 } },

	{.len = 109,
	 .data = { 0x00, 0x11, 0x00, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		   0x00, 0x00, 0x32, 0x5F, 0x01, 0x71, 0x00, 0x61, 0xCB, 0x5C,
		   0x61, 0xAD, 0xFD, 0xE7, 0x4E, 0x98, 0x69, 0x59, 0x12, 0x3F,
		   0xDF, 0xF6, 0x79, 0x8B, 0xB3, 0x94, 0x94, 0x9F, 0x8C, 0x0B,
		   0x67, 0xBA, 0xBA, 0x3C, 0xE2, 0x5F, 0xCA, 0x52, 0x13, 0xB4,
		   0x57, 0x48, 0x99, 0x29, 0x23, 0xAC, 0x5C, 0x59, 0x66, 0xA2,
		   0x7B, 0x35, 0x65, 0x2B, 0x86, 0x5F, 0x47, 0xA7, 0xEE, 0xD4,
		   0x24, 0x99, 0xB9, 0xCE, 0x60, 0xAB, 0x7A, 0xE9, 0x37, 0xF2,
		   0x81, 0x84, 0x98, 0x72, 0x4F, 0x6A, 0x37, 0xE5, 0x4D, 0xB2,
		   0xDE, 0xB8, 0xBD, 0xE3, 0x03, 0x57, 0xF0, 0x5C, 0xA0, 0xAA,
		   0xB9, 0xF3, 0x3F, 0xDF, 0x23, 0xDD, 0x54, 0x2F, 0xCE
	 } }
};
+
/* Correct checksum in network byte order is 0xF3, 0x96 */
#define UDP_LONG_CHKSUM odp_be_to_cpu_16(0xF396)

/* Number of padding bytes in the end of the array */
#define UDP_LONG_PADDING 11

/* Long UDP packet with pseudo header. Checksum field is set to zero.
 * The array contains padding, so that a possible overrun is more likely
 * detected (overrun bytes are not all zeros). */
static uint8_t udp_test_vect_long[] ODP_ALIGNED(4) = {
	0x00, 0x11, 0x05, 0xED, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x3F, 0x00, 0x3F, 0x05, 0xED, 0x00, 0x00,
	0x0B, 0x2C, 0x9C, 0x06, 0x07, 0xF3, 0x51, 0x05, 0xF7, 0xA7,
	0xF4, 0x24, 0xBB, 0x2F, 0x19, 0xBB, 0x23, 0xB4, 0x99, 0x50,
	0x69, 0x10, 0x34, 0xAD, 0xFF, 0x83, 0x7B, 0x36, 0x8B, 0xA8,
	0xEE, 0x7A, 0x31, 0xA8, 0x61, 0x08, 0x10, 0xAE, 0xA4, 0x84,
	0x0F, 0x9B, 0x62, 0xFA, 0xD9, 0xFA, 0x4A, 0x71, 0x26, 0x36,
	0x6D, 0xC4, 0x3D, 0x0A, 0xE7, 0xAD, 0xB6, 0x3A, 0xC2, 0x23,
	0x85, 0x81, 0x17, 0xE6, 0x34, 0xCA, 0x76, 0x58, 0x70, 0xA3,
	0x88, 0x8E, 0xC7, 0xEE, 0xF3, 0xA7, 0xB0, 0xD9, 0x7E, 0x5C,
	0xCC, 0x11, 0x76, 0xD9, 0x6B, 0x33, 0x50, 0xCB, 0x52, 0x84,
	0x8F, 0xBB, 0xBC, 0xE1, 0xE9, 0x9A, 0x9E, 0xF9, 0xA5, 0x9D,
	0x2F, 0xB9, 0x47, 0x4B, 0xA8, 0x08, 0x6F, 0xD4, 0x48, 0xB5,
	0xFF, 0xA4, 0x9C, 0xAD, 0x7C, 0x38, 0xFD, 0x72, 0xB7, 0x30,
	0x5A, 0xC8, 0xAC, 0xA6, 0x38, 0xB2, 0xAA, 0xBF, 0xF6, 0x9C,
	0xB4, 0x7F, 0x56, 0xDA, 0x28, 0xE2, 0x39, 0x51, 0x9E, 0x28,
	0xAA, 0x74, 0x13, 0x70, 0x1E, 0x73, 0x47, 0x88, 0xFA, 0xAE,
	0x63, 0x04, 0x56, 0x70, 0xDF, 0x8D, 0x01, 0xDA, 0xA3, 0x7C,
	0xC5, 0x1D, 0x6C, 0xA8, 0xEC, 0xED, 0x72, 0x7B, 0x02, 0x32,
	0x48, 0x4C, 0xDD, 0x00, 0x03, 0xE8, 0xDB, 0xC9, 0x9E, 0x72,
	0x6B, 0xD1, 0x9D, 0x6E, 0xCF, 0xE3, 0xF1, 0x18, 0x20, 0x43,
	0x1C, 0xFA, 0x92, 0xE3, 0x95, 0xBD, 0xF9, 0xD9, 0x6E, 0x40,
	0x8C, 0x11, 0x34, 0xE7, 0xE9, 0x3F, 0x17, 0x36, 0x5A, 0x18,
	0x4A, 0x9E, 0x57, 0xC2, 0xAD, 0x96, 0x4C, 0x89, 0xAA, 0xEE,
	0x9C, 0xD3, 0x5C, 0x60, 0xE5, 0x56, 0xF7, 0x69, 0x86, 0x88,
	0x64, 0x57, 0x5E, 0x57, 0x2B, 0xA2, 0xD7, 0x1B, 0x96, 0x7F,
	0x23, 0xC9, 0x14, 0xB3, 0xC6, 0x69, 0xDB, 0xA5, 0x55, 0xCA,
	0xD1, 0xB8, 0x7D, 0x74, 0xAD, 0xD7, 0x46, 0xDE, 0x59, 0x52,
	0x99, 0xE1, 0x9B, 0xE8, 0x01, 0x6D, 0xF5, 0x25, 0xAE, 0x7B,
	0xCA, 0xEE, 0xBF, 0x42, 0xC1, 0x5A, 0xC1, 0xAE, 0x6F, 0xC3,
	0x72, 0x0E, 0x30, 0x1D, 0xCB, 0x0D, 0x55, 0x87, 0x3E, 0xE3,
	0x85, 0x20, 0x3D, 0xCC, 0x5D, 0x1C, 0xFB, 0xB9, 0x5A, 0x17,
	0x76, 0x46, 0xF9, 0xA8, 0xB5, 0xED, 0x1C, 0x3A, 0x4E, 0x79,
	0xB0, 0x17, 0x2C, 0xBD, 0x8D, 0xC6, 0x8F, 0x85, 0x9D, 0x97,
	0x54, 0xCD, 0x41, 0x7C, 0x77, 0x31, 0xF0, 0x1A, 0xD6, 0xA5,
	0x22, 0x20, 0x38, 0x66, 0x6C, 0xD8, 0x8D, 0x31, 0xEC, 0xFC,
	0x78, 0xB2, 0xE7, 0xCA, 0x0A, 0x3F, 0xB1, 0x5A, 0xD1, 0xC6,
	0xCD, 0x30, 0x11, 0x04, 0x56, 0x0E, 0x51, 0xF8, 0x9D, 0x0B,
	0x19, 0x98, 0x14, 0xE9, 0xFB, 0xC2, 0x6B, 0xA4, 0xD0, 0x75,
	0xF8, 0x47, 0x44, 0x41, 0x5C, 0x03, 0x95, 0xFC, 0x64, 0x82,
	0x97, 0xB7, 0x2D, 0x79, 0xFC, 0xF0, 0x8C, 0x7D, 0xD2, 0x0A,
	0xAC, 0x17, 0x4D, 0xFA, 0xF9, 0x1A, 0xC7, 0x42, 0x3D, 0x34,
	0x05, 0x23, 0x09, 0xC4, 0xBC, 0x0E, 0x57, 0xEB, 0x53, 0x5D,
	0x6D, 0xE6, 0xEB, 0x40, 0x05, 0x9B, 0x9D, 0xFA, 0xAA, 0x71,
	0x02, 0x4D, 0x65, 0x65, 0xF9, 0x32, 0x99, 0x5B, 0xA4, 0xCE,
	0x2C, 0xB1, 0xB4, 0xA2, 0xE7, 0x1A, 0xE7, 0x02, 0x20, 0xAA,
	0xCE, 0xC8, 0x96, 0x2D, 0xD4, 0x49, 0x9C, 0xBD, 0x7C, 0x88,
	0x7A, 0x94, 0xEA, 0xAA, 0x10, 0x1E, 0xA5, 0xAA, 0xBC, 0x52,
	0x9B, 0x4E, 0x7E, 0x43, 0x66, 0x5A, 0x5A, 0xF2, 0xCD, 0x03,
	0xFE, 0x67, 0x8E, 0xA6, 0xA5, 0x00, 0x5B, 0xBA, 0x3B, 0x08,
	0x22, 0x04, 0xC2, 0x8B, 0x91, 0x09, 0xF4, 0x69, 0xDA, 0xC9,
	0x2A, 0xAA, 0xB3, 0xAA, 0x7C, 0x11, 0xA1, 0xB3, 0x2A, 0xF4,
	0x77, 0xFA, 0x3B, 0x4B, 0x19, 0x60, 0x63, 0x06, 0x86, 0x7B,
	0x2A, 0xA4, 0x16, 0xD4, 0x4B, 0x01, 0x00, 0x53, 0x5A, 0x6F,
	0x1E, 0xF7, 0xAA, 0x09, 0xF5, 0xCA, 0x6E, 0x44, 0xF0, 0x15,
	0x1E, 0xC7, 0xEC, 0xD0, 0x1D, 0x7D, 0xF9, 0x5C, 0x98, 0xE8,
	0x5F, 0x75, 0xB1, 0xB8, 0xE0, 0x62, 0xDD, 0x9C, 0x3D, 0x6E,
	0x8A, 0x58, 0xFC, 0x9C, 0x06, 0x18, 0x67, 0x97, 0x9C, 0x03,
	0xD8, 0xE8, 0x00, 0x14, 0x34, 0x6B, 0xED, 0x25, 0xB6, 0x04,
	0x0C, 0x4D, 0xEE, 0x8E, 0x18, 0x6C, 0x09, 0x14, 0x40, 0x04,
	0x52, 0x44, 0xCC, 0x4B, 0xF9, 0x20, 0x04, 0x7E, 0x13, 0xF7,
	0x4D, 0x77, 0xE3, 0x94, 0x96, 0x82, 0x58, 0xB5, 0xF2, 0x6D,
	0xD6, 0xBF, 0x86, 0xDE, 0x3A, 0xAF, 0xA6, 0xE9, 0x18, 0x54,
	0x3B, 0xE2, 0x46, 0xBC, 0x68, 0x70, 0x9F, 0xEC, 0x6D, 0xE3,
	0x01, 0xD3, 0xCB, 0xC8, 0x98, 0x81, 0xA9, 0xBA, 0x5F, 0x95,
	0x76, 0x7B, 0xE3, 0xF4, 0xD0, 0x43, 0x4B, 0xC1, 0xA4, 0x57,
	0x95, 0x89, 0x97, 0xDE, 0x22, 0xBD, 0xA5, 0xF0, 0x75, 0x66,
	0x08, 0xF1, 0x38, 0x14, 0x5C, 0x1D, 0x7F, 0x17, 0x00, 0x63,
	0x51, 0xF2, 0xBF, 0x77, 0x65, 0x0D, 0xB0, 0x23, 0x29, 0xAA,
	0x5A, 0xDE, 0x08, 0x1A, 0x1C, 0x9F, 0xED, 0x31, 0xCD, 0xF3,
	0x03, 0xF7, 0x9F, 0x4E, 0xC0, 0xA0, 0x49, 0x9E, 0x21, 0xBF,
	0x65, 0x26, 0x37, 0xB6, 0x16, 0x8A, 0xE2, 0x71, 0xEF, 0x26,
	0xDD, 0x54, 0x10, 0xA4, 0xFA, 0x71, 0x92, 0xCE, 0xAF, 0xFF,
	0x26, 0xA0, 0xD2, 0x07, 0xF8, 0xA2, 0xA1, 0x61, 0xDA, 0x75,
	0x05, 0xDF, 0x9E, 0xAB, 0x18, 0xC8, 0xC3, 0xDC, 0xE4, 0x88,
	0x55, 0xF5, 0x6E, 0xA5, 0x0E, 0xD4, 0xF3, 0xF5, 0xC1, 0x12,
	0x71, 0x0F, 0xF9, 0x20, 0x69, 0xFF, 0xDD, 0x96, 0xE3, 0x3E,
	0x56, 0xC7, 0xB4, 0xE1, 0x74, 0x99, 0xFC, 0x10, 0x0E, 0x94,
	0xF2, 0xBA, 0xA9, 0x38, 0xE4, 0x87, 0x9B, 0x94, 0x79, 0x0E,
	0x71, 0x14, 0x3A, 0x49, 0x8D, 0x9B, 0x50, 0x45, 0x14, 0xD1,
	0x8F, 0x1E, 0x07, 0xBF, 0xB4, 0x2B, 0xAF, 0x99, 0xEB, 0x76,
	0x3F, 0xDA, 0x95, 0x6F, 0xB5, 0x96, 0x47, 0xE1, 0x01, 0x0C,
	0x16, 0x24, 0x69, 0x7C, 0x12, 0x95, 0x2C, 0x38, 0x10, 0x43,
	0x65, 0xFC, 0xAD, 0xEB, 0x33, 0x82, 0x8F, 0x27, 0x17, 0x52,
	0xEE, 0xE2, 0x9D, 0xD8, 0x53, 0x0F, 0x3F, 0xB8, 0xA0, 0x9A,
	0x86, 0x66, 0x51, 0x9F, 0x72, 0xF1, 0x01, 0xDB, 0x1C, 0x1F,
	0x30, 0x60, 0x9A, 0xBF, 0x43, 0x8C, 0x23, 0x3B, 0xCC, 0x3B,
	0x73, 0x6D, 0x0C, 0x3C, 0x71, 0xB3, 0xB7, 0x02, 0x10, 0x46,
	0xF6, 0x9C, 0x73, 0xC7, 0xB2, 0xE8, 0x54, 0x1B, 0x10, 0x03,
	0xA6, 0x79, 0x38, 0x03, 0x79, 0xC1, 0x5B, 0xE9, 0x1F, 0x10,
	0xF4, 0xD1, 0x8D, 0x91, 0x4E, 0x6C, 0x03, 0x96, 0x46, 0xB0,
	0xF0, 0xE7, 0x52, 0xCE, 0x10, 0x59, 0xC2, 0x65, 0xD7, 0xA3,
	0x46, 0xF5, 0x12, 0x6E, 0xB1, 0x96, 0xCC, 0xAB, 0xFC, 0xEA,
	0x6E, 0x29, 0x8E, 0x50, 0x2B, 0x67, 0xBA, 0x5A, 0x9B, 0xA7,
	0x8A, 0x82, 0xA6, 0x43, 0xBB, 0x18, 0xA4, 0x44, 0x08, 0x7F,
	0xC2, 0x31, 0xAC, 0x99, 0xA8, 0x25, 0x22, 0x80, 0x59, 0x24,
	0x2F, 0x77, 0x5A, 0xAF, 0x22, 0x20, 0x16, 0x96, 0x5B, 0xEF,
	0x81, 0x0E, 0x0A, 0xDE, 0xFC, 0x03, 0x39, 0x62, 0x79, 0xB0,
	0x0D, 0x9E, 0xDF, 0x6C, 0x48, 0xD7, 0xB0, 0xC7, 0x13, 0x29,
	0xE9, 0xD5, 0xFB, 0x78, 0x29, 0xCA, 0x39, 0xA9, 0x16, 0xC7,
	0x36, 0x11, 0xFC, 0xF4, 0x4E, 0x2D, 0xB8, 0xCF, 0xD4, 0x94,
	0xD5, 0xC4, 0x57, 0x2B, 0xF4, 0xFD, 0x24, 0x98, 0x71, 0x7B,
	0x0C, 0xF9, 0x43, 0x66, 0x68, 0xD5, 0x24, 0xA1, 0x5A, 0x52,
	0xF3, 0xA2, 0x55, 0xA9, 0x56, 0x81, 0xDF, 0xD8, 0xA3, 0x4E,
	0x95, 0x97, 0x01, 0xA8, 0x70, 0x8C, 0xCA, 0x8B, 0x48, 0xC2,
	0x34, 0x6A, 0x96, 0x58, 0x31, 0x7E, 0x7E, 0x76, 0x93, 0x5D,
	0x0D, 0x85, 0x74, 0xCE, 0xBF, 0xA0, 0xD5, 0xDC, 0x44, 0x45,
	0x85, 0x29, 0x83, 0x51, 0x45, 0x85, 0xE0, 0x2B, 0x29, 0xBF,
	0xBA, 0x3F, 0x41, 0xBB, 0x38, 0xAE, 0x79, 0xC5, 0x46, 0x43,
	0xBE, 0x25, 0xDA, 0xAA, 0x62, 0xF4, 0x7C, 0xDC, 0xC2, 0x2E,
	0x05, 0xDE, 0x26, 0x08, 0xA7, 0xAB, 0xE8, 0x83, 0x2D, 0x6F,
	0xD9, 0x41, 0x84, 0xF5, 0xE0, 0x97, 0x7B, 0x63, 0xE4, 0xE5,
	0xC7, 0x25, 0xEC, 0x22, 0x4A, 0x27, 0x85, 0xBB, 0x95, 0x47,
	0x65, 0x9E, 0xAB, 0x0A, 0x4D, 0x91, 0x07, 0x8D, 0x34, 0xC9,
	0xE1, 0xBF, 0xA1, 0xB8, 0xAE, 0xCE, 0x59, 0x26, 0xE6, 0xDF,
	0x3A, 0x83, 0x09, 0x02, 0x67, 0x7C, 0xE4, 0x65, 0xA5, 0xCC,
	0x11, 0xC8, 0x05, 0x55, 0xBD, 0x30, 0xC4, 0x6F, 0xAD, 0xE0,
	0x6F, 0x80, 0x83, 0x85, 0x4A, 0xCD, 0x3E, 0xB1, 0xF3, 0x8F,
	0x01, 0x8A, 0x43, 0x4A, 0x15, 0xC4, 0x75, 0x5A, 0x30, 0xCC,
	0x8A, 0xCE, 0xF8, 0x46, 0xEB, 0x7A, 0xC3, 0xBA, 0x51, 0x48,
	0xA1, 0x8A, 0xE2, 0xCF, 0x9C, 0x28, 0x9D, 0x27, 0x3E, 0x85,
	0xF7, 0xFB, 0x54, 0xCD, 0xC6, 0xDF, 0xF2, 0x51, 0x5F, 0xE4,
	0xB7, 0xC5, 0xFB, 0x6A, 0x52, 0xAB, 0x60, 0x36, 0x45, 0x0F,
	0xBD, 0xC5, 0xE9, 0x75, 0xD5, 0xDF, 0xB3, 0x10, 0x5F, 0x6F,
	0xB5, 0x34, 0xAD, 0x91, 0x68, 0x0E, 0x8D, 0xED, 0xA8, 0x93,
	0x6D, 0x44, 0x00, 0xB6, 0xC2, 0x48, 0x28, 0xDE, 0xAA, 0xB1,
	0xCC, 0x97, 0xCF, 0x8D, 0x8F, 0x87, 0x8F, 0xD7, 0x50, 0xA5,
	0x5B, 0x4C, 0xAC, 0xA5, 0x5D, 0x7A, 0xC4, 0xB6, 0x5E, 0x1A,
	0x40, 0x70, 0xE6, 0x9F, 0x94, 0x08, 0xA5, 0x0F, 0x81, 0xC7,
	0x11, 0x12, 0xDF, 0xBA, 0x51, 0x49, 0x9B, 0xAA, 0x5A, 0xE0,
	0xFC, 0x4E, 0x58, 0x67, 0x2A, 0xC0, 0x4F, 0xDD, 0xF0, 0x2E,
	0x02, 0x0E, 0xC1, 0xD2, 0x14, 0x20, 0xF9, 0x24, 0x6D, 0x68,
	0x66, 0x4E, 0xDF, 0x82, 0x07, 0xE0, 0x09, 0xA0, 0x13, 0xC5,
	0x7C, 0x22, 0x3D, 0x76, 0x1D, 0x67, 0x37, 0x6D, 0xCB, 0xE3,
	0x75, 0xDD, 0x41, 0x72, 0x33, 0xA0, 0x3D, 0xEC, 0xB9, 0x70,
	0xE2, 0xFA, 0xDE, 0x5B, 0x5A, 0x28, 0xCB, 0x71, 0xC1, 0x3B,
	0x01, 0xC0, 0x3E, 0xC4, 0x9E, 0x82, 0x73, 0xF5, 0xDB, 0x94,
	0x18, 0xB4, 0xDA, 0x2A, 0xE2, 0xEE, 0x9F, 0xC2, 0xAA, 0x2E,
	0x5C, 0x56, 0xCB, 0x6E, 0xF1, 0xD6, 0xCC, 0x2D, 0xB3, 0xD5,
	0x3F, 0xC1, 0x6C, 0x83, 0xE8, 0xEF, 0xA4, 0xDB, 0x22, 0xB9,
	0x1F, 0x1D, 0x7F, 0x77, 0xA7, 0x7F, 0xAF, 0x29, 0x0C, 0x1F,
	0xA3, 0x0C, 0x68, 0x3D, 0xF1, 0x6B, 0xA7, 0xA7, 0x7B, 0xB8,
	0x47, 0x74, 0x4C, 0xDB, 0x5D, 0xF5, 0xC2, 0xCA, 0xD9, 0xE9,
	0xF2, 0x5A, 0x0A, 0xF0, 0x48, 0x55, 0x65, 0x43, 0x6E, 0xCC,
	0x82, 0xA1, 0x6F, 0xAE, 0x67, 0x8D, 0x1D, 0x9A, 0x09, 0xB6,
	0xB0, 0xF2, 0x10, 0xB7, 0xAF, 0x31, 0xDB, 0x00, 0x14, 0x7E,
	0xC4, 0x14, 0xAB, 0x81, 0xA5, 0xF6, 0xBB, 0x75, 0x9B, 0xDD,
	0xE8, 0x7E, 0x09, 0x2F, 0x58, 0x3D, 0xE0, 0xAD, 0x15, 0xA2,
	0x1E, 0xEB, 0xB2, 0x02, 0x95, 0x04, 0x32, 0x6A, 0xEE, 0x8B,
	0x25, 0x32, 0xED, 0xC5, 0x14, 0xD5, 0xF7, 0x15, 0x1F, 0x00,
	0xD1, 0xB7, 0xE5, 0xE8, 0xAA, 0xB1, 0xA4, 0xE1, 0x5C, 0x07,
	0xA1, 0x2D, 0xEF, 0x2F, 0xCB, 0x11, 0x5E, 0xC4, 0x9B, 0x2E,
	0x9E, 0x7F, 0x3E, 0x0F, 0xDD, 0x62, 0xF6, 0xB3, 0xE2, 0xEE,
	/* The final UDP_LONG_PADDING (11) bytes of the array (ending in a
	 * recognizable 0xDEADBEEF pattern) are overrun-detection filler,
	 * not packet data. */
	0xDE, 0xAD, 0xBE, 0xEF, 0xEE, 0xEE, 0xDE, 0xAD, 0xBE, 0xEF
};
+
+/* Test ones complement sum with IPv4 headers */
+static void chksum_ones_complement_ip(void)
+{
+ int i;
+ uint16_t sum, res;
+
+ for (i = 0; i < NUM_IP_HDR; i++) {
+ sum = odp_chksum_ones_comp16(ip_hdr_test_vect[i], IP_HDR_LEN);
+ res = ~sum;
+
+ CU_ASSERT(res == 0);
+ }
+}
+
+/* Test ones complement sum with various length pseudo UDP packets */
+static void chksum_ones_complement_udp(void)
+{
+ int i;
+ uint16_t sum, res;
+
+ for (i = 0; i < NUM_UDP; i++) {
+ sum = odp_chksum_ones_comp16(udp_test_vect[i].data,
+ udp_test_vect[i].len);
+ res = ~sum;
+
+ CU_ASSERT(res == 0);
+ }
+}
+
+/* Test ones complement sum with a long pseudo UDP packet */
+static void chksum_ones_complement_udp_long(void)
+{
+ int i;
+ uint16_t sum, res;
+ uint32_t offset, frag_sum;
+ uint32_t len = sizeof(udp_test_vect_long) - UDP_LONG_PADDING;
+ int num_frag = 7;
+ uint32_t frag_len = len / num_frag;
+
+ /* Checksum all data */
+ sum = odp_chksum_ones_comp16(udp_test_vect_long, len);
+ res = ~sum;
+
+ CU_ASSERT(res == UDP_LONG_CHKSUM);
+
+ /* Checksum data in fragments */
+ frag_sum = 0;
+ offset = 0;
+
+ for (i = 0; i < num_frag; i++) {
+ if (i == num_frag - 1)
+ frag_len = len - offset;
+
+ /* Check that test passes 16 bit aligned addresses */
+ CU_ASSERT_FATAL((offset % 2) == 0);
+
+ frag_sum += odp_chksum_ones_comp16(&udp_test_vect_long[offset],
+ frag_len);
+
+ offset += frag_len;
+ }
+
+ /* Fold 32-bit sum to 16 bits */
+ while (frag_sum >> 16)
+ frag_sum = (frag_sum & 0xffff) + (frag_sum >> 16);
+
+ res = ~frag_sum;
+
+ CU_ASSERT(res == UDP_LONG_CHKSUM);
+}
+
/*
 * Reference ones-complement sum from RFC 1071 (section 4.1): add the
 * data as native-endian 16 bit words, place a possible odd trailing
 * byte into the first byte (memory order) of a zeroed word, then fold
 * the carries back into 16 bits. Returns the folded sum, not its
 * complement.
 */
static uint16_t chksum_rfc1071(const void *p, uint32_t len)
{
	const uint16_t *word = p;
	uint32_t acc = 0;

	/* Sum all complete 16 bit words */
	for (; len > 1; len -= 2)
		acc += *word++;

	/* Add left-over byte, if any, zero padded in memory order */
	if (len > 0) {
		uint16_t last = 0;

		*(uint8_t *)&last = *(const uint8_t *)word;
		acc += last;
	}

	/* Fold 32-bit sum to 16 bits */
	while (acc >> 16)
		acc = (acc & 0xffff) + (acc >> 16);

	return acc;
}
+
/*
 * 64-bit KISS RNGs
 * George Marsaglia
 * https://www.thecodingforums.com/threads/64-bit-kiss-rngs.673657
 */

/* Fixed-seed generator state: the pseudorandom tests are repeatable from
 * run to run. File-global state, so the generator is not reentrant. */
static unsigned long long x = 1234567890987654321ULL, c = 123456123456123456ULL,
			  y = 362436362436362436ULL, z = 1066149217761810ULL, t;

/* Multiply-with-carry, xorshift and congruential components; KISS
 * combines all three. Every macro expansion mutates the state above. */
#define MWC (t = (x << 58) + c, c = (x >> 6), x += t, c += (x < t), x)
#define XSH (y ^= (y << 13), y ^= (y >> 17), y ^= (y << 43))
#define CNG (z = 6906969069LL * z + 1234567)
#define KISS (MWC + XSH + CNG)
+
+/*
+ * Test with pseudorandom data and different data lengths and alignments.
+ */
+static void chksum_ones_complement_pseudorandom(void)
+{
+ const int size = 32 * 1024;
+ const unsigned long page = 4096;
+ /* Allocate some extra pages for alignment and length. */
+ uint8_t *buf = (uint8_t *)malloc(size + page * 4);
+ uint8_t *data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ for (int i = 0; i < (size + (int)page * 3) / 8; i++)
+ ((uint64_t *)(uintptr_t)data)[i] = KISS;
+
+ /* Test data lengths from 1 to more than 9000 bytes. */
+ for (int len = 1; len < 10000; len++) {
+ /*
+ * To avoid spending too much time on long data, the number of
+ * rounds goes down as data length goes up.
+ */
+ int rounds = 1000000000 / (len * len + 1000000);
+
+ for (int i = 0; i < rounds; i++) {
+ /* Align p to two bytes. */
+ uint8_t *p = data + (KISS & (size - 1) & ~1UL);
+ /*
+ * Generate some fresh random bits at the start of the
+ * data to be checksummed.
+ */
+ uint64_t rnd = KISS;
+
+ memcpy(p, &rnd, sizeof(rnd));
+ CU_ASSERT(chksum_rfc1071(p, len) ==
+ odp_chksum_ones_comp16(p, len));
+ }
+ }
+
+ free(buf);
+}
+
+/*
+ * Test with very long data with most of the bits set. The idea is to
+ * maximize the number of carries.
+ */
+static void chksum_ones_complement_very_long(void)
+{
+ const int size = 64 * 1024;
+ const unsigned long page = 4096;
+ /* Allocate two extra pages for alignment. */
+ uint8_t *buf = (uint8_t *)malloc(size + page * 2);
+ uint8_t *data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ /* Start with all bits set. */
+ memset(data, 0xff, size + page);
+
+ for (int i = 0; i < 100; i++) {
+ for (int len = size - 8; len <= size; len++) {
+ /* Alignment 0, 2, 4, 6, 8. */
+ for (int a = 0; a <= 8; a += 2)
+ CU_ASSERT(chksum_rfc1071(data + a, len) ==
+ odp_chksum_ones_comp16(data + a, len));
+ }
+
+ /* Turn off some random bits in the data. */
+ uint64_t rnd = KISS;
+ ((uint8_t *)data)[rnd & (size - 1)] &= (rnd >> 32) & 0xff;
+ }
+
+ free(buf);
+}
+
/* Checksum test cases; the NULL entry terminates the table. */
odp_testinfo_t chksum_suite[] = {
	ODP_TEST_INFO(chksum_ones_complement_ip),
	ODP_TEST_INFO(chksum_ones_complement_udp),
	ODP_TEST_INFO(chksum_ones_complement_udp_long),
	ODP_TEST_INFO(chksum_ones_complement_pseudorandom),
	ODP_TEST_INFO(chksum_ones_complement_very_long),
	ODP_TEST_INFO_NULL,
};

/* Single suite; no per-suite init/term functions are needed. */
odp_suiteinfo_t chksum_suites[] = {
	{"Checksum", NULL, NULL, chksum_suite},
	ODP_SUITE_INFO_NULL
};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(chksum_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/classification/.gitignore b/test/validation/api/classification/.gitignore
index e2cdfefe1..e2cdfefe1 100644
--- a/test/common_plat/validation/api/classification/.gitignore
+++ b/test/validation/api/classification/.gitignore
diff --git a/test/validation/api/classification/Makefile.am b/test/validation/api/classification/Makefile.am
new file mode 100644
index 000000000..e17f9f654
--- /dev/null
+++ b/test/validation/api/classification/Makefile.am
@@ -0,0 +1,11 @@
+include ../Makefile.inc
+
+test_PROGRAMS = classification_main
+classification_main_SOURCES = \
+ odp_classification_basic.c \
+ odp_classification_tests.c \
+ odp_classification_testsuites.h \
+ odp_classification_test_pmr.c \
+ odp_classification_common.c \
+ classification.c \
+ classification.h
diff --git a/test/validation/api/classification/classification.c b/test/validation/api/classification/classification.c
new file mode 100644
index 000000000..ef9a647cb
--- /dev/null
+++ b/test/validation/api/classification/classification.c
@@ -0,0 +1,48 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+
/* Suite registry. Designated initializers leave unused init/term hooks
 * NULL; the NULL entry terminates the table. */
odp_suiteinfo_t classification_suites[] = {
	{ .name = "classification basic",
	  .testinfo_tbl = classification_suite_basic,
	},
	{ .name = "classification pmr tests",
	  .testinfo_tbl = classification_suite_pmr,
	  .init_func = classification_suite_pmr_init,
	  .term_func = classification_suite_pmr_term,
	},
	{ .name = "classification tests",
	  .testinfo_tbl = classification_suite,
	  .init_func = classification_suite_init,
	  .term_func = classification_suite_term,
	},
	{ .name = "classification packet vector tests",
	  .testinfo_tbl = classification_suite_pktv,
	  .init_func = classification_suite_pktv_init,
	  .term_func = classification_suite_pktv_term,
	},
	ODP_SUITE_INFO_NULL,
};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(classification_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/classification/classification.h b/test/validation/api/classification/classification.h
index 53c527f38..70dcc6230 100644
--- a/test/common_plat/validation/api/classification/classification.h
+++ b/test/validation/api/classification/classification.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -19,8 +19,14 @@
#define CLS_DEFAULT_DADDR "10.0.0.100/32"
#define CLS_DEFAULT_SPORT 1024
#define CLS_DEFAULT_DPORT 2048
-#define CLS_DEFAULT_DMAC 0x010203040506
-#define CLS_DEFAULT_SMAC 0x060504030201
+#define CLS_DEFAULT_DMAC {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
+#define CLS_DEFAULT_SMAC {0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c}
+#define CLS_MAGIC_VAL 0xdeadbeef
+
+/* Config values for Drop CoS */
+#define TEST_DROP 1
+#define CLS_DROP 6
+#define CLS_DROP_PORT 4001
/* Config values for Error CoS */
#define TEST_ERROR 1
@@ -46,7 +52,7 @@
/* Config values for CoS L2 Priority */
#define TEST_L2_QOS 1
-#define CLS_L2_QOS_0 6
+#define CLS_L2_QOS_0 7
#define CLS_L2_QOS_MAX 5
#define CLS_ENTRIES (CLS_L2_QOS_0 + CLS_L2_QOS_MAX)
@@ -76,45 +82,4 @@
#define DEFAULT_TOS ((DEFAULT_DSCP << ODPH_IP_TOS_DSCP_SHIFT) | \
DEFAULT_ECN)
-/* test functions: */
-void classification_test_create_cos(void);
-void classification_test_destroy_cos(void);
-void classification_test_create_pmr_match(void);
-void classification_test_cos_set_queue(void);
-void classification_test_cos_set_pool(void);
-void classification_test_cos_set_drop(void);
-void classification_test_pmr_composite_create(void);
-void classification_test_pmr_composite_destroy(void);
-
-void classification_test_pktio_set_skip(void);
-void classification_test_pktio_set_headroom(void);
-void classification_test_pktio_configure(void);
-void classification_test_pktio_test(void);
-
-void classification_test_pmr_term_tcp_dport(void);
-void classification_test_pmr_term_tcp_sport(void);
-void classification_test_pmr_term_udp_dport(void);
-void classification_test_pmr_term_udp_sport(void);
-void classification_test_pmr_term_ipproto(void);
-void classification_test_pmr_term_dmac(void);
-void classification_test_pmr_term_packet_len(void);
-void classification_test_pmr_term_vlan_id_0(void);
-void classification_test_pmr_term_vlan_id_x(void);
-void classification_test_pmr_term_eth_type_0(void);
-void classification_test_pmr_term_eth_type_x(void);
-
-/* test arrays: */
-extern odp_testinfo_t classification_suite_basic[];
-extern odp_testinfo_t classification_suite[];
-
-/* test array init/term functions: */
-int classification_suite_init(void);
-int classification_suite_term(void);
-
-/* test registry: */
-extern odp_suiteinfo_t classification_suites[];
-
-/* main test program: */
-int classification_main(int argc, char *argv[]);
-
#endif
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
new file mode 100644
index 000000000..ca0b58ad5
--- /dev/null
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_cunit_common.h>
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+
+/* Limit handle array allocation from stack to about 256kB */
+#define MAX_HANDLES (32 * 1024)
+
+static void test_defaults(uint8_t fill)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_pmr_param_t pmr_param;
+
+ memset(&cos_param, fill, sizeof(cos_param));
+ odp_cls_cos_param_init(&cos_param);
+
+ CU_ASSERT(cos_param.action == ODP_COS_ACTION_ENQUEUE);
+ CU_ASSERT(cos_param.num_queue == 1);
+ CU_ASSERT_EQUAL(cos_param.stats_enable, false);
+ CU_ASSERT_EQUAL(cos_param.red.enable, false);
+ CU_ASSERT_EQUAL(cos_param.bp.enable, false);
+ CU_ASSERT_EQUAL(cos_param.vector.enable, false);
+
+ memset(&pmr_param, fill, sizeof(pmr_param));
+ odp_cls_pmr_param_init(&pmr_param);
+ CU_ASSERT_EQUAL(pmr_param.range_term, false);
+}
+
/* Defaults must survive both an all-zeros and an all-ones prefill. */
static void cls_default_values(void)
{
	test_defaults(0);
	test_defaults(0xff);
}
+
+static void cls_create_cos(void)
+{
+ odp_cos_t cos;
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(NULL, &cls_param);
+ CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
+ odp_cos_destroy(cos);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+}
+
+static void cls_create_cos_max_common(odp_bool_t stats)
+{
+ uint32_t i, num;
+ odp_cls_cos_param_t cls_param;
+ odp_cls_capability_t capa;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ num = capa.max_cos;
+ if (num > MAX_HANDLES)
+ num = MAX_HANDLES;
+
+ if (stats && capa.max_cos_stats < num)
+ num = capa.max_cos_stats;
+
+ odp_cos_t cos[num];
+
+ for (i = 0; i < num; i++) {
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.action = ODP_COS_ACTION_DROP;
+ cls_param.stats_enable = stats;
+
+ cos[i] = odp_cls_cos_create(NULL, &cls_param);
+ if (cos[i] == ODP_COS_INVALID) {
+ ODPH_ERR("odp_cls_cos_create() failed at CoS %u out of %u.\n", i + 1, num);
+ break;
+ }
+ }
+
+ CU_ASSERT(i == num);
+
+ for (uint32_t j = 0; j < i; j++)
+ CU_ASSERT(!odp_cos_destroy(cos[j]));
+}
+
+static int cos_create_multi(const char *name[], const odp_cls_cos_param_t param[], odp_cos_t cos[],
+ uint32_t num)
+{
+ const uint32_t max_retries = 100;
+ uint32_t num_created = 0;
+ uint32_t num_retries = 0;
+
+ do {
+ const char **cur_name = (name != NULL) ? &name[num_created] : NULL;
+ int ret = odp_cls_cos_create_multi(cur_name, &param[num_created],
+ &cos[num_created], num - num_created);
+ if (ret < 0) {
+ CU_FAIL("CoS create multi failed");
+ break;
+ }
+ num_retries = (ret == 0) ? num_retries + 1 : 0;
+ num_created += ret;
+ } while (num_created < num && num_retries < max_retries);
+
+ return num_created;
+}
+
+static void cos_destroy_multi(odp_cos_t cos[], uint32_t num)
+{
+ uint32_t num_left = num;
+ uint32_t num_freed = 0;
+
+ while (num_left) {
+ int ret = odp_cos_destroy_multi(&cos[num_freed], num_left);
+
+ CU_ASSERT_FATAL(ret > 0 && (uint32_t)ret <= num_left);
+
+ num_left -= ret;
+ num_freed += ret;
+ }
+ CU_ASSERT_FATAL(num_freed == num);
+}
+
+static void cls_create_cos_multi(void)
+{
+ odp_cls_cos_param_t param_single;
+ odp_cls_cos_param_t param[MAX_HANDLES];
+ odp_cls_capability_t capa;
+ odp_cos_t cos[MAX_HANDLES];
+ const char *name[MAX_HANDLES] = {NULL, "aaa", NULL, "bbb", "ccc", NULL, "ddd"};
+ uint32_t num, num_created;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+ CU_ASSERT_FATAL(capa.max_cos);
+
+ num = capa.max_cos < MAX_HANDLES ? capa.max_cos : MAX_HANDLES;
+
+ for (uint32_t i = 0; i < num; i++) {
+ odp_cls_cos_param_init(&param[i]);
+ param[i].action = ODP_COS_ACTION_DROP;
+ }
+ odp_cls_cos_param_init(&param_single);
+ param_single.action = ODP_COS_ACTION_DROP;
+
+ num_created = cos_create_multi(NULL, &param_single, cos, 1);
+ CU_ASSERT(num_created == 1)
+ cos_destroy_multi(cos, num_created);
+
+ num_created = cos_create_multi(name, param, cos, num);
+ CU_ASSERT(num_created == num)
+ cos_destroy_multi(cos, num_created);
+
+ num_created = cos_create_multi(NULL, param, cos, num);
+ CU_ASSERT(num_created == num)
+ cos_destroy_multi(cos, num_created);
+}
+
/* Maximum number of CoSes without statistics */
static void cls_create_cos_max(void)
{
	cls_create_cos_max_common(false);
}

/* Maximum number of CoSes with per-CoS statistics enabled */
static void cls_create_cos_max_stats(void)
{
	cls_create_cos_max_common(true);
}
+
+static void cls_destroy_cos(void)
+{
+ odp_cos_t cos;
+ char name[ODP_COS_NAME_LEN];
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_cls_cos_param_t cls_param;
+ int retval;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(name, "ClassOfService");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(name, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+ retval = odp_cos_destroy(cos);
+ CU_ASSERT(retval == 0);
+ retval = odp_cos_destroy(ODP_COS_INVALID);
+ CU_ASSERT(retval < 0);
+
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+}
+
/* Create one PMR from the default CoS to a second CoS, then destroy it.
 * Also checks that destroying an invalid PMR handle fails. Resource
 * teardown mirrors the setup order. */
static void cls_create_pmr_match(void)
{
	odp_pmr_t pmr;
	uint16_t val;
	uint16_t mask;
	int retval;
	odp_pmr_param_t pmr_param;
	odp_cos_t default_cos;
	odp_cos_t cos;
	odp_queue_t default_queue;
	odp_queue_t queue;
	odp_pool_t default_pool;
	odp_pool_t pool;
	odp_pool_t pkt_pool;
	odp_cls_cos_param_t cls_param;
	odp_pktio_t pktio;

	pkt_pool = pool_create("pkt_pool");
	CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);

	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	/* The default CoS is the PMR's source CoS below. */
	configure_default_cos(pktio, &default_cos,
			      &default_queue, &default_pool);

	queue = queue_create("pmr_match", true);
	CU_ASSERT(queue != ODP_QUEUE_INVALID);

	pool = pool_create("pmr_match");
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	odp_cls_cos_param_init(&cls_param);
	cls_param.pool = pool;
	cls_param.queue = queue;

	cos = odp_cls_cos_create("pmr_match", &cls_param);
	CU_ASSERT(cos != ODP_COS_INVALID);

	/* Match a single exact 16-bit value with a supported L3 term. */
	val = 1024;
	mask = 0xffff;
	odp_cls_pmr_param_init(&pmr_param);
	pmr_param.term = find_first_supported_l3_pmr();
	pmr_param.range_term = false;
	pmr_param.match.value = &val;
	pmr_param.match.mask = &mask;
	pmr_param.val_sz = sizeof(val);

	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
	CU_ASSERT(pmr != ODP_PMR_INVALID);
	CU_ASSERT(odp_pmr_to_u64(pmr) != odp_pmr_to_u64(ODP_PMR_INVALID));
	/* destroy the created PMR */
	retval = odp_cls_pmr_destroy(pmr);
	CU_ASSERT(retval == 0);

	/* destroy an INVALID PMR */
	retval = odp_cls_pmr_destroy(ODP_PMR_INVALID);
	CU_ASSERT(retval < 0);

	odp_cos_destroy(cos);
	odp_queue_destroy(queue);
	odp_pool_destroy(pool);
	odp_pool_destroy(pkt_pool);
	/* Detach the default CoS from the pktio before destroying it. */
	odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
	odp_cos_destroy(default_cos);
	odp_queue_destroy(default_queue);
	odp_pool_destroy(default_pool);
	odp_pktio_close(pktio);
}
+
+/* Create maximum number of PMRs into the default CoS */
+static void cls_max_pmr_from_default_action(int drop)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_queue_param_t queue_param;
+ odp_cls_capability_t capa;
+ odp_schedule_capability_t sched_capa;
+ odp_pmr_param_t pmr_param;
+ odp_pool_t pool;
+ odp_pktio_t pktio;
+ odp_cos_t default_cos;
+ uint32_t i, num_cos, num_pmr;
+ int ret;
+ uint32_t cos_created = 0;
+ uint32_t queue_created = 0;
+ uint32_t pmr_created = 0;
+ uint16_t val = 1024;
+ uint16_t mask = 0xffff;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
+
+ pool = pool_create("pkt_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ num_cos = capa.max_cos;
+
+ if (num_cos > sched_capa.max_queues)
+ num_cos = sched_capa.max_queues;
+
+ if (num_cos > MAX_HANDLES)
+ num_cos = MAX_HANDLES;
+
+ CU_ASSERT_FATAL(num_cos > 1);
+
+ num_pmr = capa.max_pmr_per_cos;
+
+ odp_cos_t cos[num_cos];
+ odp_queue_t queue[num_cos];
+ odp_pmr_t pmr[num_pmr];
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+
+ odp_cls_cos_param_init(&cos_param);
+ if (drop)
+ cos_param.action = ODP_COS_ACTION_DROP;
+
+ for (i = 0; i < num_cos; i++) {
+ if (!drop) {
+ queue[i] = odp_queue_create(NULL, &queue_param);
+
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("odp_queue_create() failed %u / %u\n", i + 1, num_cos);
+ break;
+ }
+
+ cos_param.queue = queue[i];
+ queue_created++;
+ }
+
+ cos[i] = odp_cls_cos_create(NULL, &cos_param);
+
+ if (cos[i] == ODP_COS_INVALID) {
+ ODPH_ERR("odp_cls_cos_create() failed %u / %u\n", i + 1, num_cos);
+ break;
+ }
+
+ cos_created++;
+ }
+
+ if (!drop)
+ CU_ASSERT(queue_created == num_cos);
+
+ CU_ASSERT(cos_created == num_cos);
+
+ if (cos_created != num_cos)
+ goto destroy_cos;
+
+ default_cos = cos[0];
+
+ ret = odp_pktio_default_cos_set(pktio, default_cos);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = find_first_supported_l3_pmr();
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ for (i = 0; i < num_pmr; i++) {
+ pmr[i] = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos[i + 1]);
+
+ if (pmr[i] == ODP_PMR_INVALID) {
+ ODPH_ERR("odp_cls_pmr_create() failed %u / %u\n", i + 1, num_pmr);
+ break;
+ }
+
+ val++;
+ pmr_created++;
+ }
+
+ printf("\n Number of CoS created: %u\n Number of PMR created: %u\n", cos_created,
+ pmr_created);
+
+ CU_ASSERT(pmr_created == num_pmr);
+
+ for (i = 0; i < pmr_created; i++)
+ CU_ASSERT(odp_cls_pmr_destroy(pmr[i]) == 0);
+
+ ret = odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ CU_ASSERT_FATAL(ret == 0);
+
+destroy_cos:
+ for (i = 0; i < cos_created; i++)
+ CU_ASSERT(odp_cos_destroy(cos[i]) == 0);
+
+ for (i = 0; i < queue_created; i++)
+ CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void cls_max_pmr_from_default_drop(void)
+{
+ cls_max_pmr_from_default_action(1);
+}
+
+static void cls_max_pmr_from_default_enqueue(void)
+{
+ cls_max_pmr_from_default_action(0);
+}
+
+static void cls_create_pmr_multi(void)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_cls_capability_t capa;
+ odp_pool_t pool;
+ odp_pktio_t pktio;
+ uint32_t i, num_cos, num_pmr, num_left;
+ int ret;
+ const uint32_t max_retries = 100;
+ uint32_t num_retries = 0;
+ uint32_t num_freed = 0;
+ uint32_t cos_created = 0;
+ uint32_t pmr_created = 0;
+ uint16_t mask = 0xffff;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ pool = pool_create("pkt_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ num_cos = capa.max_cos;
+ if (num_cos > MAX_HANDLES)
+ num_cos = MAX_HANDLES;
+
+ CU_ASSERT_FATAL(num_cos > 1);
+
+ num_pmr = num_cos - 1;
+ if (num_pmr > capa.max_pmr)
+ num_pmr = capa.max_pmr;
+
+ odp_cos_t src_cos[num_cos];
+ odp_cos_t cos[num_cos];
+ odp_pmr_t pmr[num_pmr];
+ odp_pmr_create_opt_t pmr_opt[num_pmr];
+ odp_pmr_param_t pmr_param[num_pmr];
+ uint16_t val[num_pmr];
+
+ odp_cls_cos_param_init(&cos_param);
+ cos_param.action = ODP_COS_ACTION_DROP;
+
+ for (i = 0; i < num_cos; i++) {
+ cos[i] = odp_cls_cos_create(NULL, &cos_param);
+
+ if (cos[i] == ODP_COS_INVALID) {
+ ODPH_ERR("odp_cls_cos_create() failed %u / %u\n", i + 1, num_cos);
+ break;
+ }
+ /* Same source CoS used for all PMRs */
+ src_cos[i] = cos[0];
+
+ cos_created++;
+ }
+
+ CU_ASSERT(cos_created == num_cos);
+
+ if (cos_created != num_cos)
+ goto destroy_cos;
+
+ ret = odp_pktio_default_cos_set(pktio, cos[0]);
+ CU_ASSERT_FATAL(ret == 0);
+
+ for (i = 0; i < num_pmr; i++) {
+ val[i] = 1024 + i;
+
+ odp_cls_pmr_param_init(&pmr_param[i]);
+ pmr_param[i].term = find_first_supported_l3_pmr();
+ pmr_param[i].match.value = &val[i];
+ pmr_param[i].match.mask = &mask;
+ pmr_param[i].val_sz = sizeof(val[i]);
+
+ odp_cls_pmr_create_opt_init(&pmr_opt[i]);
+ pmr_opt[i].terms = &pmr_param[i];
+ pmr_opt[i].num_terms = 1;
+ }
+
+ do {
+ ret = odp_cls_pmr_create_multi(&pmr_opt[pmr_created],
+ &src_cos[pmr_created],
+ &cos[pmr_created + 1],
+ &pmr[pmr_created],
+ num_pmr - pmr_created);
+ CU_ASSERT_FATAL(ret <= (int)(num_pmr - pmr_created));
+
+ if (ret < 0)
+ break;
+
+ num_retries = (ret == 0) ? num_retries + 1 : 0;
+ pmr_created += ret;
+ } while (pmr_created < num_pmr && num_retries < max_retries);
+
+ CU_ASSERT(pmr_created > 0);
+
+ num_left = pmr_created;
+ while (num_left) {
+ ret = odp_cls_pmr_destroy_multi(&pmr[num_freed], num_left);
+
+ CU_ASSERT_FATAL(ret > 0 && (uint32_t)ret <= num_left);
+
+ num_left -= ret;
+ num_freed += ret;
+ }
+
+ ret = odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ CU_ASSERT_FATAL(ret == 0);
+
+destroy_cos:
+ for (i = 0; i < cos_created; i++)
+ CU_ASSERT(odp_cos_destroy(cos[i]) == 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void cls_cos_set_queue(void)
+{
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_queue_t queue_cos;
+ odp_cos_t cos_queue;
+ odp_queue_t recvqueue;
+ odp_queue_t queue_out = ODP_QUEUE_INVALID;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "CoSQueue");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos_queue = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);
+
+ queue_cos = queue_create("QueueCoS", true);
+ CU_ASSERT_FATAL(queue_cos != ODP_QUEUE_INVALID);
+
+ retval = odp_cos_queue_set(cos_queue, queue_cos);
+ CU_ASSERT(retval == 0);
+ recvqueue = odp_cos_queue(cos_queue);
+ CU_ASSERT(recvqueue == queue_cos);
+ CU_ASSERT(odp_cls_cos_num_queue(cos_queue) == 1);
+ CU_ASSERT(odp_cls_cos_queues(cos_queue, &queue_out, 1) == 1);
+ CU_ASSERT(queue_out == queue_cos);
+
+ odp_cos_destroy(cos_queue);
+ odp_queue_destroy(queue_cos);
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+}
+
+static void cls_cos_set_pool(void)
+{
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_pool_t cos_pool;
+ odp_cos_t cos;
+ odp_pool_t recvpool;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "CoSQueue");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ cos_pool = pool_create("PoolCoS");
+ CU_ASSERT_FATAL(cos_pool != ODP_POOL_INVALID);
+
+ retval = odp_cls_cos_pool_set(cos, cos_pool);
+ CU_ASSERT(retval == 0);
+ recvpool = odp_cls_cos_pool(cos);
+ CU_ASSERT(recvpool == cos_pool);
+
+ odp_cos_destroy(cos);
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(cos_pool);
+}
+
+static void cls_pmr_composite_create(void)
+{
+ odp_cls_capability_t capa;
+ odp_pmr_t pmr_composite;
+ int retval;
+ odp_cos_t default_cos;
+ odp_cos_t cos;
+ odp_queue_t default_queue;
+ odp_queue_t queue;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t pkt_pool;
+ odp_cls_cos_param_t cls_param;
+ odp_pktio_t pktio;
+ uint32_t max_terms_per_pmr;
+ uint16_t val = 1024;
+ uint16_t mask = 0xffff;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ pkt_pool = pool_create("pkt_pool");
+ CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("pmr_match", true);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("pmr_match");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create("pmr_match", &cls_param);
+ CU_ASSERT(cos != ODP_COS_INVALID);
+
+ max_terms_per_pmr = capa.max_terms_per_pmr;
+ odp_pmr_param_t pmr_terms[max_terms_per_pmr];
+
+ for (uint32_t i = 0; i < max_terms_per_pmr; i++) {
+ odp_cls_pmr_param_init(&pmr_terms[i]);
+ pmr_terms[i].term = ODP_PMR_TCP_DPORT;
+ pmr_terms[i].match.value = &val;
+ pmr_terms[i].range_term = false;
+ pmr_terms[i].match.mask = &mask;
+ pmr_terms[i].val_sz = sizeof(val);
+ }
+
+ pmr_composite = odp_cls_pmr_create(pmr_terms, max_terms_per_pmr, default_cos, cos);
+ CU_ASSERT(odp_pmr_to_u64(pmr_composite) !=
+ odp_pmr_to_u64(ODP_PMR_INVALID));
+
+ printf("\n");
+ odp_cls_print_all();
+
+ retval = odp_cls_pmr_destroy(pmr_composite);
+ CU_ASSERT(retval == 0);
+
+ odp_cos_destroy(cos);
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(pkt_pool);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(default_pool);
+ odp_pktio_close(pktio);
+}
+
+static void cls_create_cos_with_hash_queues(void)
+{
+ odp_pool_t pool;
+ odp_cls_capability_t capa;
+ int ret;
+ odp_queue_param_t q_param;
+ odp_cls_cos_param_t cls_param;
+ odp_cos_t cos;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ ret = odp_cls_capability(&capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT_FATAL(capa.hash_protocols.all_bits != 0);
+
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.num_queue = capa.max_hash_queues;
+ cls_param.queue_param = q_param;
+ cls_param.hash_proto.all_bits = capa.hash_protocols.all_bits;
+ cls_param.pool = pool;
+
+ cos = odp_cls_cos_create(NULL, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ ret = odp_cos_destroy(cos);
+ CU_ASSERT(ret == 0);
+
+ odp_pool_destroy(pool);
+}
+
+static int check_capa_cos_hashing(void)
+{
+ odp_cls_capability_t capa;
+
+ if (odp_cls_capability(&capa) < 0)
+ return ODP_TEST_INACTIVE;
+
+ return capa.max_hash_queues > 1 ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
+odp_testinfo_t classification_suite_basic[] = {
+ ODP_TEST_INFO(cls_default_values),
+ ODP_TEST_INFO(cls_create_cos),
+ ODP_TEST_INFO(cls_create_cos_multi),
+ ODP_TEST_INFO(cls_create_cos_max),
+ ODP_TEST_INFO(cls_create_cos_max_stats),
+ ODP_TEST_INFO(cls_destroy_cos),
+ ODP_TEST_INFO(cls_create_pmr_match),
+ ODP_TEST_INFO(cls_create_pmr_multi),
+ ODP_TEST_INFO(cls_max_pmr_from_default_drop),
+ ODP_TEST_INFO(cls_max_pmr_from_default_enqueue),
+ ODP_TEST_INFO(cls_cos_set_queue),
+ ODP_TEST_INFO(cls_cos_set_pool),
+ ODP_TEST_INFO(cls_pmr_composite_create),
+ ODP_TEST_INFO_CONDITIONAL(cls_create_cos_with_hash_queues, check_capa_cos_hashing),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index 3b379c14c..1fb4c51b5 100644
--- a/test/common_plat/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,7 +8,7 @@
#include "odp_classification_testsuites.h"
#include "classification.h"
#include <odp_cunit_common.h>
-#include "test_debug.h"
+#include <odp/helper/odph_api.h>
typedef struct cls_test_packet {
odp_u32be_t magic;
@@ -24,7 +25,11 @@ static uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 0, 0, 100
};
-odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool)
+#define ODP_GTPU_UDP_PORT 2152
+#define AH_HDR_LEN 24
+
+odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
+ odp_bool_t cls_enable)
{
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
@@ -44,20 +49,22 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool)
if (pktio == ODP_PKTIO_INVALID) {
ret = odp_pool_destroy(pool);
if (ret)
- fprintf(stderr, "unable to destroy pool.\n");
+ ODPH_ERR("Unable to destroy pool\n");
return ODP_PKTIO_INVALID;
}
odp_pktin_queue_param_init(&pktin_param);
pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ pktin_param.classifier_enable = cls_enable;
+ pktin_param.hash_enable = false;
if (odp_pktin_queue_config(pktio, &pktin_param)) {
- fprintf(stderr, "pktin queue config failed.\n");
+ ODPH_ERR("Pktin queue config failed\n");
return ODP_PKTIO_INVALID;
}
if (odp_pktout_queue_config(pktio, NULL)) {
- fprintf(stderr, "pktout queue config failed.\n");
+ ODPH_ERR("Pktout queue config failed\n");
return ODP_PKTIO_INVALID;
}
@@ -69,7 +76,7 @@ int stop_pktio(odp_pktio_t pktio)
odp_event_t ev;
if (odp_pktio_stop(pktio)) {
- fprintf(stderr, "pktio stop failed.\n");
+ ODPH_ERR("Pktio stop failed\n");
return -1;
}
@@ -85,55 +92,60 @@ int stop_pktio(odp_pktio_t pktio)
return 0;
}
+static uint32_t seqno_offset(odp_packet_t pkt)
+{
+ uint32_t l3_offset = odp_packet_l3_offset(pkt);
+ int rc;
+ uint16_t len = 0;
+
+ CU_ASSERT_FATAL(l3_offset != ODP_PACKET_OFFSET_INVALID);
+
+ if (odp_packet_has_ipv4(pkt)) {
+ odph_ipv4hdr_t ip;
+
+ rc = odp_packet_copy_to_mem(pkt, l3_offset, sizeof(ip), &ip);
+ CU_ASSERT_FATAL(rc == 0);
+ len = odp_be_to_cpu_16(ip.tot_len);
+ } else if (odp_packet_has_ipv6(pkt)) {
+ odph_ipv6hdr_t ip;
+
+ rc = odp_packet_copy_to_mem(pkt, l3_offset, sizeof(ip), &ip);
+ CU_ASSERT_FATAL(rc == 0);
+ len = sizeof(ip) + odp_be_to_cpu_16(ip.payload_len);
+ } else {
+ CU_FAIL_FATAL("Unexpected packet type");
+ }
+
+ return l3_offset + len - sizeof(cls_test_packet_t);
+}
+
int cls_pkt_set_seq(odp_packet_t pkt)
{
- static uint32_t seq;
cls_test_packet_t data;
+ static uint32_t seq;
uint32_t offset;
- odph_ipv4hdr_t *ip;
- odph_tcphdr_t *tcp;
int status;
data.magic = DATA_MAGIC;
data.seq = ++seq;
- ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- offset = odp_packet_l4_offset(pkt);
- CU_ASSERT_FATAL(offset != ODP_PACKET_OFFSET_INVALID);
-
- if (ip->proto == ODPH_IPPROTO_UDP)
- status = odp_packet_copy_from_mem(pkt, offset + ODPH_UDPHDR_LEN,
- sizeof(data), &data);
- else {
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- status = odp_packet_copy_from_mem(pkt, offset + tcp->hl * 4,
- sizeof(data), &data);
- }
+ offset = seqno_offset(pkt);
+
+ status = odp_packet_copy_from_mem(pkt, offset, sizeof(data), &data);
return status;
}
uint32_t cls_pkt_get_seq(odp_packet_t pkt)
{
- uint32_t offset;
cls_test_packet_t data;
- odph_ipv4hdr_t *ip;
- odph_tcphdr_t *tcp;
-
- ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- offset = odp_packet_l4_offset(pkt);
+ uint32_t offset;
+ int rc;
- if (offset == ODP_PACKET_OFFSET_INVALID || ip == NULL)
- return TEST_SEQ_INVALID;
+ offset = seqno_offset(pkt);
- if (ip->proto == ODPH_IPPROTO_UDP)
- odp_packet_copy_to_mem(pkt, offset + ODPH_UDPHDR_LEN,
- sizeof(data), &data);
- else {
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- odp_packet_copy_to_mem(pkt, offset + tcp->hl * 4,
- sizeof(data), &data);
- }
+ rc = odp_packet_copy_to_mem(pkt, offset, sizeof(data), &data);
+ CU_ASSERT_FATAL(rc == 0);
if (data.magic == DATA_MAGIC)
return data.seq;
@@ -175,17 +187,50 @@ int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask)
void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio)
{
odp_pktout_queue_t pktout;
+ int ret;
+
+ ret = odp_pktout_queue(pktio, &pktout, 1);
+
+ if (ret != 1) {
+ CU_FAIL_FATAL("No pktout queue");
+ return;
+ }
- CU_ASSERT_FATAL(odp_pktout_queue(pktio, &pktout, 1) == 1);
CU_ASSERT(odp_pktout_send(pktout, &pkt, 1) == 1);
}
-odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns)
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns, odp_bool_t enable_pktv)
{
odp_event_t ev;
+ uint64_t wait = odp_schedule_wait_time(ns);
+
+ ev = odp_schedule(queue, wait);
+ if (ev == ODP_EVENT_INVALID)
+ return ODP_PACKET_INVALID;
+
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ return odp_packet_from_event(ev);
+ } else if (enable_pktv && odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR) {
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkt_tbl;
+ odp_packet_t pkt;
+ uint32_t pktv_len;
+
+ pktv = odp_packet_vector_from_event(ev);
+ pktv_len = odp_packet_vector_tbl(pktv, &pkt_tbl);
+
+ CU_ASSERT_FATAL(pktv_len > 0);
+
+ pkt = pkt_tbl[0];
+ if (pktv_len > 1)
+ odp_packet_free_multi(&pkt_tbl[1], pktv_len - 1);
+ odp_packet_vector_free(pktv);
+ return pkt;
+ }
+
+ odp_event_free(ev);
+ return ODP_PACKET_INVALID;
- ev = odp_schedule(queue, ns);
- return odp_packet_from_event(ev);
}
odp_queue_t queue_create(const char *queuename, bool sched)
@@ -196,7 +241,7 @@ odp_queue_t queue_create(const char *queuename, bool sched)
if (sched) {
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -221,18 +266,51 @@ odp_pool_t pool_create(const char *poolname)
return odp_pool_create(poolname, &param);
}
+odp_pool_t pktv_pool_create(const char *poolname)
+{
+ odp_pool_capability_t capa;
+ odp_pool_param_t param;
+
+ if (odp_pool_capability(&capa)) {
+ ODPH_ERR("Pool capability failed\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (capa.vector.max_pools == 0) {
+ ODPH_ERR("No packet vector pools available\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (capa.vector.max_num && capa.vector.max_num < SHM_PKT_NUM_BUFS) {
+ ODPH_ERR("Unable to create large enough (%d) packet vector pool\n",
+ SHM_PKT_NUM_BUFS);
+ return ODP_POOL_INVALID;
+ }
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = SHM_PKT_NUM_BUFS;
+ param.vector.max_size = capa.vector.max_size;
+
+ return odp_pool_create(poolname, &param);
+}
+
odp_packet_t create_packet(cls_packet_info_t pkt_info)
{
uint32_t seqno;
odph_ethhdr_t *ethhdr;
odph_udphdr_t *udp;
odph_tcphdr_t *tcp;
+ odph_sctphdr_t *sctp;
+ odph_icmphdr_t *icmp;
odph_ipv4hdr_t *ip;
odph_ipv6hdr_t *ipv6;
+ odph_gtphdr_t *gtpu;
+ odph_igmphdr_t *igmp;
+ odph_ahhdr_t *ah;
+ odph_esphdr_t *esp;
+ uint8_t *hlen = 0;
uint16_t payload_len;
- uint64_t src_mac = CLS_DEFAULT_SMAC;
- uint64_t dst_mac = CLS_DEFAULT_DMAC;
- uint64_t dst_mac_be;
uint32_t addr = 0;
uint32_t mask;
odp_packet_t pkt;
@@ -245,24 +323,57 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
uint16_t l3_hdr_len = 0;
uint16_t l4_hdr_len = 0;
uint16_t eth_type;
- odp_u16be_t *vlan_type;
odph_vlanhdr_t *vlan_hdr;
-
- /* 48 bit ethernet address needs to be left shifted for proper
- value after changing to be*/
- dst_mac_be = odp_cpu_to_be_64(dst_mac);
- if (dst_mac != dst_mac_be)
- dst_mac_be = dst_mac_be >> (64 - 8 * ODPH_ETHADDR_LEN);
+ uint8_t src_mac[] = CLS_DEFAULT_SMAC;
+ uint8_t dst_mac[] = CLS_DEFAULT_DMAC;
payload_len = sizeof(cls_test_packet_t) + pkt_info.len;
+ if (pkt_info.l4_type == CLS_PKT_L4_GTP)
+ payload_len += sizeof(odph_gtphdr_t);
+
seqno = odp_atomic_fetch_inc_u32(pkt_info.seq);
vlan_hdr_len = pkt_info.vlan ? ODPH_VLANHDR_LEN : 0;
vlan_hdr_len = pkt_info.vlan_qinq ? 2 * vlan_hdr_len : vlan_hdr_len;
l3_hdr_len = pkt_info.ipv6 ? ODPH_IPV6HDR_LEN : ODPH_IPV4HDR_LEN;
- l4_hdr_len = pkt_info.udp ? ODPH_UDPHDR_LEN : ODPH_TCPHDR_LEN;
eth_type = pkt_info.ipv6 ? ODPH_ETHTYPE_IPV6 : ODPH_ETHTYPE_IPV4;
- next_hdr = pkt_info.udp ? ODPH_IPPROTO_UDP : ODPH_IPPROTO_TCP;
+ next_hdr = ODPH_IPPROTO_TCP;
+ l4_hdr_len = ODPH_TCPHDR_LEN;
+
+ switch (pkt_info.l4_type) {
+ case CLS_PKT_L4_TCP:
+ next_hdr = ODPH_IPPROTO_TCP;
+ l4_hdr_len = ODPH_TCPHDR_LEN;
+ break;
+ case CLS_PKT_L4_GTP:
+ case CLS_PKT_L4_UDP:
+ next_hdr = ODPH_IPPROTO_UDP;
+ l4_hdr_len = ODPH_UDPHDR_LEN;
+ break;
+ case CLS_PKT_L4_SCTP:
+ next_hdr = ODPH_IPPROTO_SCTP;
+ l4_hdr_len = ODPH_SCTPHDR_LEN;
+ break;
+ case CLS_PKT_L4_ICMP:
+ next_hdr = ODPH_IPPROTO_ICMPV4;
+ l4_hdr_len = ODPH_ICMPHDR_LEN;
+ break;
+ case CLS_PKT_L4_IGMP:
+ next_hdr = ODPH_IPPROTO_IGMP;
+ l4_hdr_len = ODPH_IGMP_HLEN;
+ break;
+ case CLS_PKT_L4_AH:
+ next_hdr = ODPH_IPPROTO_AH;
+ l4_hdr_len = AH_HDR_LEN;
+ break;
+ case CLS_PKT_L4_ESP:
+ next_hdr = ODPH_IPPROTO_ESP;
+ l4_hdr_len = ODPH_ESPHDR_LEN;
+ break;
+ default:
+ ODPH_ASSERT(0);
+ }
+
l2_hdr_len = ODPH_ETHHDR_LEN + vlan_hdr_len;
l4_len = l4_hdr_len + payload_len;
l3_len = l3_hdr_len + l4_len;
@@ -277,24 +388,25 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odp_packet_l2_offset_set(pkt, 0);
ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
memcpy(ethhdr->src.addr, &src_mac, ODPH_ETHADDR_LEN);
- memcpy(ethhdr->dst.addr, &dst_mac_be, ODPH_ETHADDR_LEN);
- vlan_type = (odp_u16be_t *)&ethhdr->type;
+ memcpy(ethhdr->dst.addr, &dst_mac, ODPH_ETHADDR_LEN);
vlan_hdr = (odph_vlanhdr_t *)(ethhdr + 1);
- if (pkt_info.vlan_qinq) {
- odp_packet_has_vlan_qinq_set(pkt, 1);
- *vlan_type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN_OUTER);
- vlan_hdr->tci = odp_cpu_to_be_16(0);
- vlan_type = (uint16_t *)&vlan_hdr->type;
- vlan_hdr++;
- }
if (pkt_info.vlan) {
+ if (pkt_info.vlan_qinq) {
+ odp_packet_has_vlan_qinq_set(pkt, 1);
+ ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN_OUTER);
+ vlan_hdr->tci = odp_cpu_to_be_16(0);
+ vlan_hdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
+ vlan_hdr++;
+ } else {
+ odp_packet_has_vlan_set(pkt, 1);
+ ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
+ }
/* Default vlan header */
- odp_packet_has_vlan_set(pkt, 1);
- *vlan_type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
vlan_hdr->tci = odp_cpu_to_be_16(0);
vlan_hdr->type = odp_cpu_to_be_16(eth_type);
} else {
+ CU_ASSERT_FATAL(!pkt_info.vlan_qinq);
ethhdr->type = odp_cpu_to_be_16(eth_type);
}
@@ -312,18 +424,20 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
ip->src_addr = odp_cpu_to_be_32(addr);
ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
ip->id = odp_cpu_to_be_16(seqno);
- ip->chksum = 0;
- ip->chksum = odph_ipv4_csum_update(pkt);
ip->proto = next_hdr;
ip->tot_len = odp_cpu_to_be_16(l3_len);
ip->ttl = DEFAULT_TTL;
+ ip->frag_offset = 0;
+ ip->tos = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
odp_packet_has_ipv4_set(pkt, 1);
+ odph_ipv4_csum_update(pkt);
} else {
/* ipv6 */
odp_packet_has_ipv6_set(pkt, 1);
ipv6 = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
version = ODPH_IPV6 << ODPH_IPV6HDR_VERSION_SHIFT;
- tc = DEFAULT_TOS << ODPH_IPV6HDR_TC_SHIFT;
+ tc = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
+ tc <<= ODPH_IPV6HDR_TC_SHIFT;
flow = seqno << ODPH_IPV6HDR_FLOW_LABEL_SHIFT;
ver_tc_flow = version | tc | flow;
@@ -339,26 +453,85 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odp_packet_l4_offset_set(pkt, l4_offset);
tcp = (odph_tcphdr_t *)(buf + l4_offset);
udp = (odph_udphdr_t *)(buf + l4_offset);
-
- /* udp */
- if (pkt_info.udp) {
+ sctp = (odph_sctphdr_t *)(buf + l4_offset);
+ icmp = (odph_icmphdr_t *)(buf + l4_offset);
+ igmp = (odph_igmphdr_t *)(buf + l4_offset);
+ ah = (odph_ahhdr_t *)(buf + l4_offset);
+ esp = (odph_esphdr_t *)(buf + l4_offset);
+
+ if (pkt_info.l4_type == CLS_PKT_L4_IGMP) {
+ igmp->group = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ igmp->type = 0x12;
+ igmp->code = 0;
+ igmp->csum = 0;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_ICMP) {
+ icmp->type = ODPH_ICMP_ECHO;
+ icmp->code = 0;
+ icmp->un.echo.id = 0;
+ icmp->un.echo.sequence = 0;
+ icmp->chksum = 0;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_SCTP) {
+ sctp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ sctp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ sctp->tag = 0;
+ sctp->chksum = 0;
+ odp_packet_has_sctp_set(pkt, 1);
+ if (odph_sctp_chksum_set(pkt) != 0) {
+ ODPH_ERR("odph_sctp_chksum failed\n");
+ return ODP_PACKET_INVALID;
+ }
+ } else if (pkt_info.l4_type == CLS_PKT_L4_UDP) {
+ /* udp */
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
udp->chksum = 0;
odp_packet_has_udp_set(pkt, 1);
if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0) {
- LOG_ERR("odph_udp_tcp_chksum failed\n");
+ ODPH_ERR("odph_udp_tcp_chksum failed\n");
+ return ODP_PACKET_INVALID;
+ }
+ } else if (pkt_info.l4_type == CLS_PKT_L4_GTP) {
+ udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ udp->dst_port = odp_cpu_to_be_16(ODP_GTPU_UDP_PORT);
+ udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
+ udp->chksum = 0;
+ odp_packet_has_udp_set(pkt, 1);
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + sizeof(odph_udphdr_t));
+ gtpu->teid = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ /* GTPv1 without optional headers */
+ gtpu->gtp_hdr_info = 0x30;
+ /* GTP echo request */
+ gtpu->msg_type = 1;
+ gtpu->plen = sizeof(cls_test_packet_t);
+ if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0) {
+ ODPH_ERR("odph_udp_tcp_chksum failed\n");
return ODP_PACKET_INVALID;
}
+ } else if (pkt_info.l4_type == CLS_PKT_L4_AH) {
+ ah->next_header = ODPH_IPV4;
+ ah->ah_len = AH_HDR_LEN / 4 - 2;
+ ah->pad = 0;
+ ah->spi = 256;
+ ah->seq_no = 1;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_ESP) {
+ esp->spi = 256;
+ esp->seq_no = 1;
} else {
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ tcp->doffset_flags = 0;
+ tcp->seq_no = 0;
+ tcp->ack_no = 0;
+ tcp->window = 0;
+ tcp->urgptr = 0;
tcp->hl = ODPH_TCPHDR_LEN / 4;
+ tcp->ack = 1;
tcp->cksm = 0;
odp_packet_has_tcp_set(pkt, 1);
if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0) {
- LOG_ERR("odph_udp_tcp_chksum failed\n");
+ ODPH_ERR("odph_udp_tcp_chksum failed\n");
return ODP_PACKET_INVALID;
}
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
new file mode 100644
index 000000000..7db0e1b5e
--- /dev/null
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -0,0 +1,2186 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_NUM_UDP 4
+#define MARK_IP 1
+#define MARK_UDP 2
+#define TEST_IPV4 false
+#define TEST_IPV6 true
+
+static odp_pool_t pkt_pool;
+/** sequence number of IP packets */
+static odp_atomic_u32_t seq;
+
+static cls_packet_info_t default_pkt_info;
+static odp_cls_capability_t cls_capa;
+
+int classification_suite_pmr_init(void)
+{
+ memset(&cls_capa, 0, sizeof(odp_cls_capability_t));
+
+ if (odp_cls_capability(&cls_capa)) {
+ ODPH_ERR("Classifier capability call failed\n");
+ return -1;
+ }
+
+ pkt_pool = pool_create("classification_pmr_pool");
+ if (ODP_POOL_INVALID == pkt_pool) {
+ ODPH_ERR("Packet pool creation failed\n");
+ return -1;
+ }
+
+ memset(&default_pkt_info, 0, sizeof(cls_packet_info_t));
+ default_pkt_info.pool = pkt_pool;
+ default_pkt_info.seq = &seq;
+
+ odp_atomic_init_u32(&seq, 0);
+
+ return 0;
+}
+
+static int start_pktio(odp_pktio_t pktio)
+{
+ if (odp_pktio_start(pktio)) {
+ ODPH_ERR("Unable to start loop\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
+ odp_queue_t *queue, odp_pool_t *pool)
+{
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t default_pool;
+ odp_cos_t default_cos;
+ odp_queue_t default_queue;
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+
+ default_pool = pool_create("DefaultPool");
+ CU_ASSERT(default_pool != ODP_POOL_INVALID);
+
+ default_queue = queue_create("DefaultQueue", true);
+ CU_ASSERT(default_queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "DefaultCos");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = default_pool;
+ cls_param.queue = default_queue;
+
+ default_cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT(default_cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_default_cos_set(pktio, default_cos);
+ CU_ASSERT(retval == 0);
+
+ *cos = default_cos;
+ *queue = default_queue;
+ *pool = default_pool;
+}
+
+int classification_suite_pmr_term(void)
+{
+ int ret = 0;
+
+ if (0 != odp_pool_destroy(pkt_pool)) {
+ ODPH_ERR("Packet pool destroy failed\n");
+ ret += -1;
+ }
+
+ if (odp_cunit_print_inactive())
+ ret += -1;
+
+ return ret;
+}
+
+static void cls_pktin_classifier_flag(void)
+{
+ odp_packet_t pkt;
+ odph_tcphdr_t *tcp;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_pool_t pool_recv;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+ seqno = 0;
+
+ /* classifier is disabled in pktin queue configuration */
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, false);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("tcp_dport1", true);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("tcp_dport1");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "tcp_dport");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT(cos != ODP_COS_INVALID);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = val;
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ /* since classifier flag is disabled in pktin queue configuration
+ packet will not be delivered in classifier queues */
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pool_recv = odp_packet_pool(pkt);
+ /* since classifier is disabled packet should not be received in
+ pool and queue configured with classifier */
+ CU_ASSERT(pool != pool_recv);
+ CU_ASSERT(retqueue != queue);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+
+ odp_packet_free(pkt);
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(default_pool);
+ odp_pktio_close(pktio);
+}
+
+/* Verify TCP destination port matching with num_pkt packets per phase:
+ * first all-matching packets (delivered to the PMR CoS queue/pool), then
+ * all non-matching packets (delivered to the default CoS), and finally an
+ * interleaved mix sent to both queues simultaneously. */
+static void cls_pmr_term_tcp_dport_n(int num_pkt)
+{
+ odp_packet_t pkt;
+ odph_tcphdr_t *tcp;
+ uint32_t seqno[num_pkt];
+ uint16_t val;
+ uint16_t mask;
+ int retval, i, sent_queue, recv_queue, sent_default, recv_default;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_pool_t pool_recv;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ /* Match values are compared in network byte order */
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ /* Queue and CoS handles are used unconditionally below, so creation
+ * failures must abort the test instead of crashing it */
+ queue = queue_create("tcp_dport1", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("tcp_dport1");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "tcp_dport");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+
+ /* Matching packets are delivered to the PMR CoS queue and pool */
+ for (i = 0; i < num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno[i] = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno[i] != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = val;
+
+ enqueue_pktio_interface(pkt, pktio);
+ }
+
+ for (i = 0; i < num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pool_recv = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_recv);
+ CU_ASSERT(retqueue == queue);
+ CU_ASSERT(seqno[i] == cls_pkt_get_seq(pkt));
+
+ odp_packet_free(pkt);
+ }
+
+ /* Other packets are delivered to default queue */
+ for (i = 0; i < num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno[i] = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno[i] != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+
+ enqueue_pktio_interface(pkt, pktio);
+ }
+
+ for (i = 0; i < num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno[i] == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+
+ odp_packet_free(pkt);
+ }
+
+ sent_queue = 0;
+ sent_default = 0;
+
+ /* Both queues simultaneously: 2 of every 5 packets match the PMR */
+ for (i = 0; i < 2 * num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+
+ if ((i % 5) < 2) {
+ sent_queue++;
+ tcp->dst_port = val;
+ } else {
+ sent_default++;
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+ }
+
+ enqueue_pktio_interface(pkt, pktio);
+ }
+
+ recv_queue = 0;
+ recv_default = 0;
+
+ for (i = 0; i < 2 * num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(retqueue == queue || retqueue == default_queue);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+
+ if (retqueue == queue) {
+ recv_queue++;
+ CU_ASSERT(tcp->dst_port == val);
+ } else if (retqueue == default_queue) {
+ recv_default++;
+ CU_ASSERT(tcp->dst_port ==
+ odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1));
+ }
+ odp_packet_free(pkt);
+ }
+
+ /* No packets may be lost or misclassified */
+ CU_ASSERT(sent_queue == recv_queue);
+ CU_ASSERT(sent_default == recv_default);
+
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(default_pool);
+ odp_pktio_close(pktio);
+}
+
+/* Expected classification outcome for test_pmr() */
+typedef enum match_t {
+ MATCH,
+ NO_MATCH
+} match_t;
+
+/*
+ * Test that PMR created using the given parameters matches or does not match
+ * given packet. The packet, that gets consumed, must have been created using
+ * create_packet() so that it contains the testing sequence number.
+ *
+ * Ethernet addresses of the packet will be overwritten.
+ */
+static void test_pmr(const odp_pmr_param_t *pmr_param, odp_packet_t pkt,
+ match_t match)
+{
+ uint32_t seqno;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("PMR test queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("PMR test pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create("PMR test cos", &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ pmr = odp_cls_pmr_create(pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+
+ /* Address the packet to the loop interface itself */
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+
+ /* Matching packets land in the PMR CoS queue/pool, others in the
+ * default CoS queue/pool */
+ if (match == MATCH) {
+ CU_ASSERT(retqueue == queue);
+ CU_ASSERT(recvpool == pool);
+ } else {
+ CU_ASSERT(retqueue == default_queue);
+ CU_ASSERT(recvpool == default_pool);
+ }
+
+ odp_packet_free(pkt);
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/* Match TCP source port with a full 16-bit mask */
+static void cls_pmr_term_tcp_sport(void)
+{
+ odp_packet_t pkt;
+ odph_tcphdr_t *tcp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_SPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Different source port must not match */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match UDP destination port with a full 16-bit mask */
+static void cls_pmr_term_udp_dport(void)
+{
+ odp_packet_t pkt;
+ odph_udphdr_t *udp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_UDP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Different destination port must not match */
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match UDP source port with a full 16-bit mask */
+static void cls_pmr_term_udp_sport(void)
+{
+ odp_packet_t pkt;
+ odph_udphdr_t *udp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_UDP_SPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->src_port = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Different source port must not match */
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the IP protocol field (UDP). 'ipv6' selects IPv4 or IPv6 packets.
+ * A TCP packet must not match the UDP protocol value. */
+static void cls_pmr_term_proto_ip(odp_bool_t ipv6)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = ODPH_IPPROTO_UDP;
+ mask = 0xff;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPPROTO;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt_info.l4_type = CLS_PKT_L4_TCP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* IP protocol match on IPv4 packets */
+static void cls_pmr_term_ipv4_proto(void)
+{
+ cls_pmr_term_proto_ip(TEST_IPV4);
+}
+
+/* IP protocol match on IPv6 packets */
+static void cls_pmr_term_ipv6_proto(void)
+{
+ cls_pmr_term_proto_ip(TEST_IPV6);
+}
+
+/* Match the 6-bit DSCP field. 'ipv6' selects IPv4 or IPv6 packets. */
+static void cls_pmr_term_dscp_ip(odp_bool_t ipv6)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = DSCP_CLASS4;
+ mask = 0x3f;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IP_DSCP;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt_info.dscp = DSCP_CLASS4;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* DSCP 0 must not match DSCP_CLASS4 */
+ pkt_info.dscp = 0;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* DSCP match on IPv4 packets */
+static void cls_pmr_term_ipv4_dscp(void)
+{
+ cls_pmr_term_dscp_ip(TEST_IPV4);
+}
+
+/* DSCP match on IPv6 packets */
+static void cls_pmr_term_ipv6_dscp(void)
+{
+ cls_pmr_term_dscp_ip(TEST_IPV6);
+}
+
+/* Match the destination MAC address. A packet carrying the matching dst
+ * MAC is delivered to the PMR CoS; any other packet falls back to the
+ * default CoS. */
+static void cls_pmr_term_dmac(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ cls_packet_info_t pkt_info;
+ uint8_t val[] = {0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee};
+ uint8_t mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("dmac", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("dmac");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "dmac");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_DMAC;
+ /* Pass the arrays directly (decay to uint8_t *), consistent with the
+ * IPv6 address tests, instead of taking pointer-to-array */
+ pmr_param.match.value = val;
+ pmr_param.match.mask = mask;
+ pmr_param.val_sz = ODPH_ETHADDR_LEN;
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ memcpy(eth->dst.addr, val, ODPH_ETHADDR_LEN);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ odp_packet_free(pkt);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/* Match total packet length against a masked value */
+static void cls_pmr_term_packet_len(void)
+{
+ odp_packet_t pkt;
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = 1024;
+ /* Value 1024 (0x0400) with mask 0xff00 matches any packet of
+ * length 1024 - 1279 */
+ mask = 0xff00;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_LEN;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ /* create packet of payload length 1024 */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt_info.len = 1024;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default-length packet falls outside the matched range */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the VLAN ID of the outermost (first) VLAN header */
+static void cls_pmr_term_vlan_id_0(void)
+{
+ odp_packet_t pkt;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_0;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(0x123);
+ mask = odp_cpu_to_be_16(0xfff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_ID_0;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ /* First VLAN header immediately follows the Ethernet header */
+ vlan_0 = (odph_vlanhdr_t *)(eth + 1);
+ vlan_0->tci = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Packet without a VLAN tag must not match */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the VLAN ID of the innermost VLAN header, with both a single
+ * VLAN tag and a QinQ double tag */
+static void cls_pmr_term_vlan_id_x(void)
+{
+ odp_packet_t pkt;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_x;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(0x345);
+ mask = odp_cpu_to_be_16(0xfff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_ID_X;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ /* Single VLAN */
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_x = (odph_vlanhdr_t *)(eth + 1);
+ vlan_x->tci = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Two VLANs */
+ pkt_info.vlan_qinq = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_x = (odph_vlanhdr_t *)(eth + 1);
+ /* Step over the outer tag to the inner (second) VLAN header */
+ vlan_x++;
+ vlan_x->tci = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* No VLAN */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the 3-bit PCP (priority) field of the first VLAN header */
+static void cls_pmr_term_vlan_pcp_0(void)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ uint16_t tci;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_0;
+ cls_packet_info_t pkt_info;
+
+ val = 5;
+ mask = 0x7;
+ /* Build a TCI with PCP = 5 and VLAN ID = 0x123 */
+ tci = ((uint16_t)val) << ODPH_VLANHDR_PCP_SHIFT;
+ tci |= 0x123;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_PCP_0;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_0 = (odph_vlanhdr_t *)(eth + 1);
+ vlan_0->tci = odp_cpu_to_be_16(tci);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default VLAN tag (PCP left untouched) must not match */
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the outermost Ethertype (IPv6) */
+static void cls_pmr_term_eth_type_0(void)
+{
+ odp_packet_t pkt;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV6);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ETHTYPE_0;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default (IPv4) packet must not match the IPv6 Ethertype */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match the innermost Ethertype (the one following the last VLAN tag),
+ * with a single VLAN tag and with a QinQ double tag. An untagged packet
+ * has no inner Ethertype and must not match. */
+static void cls_pmr_term_eth_type_x(void)
+{
+ odp_packet_t pkt;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_x;
+ cls_packet_info_t pkt_info;
+
+ /* Use the named constant instead of magic 0x0800, consistent with
+ * the ODPH_ETHTYPE_IPV6 usage in cls_pmr_term_eth_type_0() */
+ val = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ETHTYPE_X;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ /* Single VLAN */
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_x = (odph_vlanhdr_t *)(eth + 1);
+ vlan_x->tci = odp_cpu_to_be_16(0x123);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Two VLANs */
+ pkt_info.vlan_qinq = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_x = (odph_vlanhdr_t *)(eth + 1);
+ vlan_x++;
+ vlan_x->tci = odp_cpu_to_be_16(0x123);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* No VLAN */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Verify that a new pool set on a CoS with odp_cls_cos_pool_set() is the
+ * one used for packets classified into that CoS */
+static void cls_pmr_pool_set(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint8_t val;
+ uint8_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t pool_new;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ cls_packet_info_t pkt_info;
+
+ val = ODPH_IPPROTO_UDP;
+ mask = 0xff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("ipproto1", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("ipproto1");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "ipproto1");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ pool_new = pool_create("ipproto2");
+ CU_ASSERT_FATAL(pool_new != ODP_POOL_INVALID);
+
+ /* new pool is set on CoS */
+ retval = odp_cls_cos_pool_set(cos, pool_new);
+ CU_ASSERT(retval == 0);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPPROTO;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ /* Packet must come from the replacement pool, original queue */
+ CU_ASSERT(recvpool == pool_new);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(pool_new);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/* Verify that a new queue set on a CoS with odp_cos_queue_set() is the
+ * one that receives packets classified into that CoS */
+static void cls_pmr_queue_set(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint8_t val;
+ uint8_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_queue_t queue_new;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ cls_packet_info_t pkt_info;
+
+ val = ODPH_IPPROTO_UDP;
+ mask = 0xff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("ipproto1", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("ipproto1");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "ipproto1");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ queue_new = queue_create("ipproto2", true);
+ CU_ASSERT_FATAL(queue_new != ODP_QUEUE_INVALID);
+
+ /* new queue is set on CoS */
+ retval = odp_cos_queue_set(cos, queue_new);
+ CU_ASSERT(retval == 0);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPPROTO;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVALID);
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ /* Packet must arrive on the replacement queue, original pool */
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue_new);
+ odp_packet_free(pkt);
+
+ odp_cls_pmr_destroy(pmr);
+ odp_cos_destroy(cos);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue_new);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/* Match IPv4 source or destination address; 'dst' selects which term */
+static void test_pmr_term_ipv4_addr(int dst)
+{
+ odp_packet_t pkt;
+ uint32_t dst_addr, src_addr;
+ uint32_t dst_mask, src_mask;
+ odp_pmr_param_t pmr_param;
+ odph_ipv4hdr_t *ip;
+ const char *src_str = "10.0.0.88/32";
+ const char *dst_str = "10.0.0.99/32";
+
+ parse_ipv4_string(src_str, &src_addr, &src_mask);
+ parse_ipv4_string(dst_str, &dst_addr, &dst_mask);
+ /* PMR match values are given in network byte order */
+ src_addr = odp_cpu_to_be_32(src_addr);
+ src_mask = odp_cpu_to_be_32(src_mask);
+ dst_addr = odp_cpu_to_be_32(dst_addr);
+ dst_mask = odp_cpu_to_be_32(dst_mask);
+
+ odp_cls_pmr_param_init(&pmr_param);
+
+ if (dst) {
+ pmr_param.term = ODP_PMR_DIP_ADDR;
+ pmr_param.match.value = &dst_addr;
+ pmr_param.match.mask = &dst_mask;
+ pmr_param.val_sz = sizeof(dst_addr);
+ } else {
+ pmr_param.term = ODP_PMR_SIP_ADDR;
+ pmr_param.match.value = &src_addr;
+ pmr_param.match.mask = &src_mask;
+ pmr_param.val_sz = sizeof(src_addr);
+ }
+
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ ip->src_addr = src_addr;
+ ip->dst_addr = dst_addr;
+ /* Recompute the IPv4 header checksum after rewriting addresses */
+ odph_ipv4_csum_update(pkt);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default addresses must not match */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* IPv4 source address match */
+static void cls_pmr_term_ipv4_saddr(void)
+{
+ test_pmr_term_ipv4_addr(0);
+}
+
+/* IPv4 destination address match */
+static void cls_pmr_term_ipv4_daddr(void)
+{
+ test_pmr_term_ipv4_addr(1);
+}
+
+/* Match IPv6 destination address (lower 48 bits masked) */
+static void cls_pmr_term_ipv6daddr(void)
+{
+ odp_packet_t pkt;
+ odp_pmr_param_t pmr_param;
+ odph_ipv6hdr_t *ip;
+ cls_packet_info_t pkt_info;
+
+ uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
+ /* I.e. ::ffff:10.1.1.100 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 1, 1, 100
+ };
+ uint8_t ipv6_mask[ODPH_IPV6ADDR_LEN] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_DIP6_ADDR;
+ pmr_param.match.value = IPV6_DST_ADDR;
+ pmr_param.match.mask = ipv6_mask;
+ pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ memcpy(ip->dst_addr, IPV6_DST_ADDR, ODPH_IPV6ADDR_LEN);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default destination address must not match */
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Match IPv6 source address (lower 48 bits masked) */
+static void cls_pmr_term_ipv6saddr(void)
+{
+ odp_packet_t pkt;
+ odp_pmr_param_t pmr_param;
+ odph_ipv6hdr_t *ip;
+ cls_packet_info_t pkt_info;
+ uint8_t IPV6_SRC_ADDR[ODPH_IPV6ADDR_LEN] = {
+ /* I.e. ::ffff:10.1.1.1 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 1, 1, 1
+ };
+ uint8_t ipv6_mask[ODPH_IPV6ADDR_LEN] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_SIP6_ADDR;
+ pmr_param.match.value = IPV6_SRC_ADDR;
+ pmr_param.match.mask = ipv6_mask;
+ pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ memcpy(ip->src_addr, IPV6_SRC_ADDR, ODPH_IPV6ADDR_LEN);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default source address must not match */
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* TCP destination port match with a small packet count */
+static void cls_pmr_term_tcp_dport(void)
+{
+ cls_pmr_term_tcp_dport_n(2);
+}
+
+/* TCP destination port match with many packets in flight */
+static void cls_pmr_term_tcp_dport_multi(void)
+{
+ cls_pmr_term_tcp_dport_n(SHM_PKT_NUM_BUFS / 4);
+}
+
+/* Match an IPv4 address through a custom offset term: ODP_PMR_CUSTOM_L3
+ * (offset from L3 start) when custom_l3 is set, otherwise
+ * ODP_PMR_CUSTOM_FRAME (offset from frame start) */
+static void test_pmr_term_custom(int custom_l3)
+{
+ odp_packet_t pkt;
+ uint32_t dst_addr, src_addr;
+ uint32_t addr_be, mask_be;
+ uint32_t dst_mask, src_mask;
+ odp_pmr_param_t pmr_param;
+ odph_ipv4hdr_t *ip;
+ const char *pmr_src_str = "10.0.8.0/24";
+ const char *pmr_dst_str = "10.0.9.0/24";
+ const char *pkt_src_str = "10.0.8.88/32";
+ const char *pkt_dst_str = "10.0.9.99/32";
+
+ /* Match values for custom PMR rules are passed in network endian */
+ parse_ipv4_string(pmr_src_str, &src_addr, &src_mask);
+ parse_ipv4_string(pmr_dst_str, &dst_addr, &dst_mask);
+
+ odp_cls_pmr_param_init(&pmr_param);
+
+ if (custom_l3) {
+ addr_be = odp_cpu_to_be_32(dst_addr);
+ mask_be = odp_cpu_to_be_32(dst_mask);
+ pmr_param.term = ODP_PMR_CUSTOM_L3;
+ pmr_param.match.value = &addr_be;
+ pmr_param.match.mask = &mask_be;
+ pmr_param.val_sz = sizeof(addr_be);
+ /* Offset from start of L3 to IPv4 dst address */
+ pmr_param.offset = 16;
+ } else {
+ addr_be = odp_cpu_to_be_32(src_addr);
+ mask_be = odp_cpu_to_be_32(src_mask);
+ pmr_param.term = ODP_PMR_CUSTOM_FRAME;
+ pmr_param.match.value = &addr_be;
+ pmr_param.match.mask = &mask_be;
+ pmr_param.val_sz = sizeof(addr_be);
+ /* Offset from start of ethernet/IPv4 frame to IPv4
+ * src address */
+ pmr_param.offset = 26;
+ }
+
+ /* IPv4 packet with matching addresses */
+ parse_ipv4_string(pkt_src_str, &src_addr, NULL);
+ parse_ipv4_string(pkt_dst_str, &dst_addr, NULL);
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ ip->src_addr = odp_cpu_to_be_32(src_addr);
+ ip->dst_addr = odp_cpu_to_be_32(dst_addr);
+ odph_ipv4_csum_update(pkt);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Default addresses must not match */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/*
+ * Test a series of PMR rules and CoS. When num_udp is 1, test is serial
+ * from IP CoS to UDP CoS. When num_udp is larger than 1, a set of parallel
+ * UDP CoS are tested.
+ *
+ * dst IP dst UDP[0 ... 3]
+ * default_cos -> cos_ip -> cos_udp[0 ... 3]
+ *
+ * When 'marking' is non-zero, each PMR is created with
+ * odp_cls_pmr_create_opt() and a CLS mark (MARK_IP / MARK_UDP + i), and the
+ * mark value is verified on received packets.
+ */
+static void test_pmr_series(const int num_udp, int marking)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ int i, retval;
+ cls_packet_info_t pkt_info;
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ odp_queue_t default_queue;
+ odp_pool_t default_pool;
+ odp_cos_t default_cos;
+ odp_queue_t retqueue;
+ odp_pmr_t pmr_ip;
+ odp_queue_t queue_ip;
+ odp_cos_t cos_ip;
+ uint32_t dst_addr;
+ uint32_t dst_addr_be, ip_mask_be;
+ uint32_t dst_mask;
+ odp_pmr_param_t pmr_param;
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_create_opt_t create_opt;
+ odph_ethhdr_t *eth;
+ odph_ipv4hdr_t *ip;
+ odph_udphdr_t *udp;
+ odp_cos_t cos_udp[num_udp];
+ odp_queue_t queue_udp[num_udp];
+ odp_pmr_t pmr_udp[num_udp];
+ uint16_t dst_port = 1000;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ pool = pool_create("pmr_series");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Dest IP address */
+ queue_ip = queue_create("queue_ip", true);
+ CU_ASSERT_FATAL(queue_ip != ODP_QUEUE_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue_ip;
+
+ cos_ip = odp_cls_cos_create("cos_ip", &cls_param);
+ CU_ASSERT_FATAL(cos_ip != ODP_COS_INVALID);
+
+ parse_ipv4_string("10.0.9.99/32", &dst_addr, &dst_mask);
+ dst_addr_be = odp_cpu_to_be_32(dst_addr);
+ ip_mask_be = odp_cpu_to_be_32(dst_mask);
+
+ /* First stage: match IPv4 destination address off the default CoS */
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_DIP_ADDR;
+ pmr_param.match.value = &dst_addr_be;
+ pmr_param.match.mask = &ip_mask_be;
+ pmr_param.val_sz = sizeof(dst_addr_be);
+ pmr_param.offset = 0;
+
+ if (marking) {
+ odp_cls_pmr_create_opt_init(&create_opt);
+ create_opt.terms = &pmr_param;
+ create_opt.num_terms = 1;
+ create_opt.mark = MARK_IP;
+
+ pmr_ip = odp_cls_pmr_create_opt(&create_opt, default_cos, cos_ip);
+ } else {
+ pmr_ip = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos_ip);
+ }
+
+ CU_ASSERT_FATAL(pmr_ip != ODP_PMR_INVALID);
+
+ /* Dest UDP port */
+ for (i = 0; i < num_udp; i++) {
+ uint16_t dst_port_be = odp_cpu_to_be_16(dst_port + i);
+ uint16_t port_mask_be = odp_cpu_to_be_16(0xffff);
+ char name[] = "udp_0";
+
+ /* NOTE(review): single-character suffix assumes num_udp <= 10 */
+ name[4] += i;
+ queue_udp[i] = queue_create(name, true);
+ CU_ASSERT_FATAL(queue_udp[i] != ODP_QUEUE_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue_udp[i];
+
+ cos_udp[i] = odp_cls_cos_create(name, &cls_param);
+ CU_ASSERT_FATAL(cos_udp[i] != ODP_COS_INVALID);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_UDP_DPORT;
+ pmr_param.match.value = &dst_port_be;
+ pmr_param.match.mask = &port_mask_be;
+ pmr_param.val_sz = 2;
+ pmr_param.offset = 0;
+
+ if (marking) {
+ odp_cls_pmr_create_opt_init(&create_opt);
+ create_opt.terms = &pmr_param;
+ create_opt.num_terms = 1;
+ create_opt.mark = MARK_UDP + i;
+
+ pmr_udp[i] = odp_cls_pmr_create_opt(&create_opt, cos_ip, cos_udp[i]);
+ } else {
+ pmr_udp[i] = odp_cls_pmr_create(&pmr_param, 1, cos_ip, cos_udp[i]);
+ }
+
+ CU_ASSERT_FATAL(pmr_udp[i] != ODP_PMR_INVALID);
+ }
+
+ /* Matching TCP/IP packet */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_TCP;
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ ip->dst_addr = dst_addr_be;
+ odph_ipv4_csum_update(pkt);
+
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ /* TCP packet matches only the IP rule: must land on queue_ip */
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue_ip);
+
+ /* With marking enabled, odp_packet_reset() must clear the mark to 0 */
+ if (marking) {
+ CU_ASSERT(odp_packet_cls_mark(pkt) == MARK_IP);
+ CU_ASSERT(odp_packet_reset(pkt, odp_packet_len(pkt)) == 0);
+ CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+ } else {
+ /* Default is 0 */
+ CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+ }
+
+ odp_packet_free(pkt);
+
+ /* Matching UDP/IP packets */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+
+ for (i = 0; i < num_udp; i++) {
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ ip->dst_addr = dst_addr_be;
+ odph_ipv4_csum_update(pkt);
+ udp->dst_port = odp_cpu_to_be_16(dst_port + i);
+
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ /* UDP packet i must traverse cos_ip and end up on queue_udp[i] */
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue_udp[i]);
+
+ if (marking) {
+ CU_ASSERT(odp_packet_cls_mark(pkt) == (uint64_t)(MARK_UDP + i));
+ } else {
+ /* Default is 0 */
+ CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+ }
+
+ odp_packet_free(pkt);
+ }
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ odp_packet_free(pkt);
+ /* Teardown: PMRs first, then CoS, default CoS, pools and queues */
+ odp_cls_pmr_destroy(pmr_ip);
+
+ for (i = 0; i < num_udp; i++) {
+ odp_cls_pmr_destroy(pmr_udp[i]);
+ odp_cos_destroy(cos_udp[i]);
+ }
+
+ odp_cos_destroy(cos_ip);
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+
+ for (i = 0; i < num_udp; i++)
+ odp_queue_destroy(queue_udp[i]);
+
+ odp_queue_destroy(queue_ip);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/*
+ * Verify PMR matching on an SCTP port. is_dport selects the destination
+ * port term (ODP_PMR_SCTP_DPORT) over the source port term. One packet with
+ * the matching port must classify, one with port+1 must not.
+ */
+static void cls_pmr_term_sctp_port(bool is_dport)
+{
+ odp_packet_t pkt;
+ odph_sctphdr_t *sctp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ /* Match values/masks are supplied in network byte order */
+ val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ if (is_dport)
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_SCTP_SPORT;
+ if (is_dport)
+ pmr_param.term = ODP_PMR_SCTP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_SCTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ if (is_dport)
+ sctp->dst_port = val;
+ else
+ sctp->src_port = val;
+ /* Checksum must be valid for the packet to parse as SCTP */
+ CU_ASSERT(odph_sctp_chksum_set(pkt) == 0);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ if (is_dport)
+ sctp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+ else
+ sctp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+ CU_ASSERT(odph_sctp_chksum_set(pkt) == 0);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Suite entry points selecting SCTP source vs. destination port matching */
+static void cls_pmr_term_sctp_sport(void)
+{
+ cls_pmr_term_sctp_port(0);
+}
+
+static void cls_pmr_term_sctp_dport(void)
+{
+ cls_pmr_term_sctp_port(1);
+}
+
+/*
+ * Verify PMR matching on the ICMP type field: an echo request must match,
+ * an echo reply must fall through to the default CoS.
+ */
+static void cls_pmr_term_icmp_type(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = ODPH_ICMP_ECHO;
+ mask = 0xff;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_TYPE;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->type = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->type = ODPH_ICMP_ECHOREPLY;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/*
+ * Verify PMR matching on the ICMP code field: code 1 must match,
+ * code 0 must not.
+ */
+static void cls_pmr_term_icmp_code(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = 0x1;
+ mask = 0xff;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_CODE;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->code = 0x1;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->code = 0;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/*
+ * Verify PMR matching on the ICMP echo identifier (big-endian 0x1234).
+ */
+static void cls_pmr_term_icmp_id(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(0x1234);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_ID;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->un.echo.id = odp_cpu_to_be_16(0x1234);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ /* NOTE(review): 0x4567 is intentionally not byte-swapped; any value
+ * different from big-endian 0x1234 suffices for the NO_MATCH case */
+ icmp->un.echo.id = 0x4567;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/*
+ * Verify PMR matching on the GTPv1-U tunnel endpoint ID (TEID).
+ * Non-GTP UDP ports, GTPv2 headers and non-matching TEIDs must all
+ * fall through to the default CoS.
+ */
+static void cls_pmr_term_gtpu_teid(void)
+{
+ odp_packet_t pkt;
+ odph_gtphdr_t *gtpu;
+ odph_udphdr_t *udp;
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ uint8_t *hlen = 0;
+
+ val = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_GTPV1_TEID;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ /* create_packet() with CLS_PKT_L4_GTP presumably sets the TEID to
+ * CLS_MAGIC_VAL already — no explicit TEID write is needed here */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Check packet with wrong UDP port, packets should goto default cos */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+
+ /* Check GTPv2 packets, should goto default cos */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ /* GTP header follows immediately after the UDP header */
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + ODPH_UDPHDR_LEN);
+ /* Version:2, piggybacking:1, teid:1 */
+ gtpu->gtp_hdr_info = 0x58;
+ CU_ASSERT(odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) == 0);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+
+ /* All other packets should goto default cos */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + ODPH_UDPHDR_LEN);
+ gtpu->teid = odp_cpu_to_be_32(CLS_MAGIC_VAL + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/*
+ * Verify PMR matching on the IGMP group address. The default IGMP packet
+ * presumably carries CLS_MAGIC_VAL as group address (MATCH case needs no
+ * explicit write); CLS_MAGIC_VAL + 1 must not match.
+ */
+static void cls_pmr_term_igmp_grpaddr(void)
+{
+ odp_packet_t pkt;
+ odph_igmphdr_t *igmp;
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IGMP_GRP_ADDR;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_IGMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_IGMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ igmp = (odph_igmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ igmp->group = odp_cpu_to_be_32(CLS_MAGIC_VAL + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* Suite entry points: serial chain (1 UDP CoS), parallel chains
+ * (MAX_NUM_UDP CoS), marking variant, and custom match offsets. */
+static void cls_pmr_serial(void)
+{
+ test_pmr_series(1, 0);
+}
+
+static void cls_pmr_parallel(void)
+{
+ test_pmr_series(MAX_NUM_UDP, 0);
+}
+
+static void cls_pmr_marking(void)
+{
+ test_pmr_series(MAX_NUM_UDP, 1);
+}
+
+static void cls_pmr_term_custom_frame(void)
+{
+ test_pmr_term_custom(0);
+}
+
+static void cls_pmr_term_custom_l3(void)
+{
+ test_pmr_term_custom(1);
+}
+
+/*
+ * Verify PMR matching on the IPsec AH SPI field (big-endian 0x11223344)
+ * over either IPv4 or IPv6.
+ */
+static void test_pmr_term_ipsec_spi_ah(odp_bool_t is_ipv6)
+{
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ odp_packet_t pkt;
+ odph_ahhdr_t *ah;
+
+ val = odp_cpu_to_be_32(0x11223344);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPSEC_SPI;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_AH;
+ pkt_info.ipv6 = is_ipv6;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ah = (odph_ahhdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ /* val is already big-endian; stored directly in the header */
+ ah->spi = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ah = (odph_ahhdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ /* Any SPI differing from val works for NO_MATCH */
+ ah->spi = val + 1;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void cls_pmr_term_ipsec_spi_ah_ipv4(void)
+{
+ test_pmr_term_ipsec_spi_ah(TEST_IPV4);
+}
+
+/*
+ * Verify PMR matching on the IPsec ESP SPI field (big-endian 0x11223344)
+ * over either IPv4 or IPv6.
+ */
+static void test_pmr_term_ipsec_spi_esp(odp_bool_t is_ipv6)
+{
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ odp_packet_t pkt;
+ odph_esphdr_t *esp;
+
+ val = odp_cpu_to_be_32(0x11223344);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPSEC_SPI;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ESP;
+ pkt_info.ipv6 = is_ipv6;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ esp = (odph_esphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ /* val is already big-endian; stored directly in the header */
+ esp->spi = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ esp = (odph_esphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ esp->spi = val + 1;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+/* IPv4/IPv6 entry points for the AH/ESP SPI tests */
+static void cls_pmr_term_ipsec_spi_esp_ipv4(void)
+{
+ test_pmr_term_ipsec_spi_esp(TEST_IPV4);
+}
+
+static void cls_pmr_term_ipsec_spi_ah_ipv6(void)
+{
+ test_pmr_term_ipsec_spi_ah(TEST_IPV6);
+}
+
+static void cls_pmr_term_ipsec_spi_esp_ipv6(void)
+{
+ test_pmr_term_ipsec_spi_esp(TEST_IPV6);
+}
+
+/*
+ * Capability predicates used by ODP_TEST_INFO_CONDITIONAL below.
+ * Each returns non-zero when the implementation supports the corresponding
+ * PMR term bit in the globally cached cls_capa.
+ */
+static int check_capa_tcp_dport(void)
+{
+ return cls_capa.supported_terms.bit.tcp_dport;
+}
+
+static int check_capa_tcp_sport(void)
+{
+ return cls_capa.supported_terms.bit.tcp_sport;
+}
+
+static int check_capa_udp_dport(void)
+{
+ return cls_capa.supported_terms.bit.udp_dport;
+}
+
+static int check_capa_udp_sport(void)
+{
+ return cls_capa.supported_terms.bit.udp_sport;
+}
+
+static int check_capa_ip_proto(void)
+{
+ return cls_capa.supported_terms.bit.ip_proto;
+}
+
+static int check_capa_ip_dscp(void)
+{
+ return cls_capa.supported_terms.bit.ip_dscp;
+}
+
+static int check_capa_dmac(void)
+{
+ return cls_capa.supported_terms.bit.dmac;
+}
+
+static int check_capa_ipv4_saddr(void)
+{
+ return cls_capa.supported_terms.bit.sip_addr;
+}
+
+static int check_capa_ipv4_daddr(void)
+{
+ return cls_capa.supported_terms.bit.dip_addr;
+}
+
+static int check_capa_ipv6_saddr(void)
+{
+ return cls_capa.supported_terms.bit.sip6_addr;
+}
+
+static int check_capa_ipv6_daddr(void)
+{
+ return cls_capa.supported_terms.bit.dip6_addr;
+}
+
+static int check_capa_packet_len(void)
+{
+ return cls_capa.supported_terms.bit.len;
+}
+
+static int check_capa_vlan_id_0(void)
+{
+ return cls_capa.supported_terms.bit.vlan_id_0;
+}
+
+static int check_capa_vlan_id_x(void)
+{
+ return cls_capa.supported_terms.bit.vlan_id_x;
+}
+
+static int check_capa_vlan_pcp_0(void)
+{
+ return cls_capa.supported_terms.bit.vlan_pcp_0;
+}
+
+static int check_capa_ethtype_0(void)
+{
+ return cls_capa.supported_terms.bit.ethtype_0;
+}
+
+static int check_capa_ethtype_x(void)
+{
+ return cls_capa.supported_terms.bit.ethtype_x;
+}
+
+static int check_capa_custom_frame(void)
+{
+ return cls_capa.supported_terms.bit.custom_frame;
+}
+
+static int check_capa_custom_l3(void)
+{
+ return cls_capa.supported_terms.bit.custom_l3;
+}
+
+static int check_capa_ipsec_spi(void)
+{
+ return cls_capa.supported_terms.bit.ipsec_spi;
+}
+
+/* PMR series tests need both dst IP and dst UDP port terms */
+static int check_capa_pmr_series(void)
+{
+ uint64_t support;
+
+ support = cls_capa.supported_terms.bit.dip_addr &&
+ cls_capa.supported_terms.bit.udp_dport;
+
+ return support;
+}
+
+/* Marking additionally requires mark values up to MARK_UDP+MAX_NUM_UDP-1 */
+static int check_capa_pmr_marking(void)
+{
+ uint64_t terms;
+
+ terms = cls_capa.supported_terms.bit.dip_addr &&
+ cls_capa.supported_terms.bit.udp_dport;
+
+ /* one PMR for IP, MAX_NUM_UDP PMRs for UDP */
+ if (terms && cls_capa.max_mark >= (MARK_UDP + MAX_NUM_UDP - 1))
+ return 1;
+
+ return 0;
+}
+
+static int check_capa_sctp_sport(void)
+{
+ return cls_capa.supported_terms.bit.sctp_sport;
+}
+
+static int check_capa_sctp_dport(void)
+{
+ return cls_capa.supported_terms.bit.sctp_dport;
+}
+
+static int check_capa_icmp_type(void)
+{
+ return cls_capa.supported_terms.bit.icmp_type;
+}
+
+static int check_capa_icmp_code(void)
+{
+ return cls_capa.supported_terms.bit.icmp_code;
+}
+
+static int check_capa_icmp_id(void)
+{
+ return cls_capa.supported_terms.bit.icmp_id;
+}
+
+static int check_capa_gtpu_teid(void)
+{
+ return cls_capa.supported_terms.bit.gtpv1_teid;
+}
+
+static int check_capa_igmp_grpaddr(void)
+{
+ return cls_capa.supported_terms.bit.igmp_grp_addr;
+}
+
+/*
+ * PMR test suite registration. Conditional entries run only when the
+ * matching capability predicate above returns non-zero; the remaining
+ * entries run unconditionally.
+ */
+odp_testinfo_t classification_suite_pmr[] = {
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_tcp_dport, check_capa_tcp_dport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_tcp_sport, check_capa_tcp_sport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_udp_dport, check_capa_udp_dport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_udp_sport, check_capa_udp_sport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_gtpu_teid, check_capa_gtpu_teid),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_igmp_grpaddr, check_capa_igmp_grpaddr),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_sctp_sport, check_capa_sctp_sport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_sctp_dport, check_capa_sctp_dport),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_icmp_type, check_capa_icmp_type),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_icmp_code, check_capa_icmp_code),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_icmp_id, check_capa_icmp_id),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv4_proto, check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv6_proto, check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv4_dscp, check_capa_ip_dscp),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv6_dscp, check_capa_ip_dscp),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_dmac, check_capa_dmac),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_pool_set, check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_queue_set, check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv4_saddr, check_capa_ipv4_saddr),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv4_daddr, check_capa_ipv4_daddr),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv6saddr, check_capa_ipv6_saddr),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipv6daddr, check_capa_ipv6_daddr),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_packet_len, check_capa_packet_len),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_vlan_id_0, check_capa_vlan_id_0),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_vlan_id_x, check_capa_vlan_id_x),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_vlan_pcp_0, check_capa_vlan_pcp_0),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_eth_type_0, check_capa_ethtype_0),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_eth_type_x, check_capa_ethtype_x),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_custom_frame, check_capa_custom_frame),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_custom_l3, check_capa_custom_l3),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipsec_spi_ah_ipv4, check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipsec_spi_esp_ipv4, check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipsec_spi_ah_ipv6, check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_term_ipsec_spi_esp_ipv6, check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_serial, check_capa_pmr_series),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_parallel, check_capa_pmr_series),
+ ODP_TEST_INFO(cls_pktin_classifier_flag),
+ ODP_TEST_INFO(cls_pmr_term_tcp_dport_multi),
+ ODP_TEST_INFO_CONDITIONAL(cls_pmr_marking, check_capa_pmr_marking),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index 4f4308264..d81884006 100644
--- a/test/common_plat/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,6 +8,7 @@
#include "odp_classification_testsuites.h"
#include "classification.h"
#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
static odp_cos_t cos_list[CLS_ENTRIES];
static odp_pmr_t pmr_list[CLS_ENTRIES];
@@ -15,23 +17,39 @@ static odp_pool_t pool_list[CLS_ENTRIES];
static odp_pool_t pool_default;
static odp_pktio_t pktio_loop;
+static odp_pktio_capability_t pktio_capa;
+static odp_cls_testcase_u tc;
+
+#define NUM_COS_PMR_CHAIN 2
+#define NUM_COS_DEFAULT 1
+#define NUM_COS_DROP 1
+#define NUM_COS_ERROR 1
+#define NUM_COS_L2_PRIO CLS_L2_QOS_MAX
+#define NUM_COS_PMR 1
+#define NUM_COS_COMPOSITE 1
+#define PKTV_DEFAULT_SIZE 8
/** sequence number of IP packets */
-odp_atomic_u32_t seq;
+static odp_atomic_u32_t seq;
/* default packet info */
static cls_packet_info_t default_pkt_info;
-int classification_suite_init(void)
+/* Packet vector configuration */
+static odp_pktin_vector_config_t pktv_config;
+
+static int classification_suite_common_init(odp_bool_t enable_pktv)
{
int i;
int ret;
odp_pktio_param_t pktio_param;
odp_pktin_queue_param_t pktin_param;
+ tc.all_bits = 0;
+
pool_default = pool_create("classification_pool");
if (ODP_POOL_INVALID == pool_default) {
- fprintf(stderr, "Packet pool creation failed.\n");
+ ODPH_ERR("Packet pool creation failed\n");
return -1;
}
@@ -42,7 +60,13 @@ int classification_suite_init(void)
if (pktio_loop == ODP_PKTIO_INVALID) {
ret = odp_pool_destroy(pool_default);
if (ret)
- fprintf(stderr, "unable to destroy pool.\n");
+ ODPH_ERR("Unable to destroy pool\n");
+ return -1;
+ }
+
+ ret = odp_pktio_capability(pktio_loop, &pktio_capa);
+ if (ret) {
+ ODPH_ERR("Unable to get pktio capability\n");
return -1;
}
@@ -52,14 +76,47 @@ int classification_suite_init(void)
odp_pktin_queue_param_init(&pktin_param);
pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ pktin_param.classifier_enable = true;
+ pktin_param.hash_enable = false;
+
+ if (enable_pktv) {
+ odp_pktio_capability_t capa;
+ odp_pool_t pktv_pool;
+
+ pktv_pool = pktv_pool_create("packet_vector_pool");
+ if (pktv_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Packet vector pool creation failed\n");
+ return -1;
+ }
+
+ if (odp_pktio_capability(pktio_loop, &capa)) {
+ ODPH_ERR("Pktio capability failed\n");
+ return -1;
+ }
+
+ if (!capa.vector.supported) {
+ printf("Packet vector mode is not supported. Test suite skipped.\n");
+ pktv_config.enable = false;
+ pktv_config.pool = pktv_pool;
+ } else {
+ pktin_param.vector.enable = true;
+ pktin_param.vector.pool = pktv_pool;
+ pktin_param.vector.max_size = capa.vector.max_size < PKTV_DEFAULT_SIZE ?
+ capa.vector.max_size : PKTV_DEFAULT_SIZE;
+ pktin_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+
+ /* Copy packet vector config for global access */
+ pktv_config = pktin_param.vector;
+ }
+ }
if (odp_pktin_queue_config(pktio_loop, &pktin_param)) {
- fprintf(stderr, "pktin queue config failed.\n");
+ ODPH_ERR("Pktin queue config failed\n");
return -1;
}
if (odp_pktout_queue_config(pktio_loop, NULL)) {
- fprintf(stderr, "pktout queue config failed.\n");
+ ODPH_ERR("Pktout queue config failed\n");
return -1;
}
@@ -67,7 +124,7 @@ int classification_suite_init(void)
cos_list[i] = ODP_COS_INVALID;
for (i = 0; i < CLS_ENTRIES; i++)
- pmr_list[i] = ODP_PMR_INVAL;
+ pmr_list[i] = ODP_PMR_INVALID;
for (i = 0; i < CLS_ENTRIES; i++)
queue_list[i] = ODP_QUEUE_INVALID;
@@ -79,49 +136,87 @@ int classification_suite_init(void)
ret = odp_pktio_start(pktio_loop);
if (ret) {
- fprintf(stderr, "unable to start loop\n");
+ ODPH_ERR("Unable to start loop\n");
return -1;
}
return 0;
}
-int classification_suite_term(void)
+static int classification_suite_common_term(odp_bool_t enable_pktv)
{
int i;
int retcode = 0;
if (0 > stop_pktio(pktio_loop)) {
- fprintf(stderr, "stop pktio failed.\n");
+ ODPH_ERR("Stop pktio failed\n");
retcode = -1;
}
if (0 > odp_pktio_close(pktio_loop)) {
- fprintf(stderr, "pktio close failed.\n");
+ ODPH_ERR("Pktio close failed\n");
retcode = -1;
}
+ for (i = 0; i < CLS_ENTRIES; i++) {
+ if (pmr_list[i] != ODP_PMR_INVALID)
+ odp_cls_pmr_destroy(pmr_list[i]);
+ }
+
+ for (i = 0; i < CLS_ENTRIES; i++) {
+ if (cos_list[i] != ODP_COS_INVALID)
+ odp_cos_destroy(cos_list[i]);
+ }
+
if (0 != odp_pool_destroy(pool_default)) {
- fprintf(stderr, "pool_default destroy failed.\n");
+ ODPH_ERR("Pool_default destroy failed\n");
retcode = -1;
}
- for (i = 0; i < CLS_ENTRIES; i++)
- odp_cos_destroy(cos_list[i]);
+ if (enable_pktv) {
+ if (odp_pool_destroy(pktv_config.pool)) {
+ ODPH_ERR("Packet vector pool destroy failed\n");
+ retcode = -1;
+ }
+ }
- for (i = 0; i < CLS_ENTRIES; i++)
- odp_cls_pmr_destroy(pmr_list[i]);
+ for (i = 0; i < CLS_ENTRIES; i++) {
+ if (queue_list[i] != ODP_QUEUE_INVALID)
+ odp_queue_destroy(queue_list[i]);
+ }
- for (i = 0; i < CLS_ENTRIES; i++)
- odp_queue_destroy(queue_list[i]);
+ for (i = 0; i < CLS_ENTRIES; i++) {
+ if (pool_list[i] != ODP_POOL_INVALID)
+ odp_pool_destroy(pool_list[i]);
+ }
- for (i = 0; i < CLS_ENTRIES; i++)
- odp_pool_destroy(pool_list[i]);
+ if (odp_cunit_print_inactive())
+ retcode = -1;
return retcode;
}
-void configure_cls_pmr_chain(void)
+int classification_suite_init(void)
+{
+ return classification_suite_common_init(false);
+}
+
+int classification_suite_term(void)
+{
+ return classification_suite_common_term(false);
+}
+
+int classification_suite_pktv_init(void)
+{
+ return classification_suite_common_init(true);
+}
+
+int classification_suite_pktv_term(void)
+{
+ return classification_suite_common_term(true);
+}
+
+void configure_cls_pmr_chain(odp_bool_t enable_pktv)
{
/* PKTIO --> PMR_SRC(SRC IP ADDR) --> PMR_DST (TCP SPORT) */
@@ -139,16 +234,16 @@ void configure_cls_pmr_chain(void)
uint32_t addr;
uint32_t mask;
odp_pmr_param_t pmr_param;
- odp_queue_capability_t queue_capa;
+ odp_schedule_capability_t schedule_capa;
- CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+ CU_ASSERT_FATAL(odp_schedule_capability(&schedule_capa) == 0);
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- qparam.sched.lock_count = queue_capa.max_ordered_locks;
+ qparam.sched.lock_count = schedule_capa.max_ordered_locks;
sprintf(queuename, "%s", "SrcQueue");
queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename, &qparam);
@@ -163,14 +258,20 @@ void configure_cls_pmr_chain(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_SRC];
cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
cos_list[CLS_PMR_CHAIN_SRC] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_SRC] != ODP_COS_INVALID);
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DstQueue");
@@ -186,11 +287,21 @@ void configure_cls_pmr_chain(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_DST];
cls_param.queue = queue_list[CLS_PMR_CHAIN_DST];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR_CHAIN_DST] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_DST] != ODP_COS_INVALID);
parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
+ addr = odp_cpu_to_be_32(addr);
+ mask = odp_cpu_to_be_32(mask);
+
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_SIP_ADDR;
pmr_param.match.value = &addr;
@@ -199,10 +310,10 @@ void configure_cls_pmr_chain(void)
pmr_list[CLS_PMR_CHAIN_SRC] =
odp_cls_pmr_create(&pmr_param, 1, cos_list[CLS_DEFAULT],
cos_list[CLS_PMR_CHAIN_SRC]);
- CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_SRC] != ODP_PMR_INVAL);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_SRC] != ODP_PMR_INVALID);
- val = CLS_PMR_CHAIN_PORT;
- maskport = 0xffff;
+ val = odp_cpu_to_be_16(CLS_PMR_CHAIN_PORT);
+ maskport = odp_cpu_to_be_16(0xffff);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = find_first_supported_l3_pmr();
pmr_param.match.value = &val;
@@ -211,10 +322,10 @@ void configure_cls_pmr_chain(void)
pmr_list[CLS_PMR_CHAIN_DST] =
odp_cls_pmr_create(&pmr_param, 1, cos_list[CLS_PMR_CHAIN_SRC],
cos_list[CLS_PMR_CHAIN_DST]);
- CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_DST] != ODP_PMR_INVAL);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_DST] != ODP_PMR_INVALID);
}
-void test_cls_pmr_chain(void)
+void test_cls_pmr_chain(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odph_ipv4hdr_t *ip;
@@ -226,7 +337,7 @@ void test_cls_pmr_chain(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -235,14 +346,13 @@ void test_cls_pmr_chain(void)
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
ip->src_addr = odp_cpu_to_be_32(addr);
- ip->chksum = 0;
- ip->chksum = odph_ipv4_csum_update(pkt);
+ odph_ipv4_csum_update(pkt);
set_first_supported_pmr_port(pkt, CLS_PMR_CHAIN_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_CHAIN_DST]);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
@@ -258,11 +368,10 @@ void test_cls_pmr_chain(void)
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
ip->src_addr = odp_cpu_to_be_32(addr);
- ip->chksum = 0;
- ip->chksum = odph_ipv4_csum_update(pkt);
+ odph_ipv4_csum_update(pkt);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_CHAIN_SRC]);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
@@ -271,7 +380,7 @@ void test_cls_pmr_chain(void)
odp_packet_free(pkt);
}
-void configure_pktio_default_cos(void)
+void configure_pktio_default_cos(odp_bool_t enable_pktv)
{
int retval;
odp_queue_param_t qparam;
@@ -282,7 +391,7 @@ void configure_pktio_default_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DefaultQueue");
@@ -297,7 +406,14 @@ void configure_pktio_default_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_DEFAULT];
cls_param.queue = queue_list[CLS_DEFAULT];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_DEFAULT] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_DEFAULT] != ODP_COS_INVALID);
@@ -305,7 +421,7 @@ void configure_pktio_default_cos(void)
CU_ASSERT(retval == 0);
}
-void test_pktio_default_cos(void)
+void test_pktio_default_cos(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odp_queue_t queue;
@@ -315,7 +431,7 @@ void test_pktio_default_cos(void)
/* create a default packet */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -323,7 +439,7 @@ void test_pktio_default_cos(void)
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
/* Default packet should be received in default queue */
CU_ASSERT(queue == queue_list[CLS_DEFAULT]);
@@ -334,7 +450,149 @@ void test_pktio_default_cos(void)
odp_packet_free(pkt);
}
-void configure_pktio_error_cos(void)
+void configure_pktio_drop_cos(odp_bool_t enable_pktv, uint32_t max_cos_stats)
+{
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ odp_cls_cos_param_t cls_param;
+ char cosname[ODP_COS_NAME_LEN];
+
+ sprintf(cosname, "DropCoS");
+ odp_cls_cos_param_init(&cls_param);
+
+ cls_param.action = ODP_COS_ACTION_DROP;
+ cls_param.stats_enable = max_cos_stats > 0;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
+ cos_list[CLS_DROP] = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_list[CLS_DROP] != ODP_COS_INVALID);
+
+ val = odp_cpu_to_be_16(CLS_DROP_PORT);
+ mask = odp_cpu_to_be_16(0xffff);
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = find_first_supported_l3_pmr();
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr_list[CLS_DROP] = odp_cls_pmr_create(&pmr_param, 1,
+ cos_list[CLS_DEFAULT],
+ cos_list[CLS_DROP]);
+ CU_ASSERT_FATAL(pmr_list[CLS_DROP] != ODP_PMR_INVALID);
+}
+
+void test_pktio_drop_cos(odp_bool_t enable_pktv)
+{
+ odp_packet_t pkt;
+ odp_queue_t queue;
+ uint32_t seqno = 0;
+ cls_packet_info_t pkt_info;
+ odp_cls_capability_t capa;
+ odp_cls_cos_stats_t start, stop;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ set_first_supported_pmr_port(pkt, CLS_DROP_PORT);
+ CU_ASSERT(odp_cls_cos_stats(cos_list[CLS_DROP], &start) == 0);
+ enqueue_pktio_interface(pkt, pktio_loop);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
+ CU_ASSERT(odp_cls_cos_stats(cos_list[CLS_DROP], &stop) == 0);
+ CU_ASSERT_FATAL(pkt == ODP_PACKET_INVALID);
+ if (capa.stats.cos.counter.packets)
+ CU_ASSERT((stop.packets - start.packets) == 1);
+ if (capa.stats.cos.counter.discards)
+ CU_ASSERT((stop.discards - start.discards) == 0);
+ if (capa.stats.cos.counter.errors)
+ CU_ASSERT((stop.errors - start.errors) == 0);
+}
+
+static int check_queue_stats(void)
+{
+ odp_cls_capability_t capa;
+
+ if (odp_cls_capability(&capa))
+ return ODP_TEST_INACTIVE;
+
+ if (capa.stats.queue.all_counters)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void cls_queue_stats(odp_bool_t enable_pktv)
+{
+ odp_cls_capability_t capa;
+ odp_cls_queue_stats_t stats_start;
+ odp_cls_queue_stats_t stats_stop;
+ odp_cos_t cos;
+ odp_queue_t queue;
+
+ /* Default CoS used for test packets */
+ if (!tc.default_cos || !TEST_DEFAULT) {
+ printf("Default CoS not supported, skipping test\n");
+ return;
+ }
+
+ cos = cos_list[CLS_DEFAULT];
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+ queue = odp_cos_queue(cos);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ CU_ASSERT(odp_cls_queue_stats(cos, queue, &stats_start) == 0);
+
+ test_pktio_default_cos(enable_pktv);
+
+ CU_ASSERT(odp_cls_queue_stats(cos, queue, &stats_stop) == 0);
+
+ if (capa.stats.queue.counter.packets)
+ CU_ASSERT(stats_stop.packets > stats_start.packets);
+ if (capa.stats.queue.counter.octets)
+ CU_ASSERT(stats_stop.octets > stats_start.octets);
+ CU_ASSERT((stats_stop.discards - stats_start.discards) == 0);
+ CU_ASSERT((stats_stop.errors - stats_start.errors) == 0);
+
+ printf("\nQueue statistics\n----------------\n");
+ printf(" discards: %" PRIu64 "\n", stats_stop.discards);
+ printf(" errors: %" PRIu64 "\n", stats_stop.errors);
+ printf(" octets: %" PRIu64 "\n", stats_stop.octets);
+ printf(" packets: %" PRIu64 "\n", stats_stop.packets);
+
+ /* Check that all unsupported counters are still zero */
+ if (!capa.stats.queue.counter.discards)
+ CU_ASSERT(stats_stop.discards == 0);
+ if (!capa.stats.queue.counter.errors)
+ CU_ASSERT(stats_stop.errors == 0);
+ if (!capa.stats.queue.counter.octets)
+ CU_ASSERT(stats_stop.octets == 0);
+ if (!capa.stats.queue.counter.packets)
+ CU_ASSERT(stats_stop.packets == 0);
+}
+
+static void cls_queue_stats_pkt(void)
+{
+ cls_queue_stats(false);
+}
+
+static void cls_queue_stats_pktv(void)
+{
+ cls_queue_stats(true);
+}
+
+void configure_pktio_error_cos(odp_bool_t enable_pktv)
{
int retval;
odp_queue_param_t qparam;
@@ -345,7 +603,7 @@ void configure_pktio_error_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_LOWEST;
+ qparam.sched.prio = odp_schedule_min_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "ErrorCos");
@@ -361,7 +619,14 @@ void configure_pktio_error_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_ERROR];
cls_param.queue = queue_list[CLS_ERROR];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_ERROR] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_ERROR] != ODP_COS_INVALID);
@@ -369,7 +634,7 @@ void configure_pktio_error_cos(void)
CU_ASSERT(retval == 0);
}
-void test_pktio_error_cos(void)
+void test_pktio_error_cos(odp_bool_t enable_pktv)
{
odp_queue_t queue;
odp_packet_t pkt;
@@ -378,7 +643,7 @@ void test_pktio_error_cos(void)
/*Create an error packet */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
@@ -388,7 +653,7 @@ void test_pktio_error_cos(void)
ip->chksum = 0;
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
/* Error packet should be received in error queue */
CU_ASSERT(queue == queue_list[CLS_ERROR]);
@@ -397,7 +662,7 @@ void test_pktio_error_cos(void)
odp_packet_free(pkt);
}
-void classification_test_pktio_set_skip(void)
+static void cls_pktio_set_skip(void)
{
int retval;
size_t offset = 5;
@@ -415,7 +680,7 @@ void classification_test_pktio_set_skip(void)
CU_ASSERT(retval == 0);
}
-void classification_test_pktio_set_headroom(void)
+static void cls_pktio_set_headroom(void)
{
size_t headroom;
int retval;
@@ -428,94 +693,7 @@ void classification_test_pktio_set_headroom(void)
CU_ASSERT(retval < 0);
}
-void configure_cos_with_l2_priority(void)
-{
- uint8_t num_qos = CLS_L2_QOS_MAX;
- odp_cos_t cos_tbl[CLS_L2_QOS_MAX];
- odp_queue_t queue_tbl[CLS_L2_QOS_MAX];
- odp_pool_t pool;
- uint8_t qos_tbl[CLS_L2_QOS_MAX];
- char cosname[ODP_COS_NAME_LEN];
- char queuename[ODP_QUEUE_NAME_LEN];
- char poolname[ODP_POOL_NAME_LEN];
- int retval;
- int i;
- odp_queue_param_t qparam;
- odp_cls_cos_param_t cls_param;
-
- /** Initialize scalar variable qos_tbl **/
- for (i = 0; i < CLS_L2_QOS_MAX; i++)
- qos_tbl[i] = 0;
-
- odp_queue_param_init(&qparam);
- qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
- for (i = 0; i < num_qos; i++) {
- qparam.sched.prio = ODP_SCHED_PRIO_LOWEST - i;
- sprintf(queuename, "%s_%d", "L2_Queue", i);
- queue_tbl[i] = odp_queue_create(queuename, &qparam);
- CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
- queue_list[CLS_L2_QOS_0 + i] = queue_tbl[i];
-
- sprintf(poolname, "%s_%d", "L2_Pool", i);
- pool = pool_create(poolname);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pool_list[CLS_L2_QOS_0 + i] = pool;
-
- sprintf(cosname, "%s_%d", "L2_Cos", i);
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue_tbl[i];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos_tbl[i] = odp_cls_cos_create(cosname, &cls_param);
- if (cos_tbl[i] == ODP_COS_INVALID)
- break;
-
- cos_list[CLS_L2_QOS_0 + i] = cos_tbl[i];
- qos_tbl[i] = i;
- }
- /* count 'i' is passed instead of num_qos to handle the rare scenario
- if the odp_cls_cos_create() failed in the middle*/
- retval = odp_cos_with_l2_priority(pktio_loop, i, qos_tbl, cos_tbl);
- CU_ASSERT(retval == 0);
-}
-
-void test_cos_with_l2_priority(void)
-{
- odp_packet_t pkt;
- odph_ethhdr_t *ethhdr;
- odph_vlanhdr_t *vlan;
- odp_queue_t queue;
- odp_pool_t pool;
- uint32_t seqno = 0;
- cls_packet_info_t pkt_info;
- uint8_t i;
-
- pkt_info = default_pkt_info;
- pkt_info.udp = true;
- pkt_info.vlan = true;
-
- for (i = 0; i < CLS_L2_QOS_MAX; i++) {
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- vlan = (odph_vlanhdr_t *)(ethhdr + 1);
- vlan->tci = odp_cpu_to_be_16(i << 13);
- enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(queue == queue_list[CLS_L2_QOS_0 + i]);
- pool = odp_packet_pool(pkt);
- CU_ASSERT(pool == pool_list[CLS_L2_QOS_0 + i]);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- odp_packet_free(pkt);
- }
-}
-
-void configure_pmr_cos(void)
+void configure_pmr_cos(odp_bool_t enable_pktv)
{
uint16_t val;
uint16_t mask;
@@ -528,7 +706,7 @@ void configure_pmr_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "PMR_CoS");
@@ -544,12 +722,19 @@ void configure_pmr_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR];
cls_param.queue = queue_list[CLS_PMR];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR] != ODP_COS_INVALID);
- val = CLS_PMR_PORT;
- mask = 0xffff;
+ val = odp_cpu_to_be_16(CLS_PMR_PORT);
+ mask = odp_cpu_to_be_16(0xffff);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = find_first_supported_l3_pmr();
pmr_param.match.value = &val;
@@ -559,10 +744,10 @@ void configure_pmr_cos(void)
pmr_list[CLS_PMR] = odp_cls_pmr_create(&pmr_param, 1,
cos_list[CLS_DEFAULT],
cos_list[CLS_PMR]);
- CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVAL);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVALID);
}
-void test_pmr_cos(void)
+void test_pmr_cos(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odp_queue_t queue;
@@ -571,14 +756,14 @@ void test_pmr_cos(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
set_first_supported_pmr_port(pkt, CLS_PMR_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR]);
pool = odp_packet_pool(pkt);
@@ -587,7 +772,7 @@ void test_pmr_cos(void)
odp_packet_free(pkt);
}
-void configure_pktio_pmr_composite(void)
+void configure_pktio_pmr_composite(odp_bool_t enable_pktv)
{
odp_pmr_param_t pmr_params[2];
uint16_t val;
@@ -603,7 +788,7 @@ void configure_pktio_pmr_composite(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "cos_pmr_composite_queue");
@@ -619,19 +804,29 @@ void configure_pktio_pmr_composite(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_SET];
cls_param.queue = queue_list[CLS_PMR_SET];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR_SET] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_SET] != ODP_COS_INVALID);
parse_ipv4_string(CLS_PMR_SET_SADDR, &addr, &mask);
+ addr = odp_cpu_to_be_32(addr);
+ mask = odp_cpu_to_be_32(mask);
+
odp_cls_pmr_param_init(&pmr_params[0]);
pmr_params[0].term = ODP_PMR_SIP_ADDR;
pmr_params[0].match.value = &addr;
pmr_params[0].match.mask = &mask;
pmr_params[0].val_sz = sizeof(addr);
- val = CLS_PMR_SET_PORT;
- maskport = 0xffff;
+ val = odp_cpu_to_be_16(CLS_PMR_SET_PORT);
+ maskport = odp_cpu_to_be_16(0xffff);
odp_cls_pmr_param_init(&pmr_params[1]);
pmr_params[1].term = find_first_supported_l3_pmr();
pmr_params[1].match.value = &val;
@@ -642,10 +837,10 @@ void configure_pktio_pmr_composite(void)
pmr_list[CLS_PMR_SET] = odp_cls_pmr_create(pmr_params, num_terms,
cos_list[CLS_DEFAULT],
cos_list[CLS_PMR_SET]);
- CU_ASSERT_FATAL(pmr_list[CLS_PMR_SET] != ODP_PMR_INVAL);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR_SET] != ODP_PMR_INVALID);
}
-void test_pktio_pmr_composite_cos(void)
+void test_pktio_pmr_composite_cos(odp_bool_t enable_pktv)
{
uint32_t addr = 0;
uint32_t mask;
@@ -657,7 +852,7 @@ void test_pktio_pmr_composite_cos(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -666,12 +861,11 @@ void test_pktio_pmr_composite_cos(void)
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
parse_ipv4_string(CLS_PMR_SET_SADDR, &addr, &mask);
ip->src_addr = odp_cpu_to_be_32(addr);
- ip->chksum = 0;
- ip->chksum = odph_ipv4_csum_update(pkt);
+ odph_ipv4_csum_update(pkt);
set_first_supported_pmr_port(pkt, CLS_PMR_SET_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_SET]);
pool = odp_packet_pool(pkt);
@@ -680,44 +874,107 @@ void test_pktio_pmr_composite_cos(void)
odp_packet_free(pkt);
}
-void classification_test_pktio_configure(void)
+static void cls_pktio_configure_common(odp_bool_t enable_pktv)
{
+ odp_cls_capability_t capa;
+ int num_cos;
+
+ odp_cls_capability(&capa);
+ num_cos = capa.max_cos;
+
/* Configure the Different CoS for the pktio interface */
- if (TEST_DEFAULT)
- configure_pktio_default_cos();
- if (TEST_ERROR)
- configure_pktio_error_cos();
- if (TEST_PMR_CHAIN)
- configure_cls_pmr_chain();
- if (TEST_L2_QOS)
- configure_cos_with_l2_priority();
- if (TEST_PMR)
- configure_pmr_cos();
- if (TEST_PMR_SET)
- configure_pktio_pmr_composite();
-}
-
-void classification_test_pktio_test(void)
+ if (num_cos >= NUM_COS_DEFAULT && TEST_DEFAULT) {
+ configure_pktio_default_cos(enable_pktv);
+ tc.default_cos = 1;
+ num_cos -= NUM_COS_DEFAULT;
+ }
+ if (num_cos >= NUM_COS_DEFAULT && TEST_DROP) {
+ configure_pktio_drop_cos(enable_pktv, capa.max_cos_stats);
+ tc.drop_cos = 1;
+ num_cos -= NUM_COS_DROP;
+ }
+ if (num_cos >= NUM_COS_ERROR && TEST_ERROR) {
+ configure_pktio_error_cos(enable_pktv);
+ tc.error_cos = 1;
+ num_cos -= NUM_COS_ERROR;
+ }
+ if (num_cos >= NUM_COS_PMR_CHAIN && TEST_PMR_CHAIN) {
+ configure_cls_pmr_chain(enable_pktv);
+ tc.pmr_chain = 1;
+ num_cos -= NUM_COS_PMR_CHAIN;
+ }
+ if (num_cos >= NUM_COS_PMR && TEST_PMR) {
+ configure_pmr_cos(enable_pktv);
+ tc.pmr_cos = 1;
+ num_cos -= NUM_COS_PMR;
+ }
+ if (num_cos >= NUM_COS_COMPOSITE && TEST_PMR_SET) {
+ configure_pktio_pmr_composite(enable_pktv);
+ tc.pmr_composite_cos = 1;
+ num_cos -= NUM_COS_COMPOSITE;
+ }
+
+}
+
+static void cls_pktio_configure(void)
+{
+ cls_pktio_configure_common(false);
+}
+
+static void cls_pktio_configure_pktv(void)
+{
+ cls_pktio_configure_common(true);
+}
+
+static void cls_pktio_test_common(odp_bool_t enable_pktv)
{
/* Test Different CoS on the pktio interface */
- if (TEST_DEFAULT)
- test_pktio_default_cos();
- if (TEST_ERROR)
- test_pktio_error_cos();
- if (TEST_PMR_CHAIN)
- test_cls_pmr_chain();
- if (TEST_L2_QOS)
- test_cos_with_l2_priority();
- if (TEST_PMR)
- test_pmr_cos();
- if (TEST_PMR_SET)
- test_pktio_pmr_composite_cos();
+ if (tc.default_cos && TEST_DEFAULT)
+ test_pktio_default_cos(enable_pktv);
+ if (tc.drop_cos && TEST_DROP)
+ test_pktio_drop_cos(enable_pktv);
+ if (tc.error_cos && TEST_ERROR)
+ test_pktio_error_cos(enable_pktv);
+ if (tc.pmr_chain && TEST_PMR_CHAIN)
+ test_cls_pmr_chain(enable_pktv);
+ if (tc.pmr_cos && TEST_PMR)
+ test_pmr_cos(enable_pktv);
+ if (tc.pmr_composite_cos && TEST_PMR_SET)
+ test_pktio_pmr_composite_cos(enable_pktv);
+}
+
+static void cls_pktio_test(void)
+{
+ cls_pktio_test_common(false);
+}
+
+static void cls_pktio_test_pktv(void)
+{
+ cls_pktio_test_common(true);
+}
+
+static int check_pktv(void)
+{
+ return pktv_config.enable ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
+static int check_capa_skip_offset(void)
+{
+ return pktio_capa.set_op.op.skip_offset;
}
odp_testinfo_t classification_suite[] = {
- ODP_TEST_INFO(classification_test_pktio_set_skip),
- ODP_TEST_INFO(classification_test_pktio_set_headroom),
- ODP_TEST_INFO(classification_test_pktio_configure),
- ODP_TEST_INFO(classification_test_pktio_test),
+ ODP_TEST_INFO_CONDITIONAL(cls_pktio_set_skip, check_capa_skip_offset),
+ ODP_TEST_INFO(cls_pktio_set_headroom),
+ ODP_TEST_INFO(cls_pktio_configure),
+ ODP_TEST_INFO(cls_pktio_test),
+ ODP_TEST_INFO_CONDITIONAL(cls_queue_stats_pkt, check_queue_stats),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_testinfo_t classification_suite_pktv[] = {
+ ODP_TEST_INFO_CONDITIONAL(cls_pktio_configure_pktv, check_pktv),
+ ODP_TEST_INFO_CONDITIONAL(cls_pktio_test_pktv, check_pktv),
+ ODP_TEST_INFO_CONDITIONAL(cls_queue_stats_pktv, check_queue_stats),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
new file mode 100644
index 000000000..888613b1f
--- /dev/null
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_CLASSIFICATION_TESTSUITES_H_
+#define ODP_CLASSIFICATION_TESTSUITES_H_
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+#include <stdbool.h>
+
+typedef enum cls_packet_l4_info {
+ CLS_PKT_L4_TCP,
+ CLS_PKT_L4_UDP,
+ CLS_PKT_L4_SCTP,
+ CLS_PKT_L4_ICMP,
+ CLS_PKT_L4_GTP,
+ CLS_PKT_L4_IGMP,
+ CLS_PKT_L4_AH,
+ CLS_PKT_L4_ESP
+} cls_packet_l4_info;
+
+typedef struct cls_packet_info {
+ odp_pool_t pool;
+ bool vlan;
+ bool vlan_qinq;
+ odp_atomic_u32_t *seq;
+ cls_packet_l4_info l4_type;
+ odp_bool_t ipv6;
+ uint8_t dscp;
+ uint32_t len;
+} cls_packet_info_t;
+
+typedef union odp_cls_testcase {
+ struct {
+ uint32_t default_cos:1;
+ uint32_t drop_cos:1;
+ uint32_t error_cos:1;
+ uint32_t pmr_chain:1;
+ uint32_t pmr_cos:1;
+ uint32_t pmr_composite_cos:1;
+ };
+ uint32_t all_bits;
+} odp_cls_testcase_u;
+
+extern odp_testinfo_t classification_suite[];
+extern odp_testinfo_t classification_suite_basic[];
+extern odp_testinfo_t classification_suite_pmr[];
+extern odp_testinfo_t classification_suite_pktv[];
+
+int classification_suite_init(void);
+int classification_suite_term(void);
+
+int classification_suite_pmr_term(void);
+int classification_suite_pmr_init(void);
+
+int classification_suite_pktv_init(void);
+int classification_suite_pktv_term(void);
+
+odp_packet_t create_packet(cls_packet_info_t pkt_info);
+int cls_pkt_set_seq(odp_packet_t pkt);
+uint32_t cls_pkt_get_seq(odp_packet_t pkt);
+odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
+ odp_bool_t cls_enable);
+void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
+ odp_queue_t *queue, odp_pool_t *pool);
+int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask);
+void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio);
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns, odp_bool_t enable_pktv);
+odp_pool_t pool_create(const char *poolname);
+odp_pool_t pktv_pool_create(const char *poolname);
+odp_queue_t queue_create(const char *queuename, bool sched);
+void configure_pktio_default_cos(odp_bool_t enable_pktv);
+void test_pktio_default_cos(odp_bool_t enable_pktv);
+void configure_pktio_drop_cos(odp_bool_t enable_pktv, uint32_t max_cos_stats);
+void test_pktio_drop_cos(odp_bool_t enable_pktv);
+void configure_pktio_error_cos(odp_bool_t enable_pktv);
+void test_pktio_error_cos(odp_bool_t enable_pktv);
+void configure_cls_pmr_chain(odp_bool_t enable_pktv);
+void test_cls_pmr_chain(odp_bool_t enable_pktv);
+void configure_cos_with_l2_priority(odp_bool_t enable_pktv);
+void test_cos_with_l2_priority(odp_bool_t enable_pktv);
+void configure_pmr_cos(odp_bool_t enable_pktv);
+void test_pmr_cos(odp_bool_t enable_pktv);
+void configure_pktio_pmr_composite(odp_bool_t enable_pktv);
+void test_pktio_pmr_composite_cos(odp_bool_t enable_pktv);
+int stop_pktio(odp_pktio_t pktio);
+odp_cls_pmr_term_t find_first_supported_l3_pmr(void);
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
+
+#endif /* ODP_BUFFER_TESTSUITES_H_ */
diff --git a/test/validation/api/comp/.gitignore b/test/validation/api/comp/.gitignore
new file mode 100644
index 000000000..97aea05ab
--- /dev/null
+++ b/test/validation/api/comp/.gitignore
@@ -0,0 +1 @@
+comp_main
diff --git a/test/validation/api/comp/Makefile.am b/test/validation/api/comp/Makefile.am
new file mode 100644
index 000000000..2e5d3a26b
--- /dev/null
+++ b/test/validation/api/comp/Makefile.am
@@ -0,0 +1,7 @@
+include ../Makefile.inc
+
+test_PROGRAMS = comp_main
+
+comp_main_SOURCES = \
+ comp.c \
+ test_vectors.h
diff --git a/test/validation/api/comp/comp.c b/test/validation/api/comp/comp.c
new file mode 100644
index 000000000..b7dfcd359
--- /dev/null
+++ b/test/validation/api/comp/comp.c
@@ -0,0 +1,573 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include <odp_cunit_common.h>
+#include "test_vectors.h"
+
+#define TEST_NUM_PKT 64
+#define TEST_PKT_LEN (8 * 1024)
+
+#define SEGMENTED_TEST_PKT_LEN (16 * 1024)
+#define SEGMENTED_TEST_PATTERN 0xAA
+
+#define COMP_PACKET_POOL "packet_pool"
+#define COMP_OUT_QUEUE "comp-out"
+
+struct suite_context_s {
+ odp_comp_op_mode_t op_mode;
+ odp_pool_t pool;
+ odp_queue_t queue;
+};
+
+static struct suite_context_s suite_context;
+
+/**
+ * Check if given compression and hash algorithms are supported
+ *
+ * @param comp Compression algorithm
+ * @param hash Hash algorithm
+ *
+ * @retval ODP_TEST_ACTIVE when both algorithms are supported
+ * @retval ODP_TEST_INACTIVE when either algorithm is not supported
+ */
+static int check_comp_alg_support(odp_comp_alg_t comp,
+ odp_comp_hash_alg_t hash)
+{
+ odp_comp_capability_t capability;
+
+ if (odp_comp_capability(&capability))
+ return ODP_TEST_INACTIVE;
+
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_SYNC &&
+ capability.sync == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_ASYNC &&
+ capability.async == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+
+ /* Compression algorithms */
+ switch (comp) {
+ case ODP_COMP_ALG_NULL:
+ if (!capability.comp_algos.bit.null)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ if (!capability.comp_algos.bit.deflate)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_ZLIB:
+ if (!capability.comp_algos.bit.zlib)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_LZS:
+ if (!capability.comp_algos.bit.lzs)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ ODPH_ERR("Unsupported compression algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ /* Hash algorithms */
+ switch (hash) {
+ case ODP_COMP_HASH_ALG_NONE:
+ if (!capability.hash_algos.bit.none)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_HASH_ALG_SHA1:
+ if (!capability.hash_algos.bit.sha1)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_HASH_ALG_SHA256:
+ if (!capability.hash_algos.bit.sha256)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ ODPH_ERR("Unsupported hash algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static odp_packet_t run_comp_op(odp_comp_op_t op,
+ odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ odp_packet_t inpkt,
+ unsigned int outtext_len)
+{
+ odp_comp_session_t session;
+ odp_comp_capability_t capa;
+ odp_comp_alg_capability_t comp_capa;
+ odp_comp_hash_alg_capability_t hash_capa;
+ odp_comp_session_param_t ses_params;
+ odp_comp_packet_op_param_t op_params;
+ odp_packet_t outpkt;
+ odp_comp_packet_result_t comp_result;
+ int rc;
+
+ rc = odp_comp_capability(&capa);
+ CU_ASSERT_FATAL(!rc);
+
+ if (comp_alg == ODP_COMP_ALG_NULL &&
+ !(capa.comp_algos.bit.null))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_DEFLATE &&
+ !(capa.comp_algos.bit.deflate))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_ZLIB &&
+ !(capa.comp_algos.bit.zlib))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_LZS &&
+ !(capa.comp_algos.bit.lzs))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ if (hash_alg == ODP_COMP_HASH_ALG_NONE &&
+ !(capa.hash_algos.bit.none))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA1 &&
+ !(capa.hash_algos.bit.sha1))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA256 &&
+ !(capa.hash_algos.bit.sha256))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ rc = odp_comp_alg_capability(comp_alg, &comp_capa);
+ CU_ASSERT(!rc);
+
+ rc = odp_comp_hash_alg_capability(hash_alg, &hash_capa);
+ CU_ASSERT(!rc);
+
+ if (hash_alg == ODP_COMP_HASH_ALG_NONE &&
+ !(comp_capa.hash_algo.bit.none))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA1 &&
+ !(comp_capa.hash_algo.bit.sha1))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA256 &&
+ !(comp_capa.hash_algo.bit.sha256))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ /* Create a compression session */
+ odp_comp_session_param_init(&ses_params);
+ ses_params.op = op;
+ ses_params.comp_algo = comp_alg;
+ ses_params.hash_algo = hash_alg;
+ ses_params.compl_queue = suite_context.queue;
+ ses_params.mode = suite_context.op_mode;
+
+ session = odp_comp_session_create(&ses_params);
+ CU_ASSERT_FATAL(session != ODP_COMP_SESSION_INVALID);
+ CU_ASSERT(odp_comp_session_to_u64(session) !=
+ odp_comp_session_to_u64(ODP_COMP_SESSION_INVALID));
+
+ /* Allocate compression output packet */
+ outpkt = odp_packet_alloc(suite_context.pool, outtext_len);
+ CU_ASSERT(outpkt != ODP_PACKET_INVALID);
+
+ op_params.out_data_range.offset = 0;
+ op_params.out_data_range.length = outtext_len;
+ op_params.in_data_range.offset = 0;
+ op_params.in_data_range.length = odp_packet_len(inpkt);
+ op_params.session = session;
+
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_SYNC) {
+ rc = odp_comp_op(&inpkt, &outpkt, 1, &op_params);
+ CU_ASSERT(rc >= 0);
+ if (rc < 0)
+ goto cleanup;
+ } else {
+ odp_event_t event;
+ odp_packet_t packet;
+
+ rc = odp_comp_op_enq(&inpkt, &outpkt, 1, &op_params);
+ CU_ASSERT(rc == 1);
+ if (rc <= 0)
+ goto cleanup;
+ /* Poll completion queue for results */
+ do {
+ event = odp_queue_deq(suite_context.queue);
+ } while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(event));
+ CU_ASSERT(ODP_EVENT_PACKET_COMP ==
+ odp_event_subtype(event));
+
+ packet = odp_comp_packet_from_event(event);
+ CU_ASSERT(packet != ODP_PACKET_INVALID);
+ CU_ASSERT(packet == outpkt);
+ }
+
+ rc = odp_comp_result(&comp_result, outpkt);
+ CU_ASSERT(!rc);
+ CU_ASSERT(comp_result.status == ODP_COMP_STATUS_SUCCESS);
+ CU_ASSERT(comp_result.output_data_range.offset == 0);
+ odp_packet_trunc_tail(&outpkt,
+ odp_packet_len(outpkt) -
+ comp_result.output_data_range.length,
+ NULL, NULL);
+
+cleanup:
+
+ rc = odp_comp_session_destroy(session);
+ CU_ASSERT(!rc);
+
+ if (rc < 0) {
+ odp_packet_free(outpkt);
+ return ODP_PACKET_INVALID;
+ }
+
+ return outpkt;
+}
+
+static void packet_cmp(odp_packet_t pkt,
+ const uint8_t *text,
+ unsigned int text_len)
+{
+ odp_packet_seg_t seg;
+ uint32_t cmp_offset = 0, outlen = 0;
+ uint32_t compare_len = 0;
+ uint8_t *outdata;
+
+ seg = odp_packet_first_seg(pkt);
+ do {
+ outdata = odp_packet_seg_data(pkt, seg);
+ outlen = odp_packet_seg_data_len(pkt, seg);
+ compare_len = outlen < (text_len - cmp_offset) ?
+ outlen : (text_len - cmp_offset);
+
+ CU_ASSERT(!memcmp(outdata,
+ text + cmp_offset, compare_len));
+ cmp_offset += compare_len;
+ seg = odp_packet_next_seg(pkt, seg);
+ } while (seg != ODP_PACKET_SEG_INVALID && cmp_offset < text_len);
+}
+
+static void comp_decomp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t decomp_outpkt, comp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate compression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, plaintext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data in to pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ plaintext_len, plaintext);
+ CU_ASSERT_FATAL(!rc);
+
+ comp_outpkt = run_comp_op(ODP_COMP_OP_COMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (comp_outpkt == ODP_PACKET_INVALID)
+ goto clean_in;
+
+ decomp_outpkt = run_comp_op(ODP_COMP_OP_DECOMPRESS,
+ comp_alg, hash_alg,
+ comp_outpkt,
+ plaintext_len);
+ if (decomp_outpkt == ODP_PACKET_INVALID)
+ goto cleanup;
+
+ packet_cmp(decomp_outpkt, plaintext, plaintext_len);
+
+ odp_packet_free(decomp_outpkt);
+
+cleanup:
+ odp_packet_free(comp_outpkt);
+clean_in:
+ odp_packet_free(inpkt);
+}
+
+static void comp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t comp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate compression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, plaintext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data in to pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ plaintext_len, plaintext);
+ CU_ASSERT_FATAL(!rc);
+
+ comp_outpkt = run_comp_op(ODP_COMP_OP_COMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (comp_outpkt == ODP_PACKET_INVALID)
+ goto clean_in;
+
+ odp_packet_free(comp_outpkt);
+clean_in:
+ odp_packet_free(inpkt);
+}
+
+static void decomp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *comptext,
+ unsigned int comptext_len,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t decomp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate compression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, comptext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data in to pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ comptext_len, comptext);
+ CU_ASSERT_FATAL(!rc);
+
+ decomp_outpkt = run_comp_op(ODP_COMP_OP_DECOMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (decomp_outpkt == ODP_PACKET_INVALID)
+ goto cleanup;
+
+ packet_cmp(decomp_outpkt, plaintext, plaintext_len);
+
+ odp_packet_free(decomp_outpkt);
+cleanup:
+ odp_packet_free(inpkt);
+}
+
+static int comp_check_deflate_none(void)
+{
+ return check_comp_alg_support(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE);
+}
+
+/* Compress content using deflate algorithm */
+static void comp_test_compress_alg_deflate_none(void)
+{
+ comp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Decompress content using deflate algorithm */
+static void comp_test_decompress_alg_deflate_none(void)
+{
+ decomp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ compressed_text_def, COMP_DEFLATE_SIZE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+static int comp_check_zlib_none(void)
+{
+ return check_comp_alg_support(ODP_COMP_ALG_ZLIB,
+ ODP_COMP_HASH_ALG_NONE);
+}
+
+/* Compress content using zlib algorithm */
+static void comp_test_compress_alg_zlib_none(void)
+{
+ comp_alg_test(ODP_COMP_ALG_ZLIB, ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Decompress content using zlib algorithm */
+static void comp_test_decompress_alg_zlib_none(void)
+{
+ decomp_alg_test(ODP_COMP_ALG_ZLIB, ODP_COMP_HASH_ALG_NONE,
+ compressed_text_zlib, COMP_ZLIB_SIZE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Compress/Decompress content using deflate algorithm */
+static void comp_test_comp_decomp_alg_deflate_none(void)
+{
+ comp_decomp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Compress/Decompress content using zlib algorithm */
+static void comp_test_comp_decomp_alg_zlib_none(void)
+{
+ comp_decomp_alg_test(ODP_COMP_ALG_ZLIB,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+static int comp_suite_sync_init(void)
+{
+ suite_context.pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ suite_context.queue = ODP_QUEUE_INVALID;
+ suite_context.op_mode = ODP_COMP_OP_MODE_SYNC;
+ return 0;
+}
+
+static int comp_suite_async_init(void)
+{
+ suite_context.pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+ suite_context.queue = odp_queue_lookup(COMP_OUT_QUEUE);
+ if (suite_context.queue == ODP_QUEUE_INVALID)
+ return -1;
+
+ suite_context.op_mode = ODP_COMP_OP_MODE_ASYNC;
+ return 0;
+}
+
/* Test cases shared by the sync and async suites. Each case runs only
 * when its check function reports the algorithm/hash pair as supported
 * by the implementation. */
static odp_testinfo_t comp_suite[] = {
	ODP_TEST_INFO_CONDITIONAL(comp_test_compress_alg_deflate_none,
				  comp_check_deflate_none),
	ODP_TEST_INFO_CONDITIONAL(comp_test_compress_alg_zlib_none,
				  comp_check_zlib_none),
	ODP_TEST_INFO_CONDITIONAL(comp_test_decompress_alg_deflate_none,
				  comp_check_deflate_none),
	ODP_TEST_INFO_CONDITIONAL(comp_test_decompress_alg_zlib_none,
				  comp_check_zlib_none),
	ODP_TEST_INFO_CONDITIONAL(comp_test_comp_decomp_alg_deflate_none,
				  comp_check_deflate_none),
	ODP_TEST_INFO_CONDITIONAL(comp_test_comp_decomp_alg_zlib_none,
				  comp_check_zlib_none),
	ODP_TEST_INFO_NULL,
};
+
/* Suite names */
#define ODP_COMP_SYNC_TEST "Comp/decomp sync test"
#define ODP_COMP_ASYNC_TEST "Comp/decomp async test"

/* The same case list is run twice: once in synchronous and once in
 * asynchronous (completion queue) operation mode; only the per-suite
 * init function differs. */
static odp_suiteinfo_t comp_suites[] = {
	{ODP_COMP_SYNC_TEST, comp_suite_sync_init,
	 NULL, comp_suite},
	{ODP_COMP_ASYNC_TEST, comp_suite_async_init,
	 NULL, comp_suite},
	ODP_SUITE_INFO_NULL,
};
+
+static int comp_init(odp_instance_t *inst)
+{
+ odp_pool_param_t params;
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+ odp_pool_capability_t pool_capa;
+
+ if (0 != odp_init_global(inst, NULL, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("odp_pool_capability() failed\n");
+ return -1;
+ }
+
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = TEST_PKT_LEN;
+ params.pkt.len = TEST_PKT_LEN;
+ params.pkt.num = TEST_NUM_PKT;
+ params.type = ODP_POOL_PACKET;
+
+ if (pool_capa.pkt.max_seg_len &&
+ TEST_PKT_LEN > pool_capa.pkt.max_seg_len) {
+ ODPH_ERR("Warning: small packet segment length\n");
+ params.pkt.seg_len = pool_capa.pkt.max_seg_len;
+ }
+
+ pool = odp_pool_create(COMP_PACKET_POOL, &params);
+ if (ODP_POOL_INVALID == pool) {
+ ODPH_ERR("Packet pool creation failed\n");
+ return -1;
+ }
+
+ /* Queue to store compression/decompression events */
+ out_queue = odp_queue_create(COMP_OUT_QUEUE, NULL);
+ if (ODP_QUEUE_INVALID == out_queue) {
+ ODPH_ERR("Comp outq creation failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int comp_term(odp_instance_t inst)
+{
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+
+ out_queue = odp_queue_lookup(COMP_OUT_QUEUE);
+ if (ODP_QUEUE_INVALID != out_queue) {
+ if (odp_queue_destroy(out_queue))
+ ODPH_ERR("Comp outq destroy failed\n");
+ } else {
+ ODPH_ERR("Comp outq not found\n");
+ }
+
+ pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (ODP_POOL_INVALID != pool) {
+ if (odp_pool_destroy(pool))
+ ODPH_ERR("Packet pool destroy failed\n");
+ } else {
+ ODPH_ERR("Packet pool not found\n");
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(comp_init);
+ odp_cunit_register_global_term(comp_term);
+
+ ret = odp_cunit_register(comp_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/comp/test_vectors.h b/test/validation/api/comp/test_vectors.h
new file mode 100644
index 000000000..36d98b30d
--- /dev/null
+++ b/test/validation/api/comp/test_vectors.h
@@ -0,0 +1,1997 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_COMP_VECTORS_H_
+#define _ODP_TEST_COMP_VECTORS_H_
+
+#define PLAIN_TEXT_SIZE 8192
+
+static uint8_t plaintext[PLAIN_TEXT_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x98, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0xdc, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x94, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x1f, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x58, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x41, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xa9, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x08, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x23, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0xcd, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x0f, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xf9, 0x9c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xc2, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0xc4, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0xfb, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x50, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xba, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xe4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x30, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xd9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x61, 0x6c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0x89,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb1, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xa8, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x84, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xbd, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xd0, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x76, 0x9a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x24, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xc4, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xf8, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0xf5, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x8d, 0x7b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1a, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xdd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xc8, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xad, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x82, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xc6, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0xb9, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0xd3, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xd7, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x58, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x42, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xd4, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x9a, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x8d, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x89, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x7f, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xf9, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x78, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x40, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xde, 0xa1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x85, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xed, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xf0, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xb7, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x65, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x15, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x8c, 0x39,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xaf, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xb6, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xb6, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb1, 0x57, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0xe4, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x7e, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x0b, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x84, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x82, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x8f, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x4a, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x31, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x47, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbd, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x12, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x3f, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x47, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xc5, 0xd9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2b, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x7b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x3e, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0xb1, 0xf2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0xd3, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x81, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xee, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0xca, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x11, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xe0, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xd9, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x5e, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xa8, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x7e, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0e, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0xc5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0xd6, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0xc3, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x4f, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x84, 0xf1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x24, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x30, 0xda,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x37, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xac, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x8f, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x5a, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x82, 0xa1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x55, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xe7, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x12, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x99, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0xca, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xea, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x5d, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x1b, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xc5, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x4b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x28, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x6a, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0xfe, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x91, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x6a, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x8d, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x21, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x17, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xb7, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0xff, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0x12, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0xac, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0xe3, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x6b, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x8d, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x4a, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x2e, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x8a, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xaf, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x85, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0xc6, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x06, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xf6, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf1, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x47, 0x3e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xfe, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0xb9, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x57, 0x4c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xb6, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x9d, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0xf3, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0xf8, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x9d, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x4e, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x4d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x9e, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x67, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xe7, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x4c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x8c, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x8c, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x2e, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x2e, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x36, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x85, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x48, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x2f, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x5e, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xab, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf6, 0x47,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x80, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xae, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xcc, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x46, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x39, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2c, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x5d, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x4f, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xdf, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x81, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xf8, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x63, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0xd9, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xac, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x6e, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xcf, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0x00, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x57, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0x79, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x25, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xb3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x71, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x1b, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0xab, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x63, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x41, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xe4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0xb2, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x95, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x3e, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0xd8, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xf4, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x8d, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x36, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x42, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x5b, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x2c, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x6f, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xd2, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x6c, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xf5, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x09, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xb4, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x71, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xd3, 0xf2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xaf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xb9, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x9a, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0xf2, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x72, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xb7, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xb6, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x70, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x94, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x80, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x6a, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0xe5, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0xbd, 0xf4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x97, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xac, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xbd, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0xb8, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x2b, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x25, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7b, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x46, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x4c, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x45, 0x17,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdf, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xda,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x9c, 0x17,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x41, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x4d, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x66, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x17, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x7d, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x07, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x43, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb2, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x12, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x6f, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xc0, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xd0, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0x77, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0xe1, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x10, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x1a, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x4a, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x69, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x61,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x7f, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x25, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x23, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x45, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xd7, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9d, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xac, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xe6, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x38, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0xd6, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xae, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0xb9, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x62, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xb2, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x28, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xfd, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x5e, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x7a, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x83,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x02, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xec, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x72, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x37, 0xa1, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x29, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x3a, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0x2f, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x6b, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x60, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x6e, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xef, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x3f, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x1d, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0xaa, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xb9, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xc3, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x04, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xb2, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x54, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0xdb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x2d, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x3e, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xa1, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3c, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0xce, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xc4, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xcb, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0xd6, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x41, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x65, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xa6, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xb6, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x53, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xba,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x9f, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x60, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xf4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0xe1, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x84, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xec, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xc9, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0x27, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0xd2, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x56, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0xf0, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x86, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xad, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x2c, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x51, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xbd, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x9d, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xaa, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x51, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x8d, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xab, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0xfb, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0xa1, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x0d, 0xcb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x10, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x11, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x90, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x32, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x2b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x91, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x42, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x4f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x7b, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0xac, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0xb2, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xd3, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xe5, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd4, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xf2, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xfb, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x45, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x03, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0xa4, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x14, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x95, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, 0xfe, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xf5, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x7a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0xb7, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0xa0, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xa0, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x01, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xb1, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x43, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x6d, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x67, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x38, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x75, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x83, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x73, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x43, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x3d, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0xe0, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xbc, 0x2b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xd4, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x2e, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xaf, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xe8, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x61,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x17, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xe7, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x62, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xed, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xfb, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xd7, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x7c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x16, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x53, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x01, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x85, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xcd, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x1a, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xcf, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x2d, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x2b, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0xe4, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xc9, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x62, 0xe5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xe1, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x19, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0xdc, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x04, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x56, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x46, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x8d, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xcb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x15, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x26, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x57, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xfa, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xad, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xc8, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x39, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0xd7, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x9c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x48, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0xfa, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x72, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xf8, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xca, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0xcf, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xdc, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xb7, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xed, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xf7, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x92, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0e, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x35, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xed, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xe4, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc7, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x97, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x39,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xb5, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xb1, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xde, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x8f, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xdc, 0xe5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x42, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x6d, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0xa2, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x17, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x09, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x8f, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x6e, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x4d, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xb3, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x31, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x4b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x4e, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdb, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x66, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xba,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0x7f, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x40, 0x45,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x62, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xf0, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0xb1, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x37, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x35, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0xc6, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xee, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x7f, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xd4, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xaf, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xe0, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xd2, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x2e, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x68, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x24, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x0a, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x2a, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x56, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8d, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xa5, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xcb, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0xaa, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xf1, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x16, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x06, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x80, 0x45,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x26, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x86, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x75, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x03, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x97, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x20, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x08, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x9e, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x02, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xe9, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xd9, 0xc5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x17, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x13, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x6a, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x19,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x26, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xf1, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x33, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xa2, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x67, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x42, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xb3, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xc0, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd9, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x52, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0xdb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xa4, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0xa9, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xd0, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x47, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xef, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2, 0xfc, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x9d, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x62, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x79, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0xaa, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xcc, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x1f, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x1a, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xed, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x6c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0xd4, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xa8, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x0f, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0x3e, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x5b, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x09, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x44, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x46, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x76, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xac, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x7c, 0x83,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xac, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x48
+};
+
+/** Length of the pre-compressed data using deflate algorithm */
+#define COMP_DEFLATE_SIZE 3769
+
+/** Pre-compressed data using deflate algorithm */
+static uint8_t compressed_text_def[COMP_DEFLATE_SIZE] = {
+ 0x35, 0x99, 0x7b, 0x5c, 0xcf, 0x77, 0xfb, 0xc7,
+ 0xdf, 0xe5, 0x18, 0x8b, 0xb0, 0x8a, 0xd8, 0x72,
+ 0x9e, 0xb3, 0x64, 0x0c, 0x91, 0x39, 0xe6, 0x90,
+ 0xd3, 0x44, 0x6e, 0xa7, 0x9b, 0x8c, 0x26, 0x87,
+ 0x85, 0x9b, 0xcd, 0x34, 0xe5, 0x7c, 0x33, 0x8d,
+ 0xed, 0x76, 0x3e, 0xe6, 0x9c, 0x8a, 0x08, 0x35,
+ 0x16, 0x33, 0x67, 0x99, 0x15, 0xe5, 0xd0, 0xb4,
+ 0xe6, 0xd4, 0x9c, 0x8a, 0xe4, 0x14, 0x39, 0xfc,
+ 0x7e, 0x8f, 0xf7, 0xf3, 0xb5, 0xfd, 0xb1, 0xd7,
+ 0x5e, 0xeb, 0xfb, 0xfd, 0x7c, 0xde, 0x87, 0xeb,
+ 0x7a, 0x5d, 0xaf, 0xeb, 0xfa, 0x1a, 0xf3, 0xff,
+ 0xff, 0x04, 0x77, 0xf9, 0xce, 0xd8, 0x7f, 0x4e,
+ 0xd5, 0xb2, 0x30, 0x76, 0x55, 0x5b, 0xf8, 0x94,
+ 0x6e, 0x16, 0xfa, 0x65, 0xed, 0x81, 0xbf, 0x1b,
+ 0x6a, 0xa1, 0xc7, 0xb2, 0xbb, 0xf0, 0xdc, 0x41,
+ 0x16, 0xea, 0x7b, 0x7e, 0x0f, 0x3f, 0x1f, 0x66,
+ 0xe1, 0xd0, 0xa0, 0xaa, 0xf0, 0xd8, 0x2b, 0x16,
+ 0x0a, 0x3a, 0xec, 0x85, 0x17, 0x7d, 0x68, 0xe1,
+ 0xd6, 0xce, 0x9b, 0xf0, 0xae, 0x37, 0x2c, 0x84,
+ 0x99, 0x7f, 0xc3, 0x8f, 0x05, 0x59, 0x08, 0x28,
+ 0x5d, 0x1d, 0x5e, 0xf8, 0x91, 0x85, 0xaa, 0xb5,
+ 0xdc, 0xe0, 0x77, 0xef, 0x59, 0xb8, 0x73, 0x7e,
+ 0x29, 0x3c, 0xb2, 0x93, 0x85, 0x69, 0xe5, 0xcb,
+ 0xc1, 0x07, 0xd7, 0xb1, 0xd0, 0xe4, 0xe5, 0x3a,
+ 0xf8, 0xa8, 0xc9, 0x16, 0x9a, 0x1f, 0xcb, 0x82,
+ 0x6f, 0x64, 0x3d, 0x67, 0x4f, 0xec, 0x80, 0xaf,
+ 0x29, 0x65, 0x61, 0x4c, 0x51, 0x08, 0xdc, 0x7b,
+ 0x98, 0x05, 0xe7, 0xbe, 0x6b, 0xe1, 0x49, 0x7c,
+ 0xbe, 0xd9, 0xa1, 0xc6, 0xf0, 0x41, 0xb7, 0x2d,
+ 0x6c, 0xf1, 0x9a, 0xa4, 0xf7, 0x65, 0x5a, 0xa8,
+ 0x33, 0x72, 0x3c, 0x7c, 0xd8, 0x42, 0x0b, 0x25,
+ 0xf6, 0xb6, 0x81, 0xbb, 0x6f, 0xe1, 0x6b, 0xd1,
+ 0xac, 0xd3, 0xdc, 0x1b, 0x6c, 0x61, 0xf8, 0xdc,
+ 0x51, 0xf0, 0x8c, 0x68, 0x0b, 0xb1, 0x87, 0xf3,
+ 0xe0, 0x09, 0x8b, 0x2d, 0x9c, 0x4f, 0xeb, 0x0f,
+ 0x3f, 0x75, 0xdd, 0xc2, 0xda, 0x69, 0x6b, 0xe0,
+ 0xfb, 0x37, 0x58, 0x08, 0xa8, 0xfd, 0x04, 0x5e,
+ 0x61, 0xbe, 0x85, 0x72, 0x27, 0x5a, 0xc2, 0x67,
+ 0x7f, 0x60, 0xe1, 0x3f, 0x85, 0xcb, 0xe1, 0x1d,
+ 0xf8, 0x7b, 0x8d, 0x67, 0xe1, 0xf0, 0x76, 0x87,
+ 0x2d, 0x64, 0x45, 0xce, 0x80, 0x2f, 0xc8, 0xb7,
+ 0x10, 0xea, 0x51, 0x53, 0xeb, 0xfb, 0xd3, 0x82,
+ 0xef, 0x99, 0x92, 0xf0, 0x4d, 0x19, 0x3c, 0xe6,
+ 0x98, 0xde, 0x7f, 0xa3, 0xd0, 0xc2, 0xeb, 0x5d,
+ 0x65, 0xe1, 0xc1, 0xc4, 0x8b, 0xef, 0xec, 0xe2,
+ 0x70, 0x07, 0xee, 0x71, 0xe6, 0xa9, 0xda, 0xf0,
+ 0x95, 0x0d, 0x2c, 0xdc, 0x3f, 0xa8, 0xfd, 0x66,
+ 0xf5, 0xb0, 0x30, 0x2e, 0xfd, 0x3e, 0x7c, 0xc5,
+ 0x57, 0x16, 0x96, 0x5c, 0xd1, 0xf3, 0x5a, 0x6d,
+ 0xb5, 0x30, 0x74, 0xd0, 0x00, 0x78, 0x83, 0xde,
+ 0x16, 0x72, 0x3b, 0xf2, 0x1e, 0xb3, 0x87, 0xf8,
+ 0xf2, 0xc9, 0xe0, 0xff, 0x9b, 0x22, 0xd6, 0xe9,
+ 0xbd, 0xe6, 0x2a, 0x3c, 0xfe, 0x37, 0x0b, 0x6d,
+ 0x23, 0x27, 0xc0, 0x03, 0x96, 0xf0, 0xfd, 0x85,
+ 0xda, 0x5f, 0x65, 0xe2, 0xf2, 0x5a, 0xb8, 0xee,
+ 0x63, 0x28, 0xef, 0x73, 0x7c, 0xf9, 0x0e, 0xee,
+ 0x41, 0xbc, 0xbf, 0xfd, 0x7a, 0x19, 0xbc, 0xd3,
+ 0xcf, 0xbc, 0xe6, 0xd3, 0x63, 0xf0, 0x57, 0x75,
+ 0x2d, 0xc4, 0x64, 0x6f, 0x82, 0xb7, 0x39, 0xce,
+ 0xdf, 0xe7, 0xfd, 0x0d, 0xaf, 0xbf, 0xcd, 0xc2,
+ 0x85, 0x3c, 0xc5, 0xe3, 0xdf, 0xed, 0x2d, 0x94,
+ 0xc8, 0xd7, 0x7a, 0xdb, 0xfe, 0x6a, 0x21, 0x2c,
+ 0x49, 0xf1, 0xb5, 0xec, 0xb4, 0x85, 0xa9, 0xa3,
+ 0x5b, 0xc1, 0xaf, 0x96, 0xb7, 0x70, 0xc4, 0xd5,
+ 0x01, 0x3e, 0x92, 0xf8, 0x58, 0xb8, 0xb8, 0x35,
+ 0xfc, 0x25, 0xf1, 0x3f, 0x34, 0xfe, 0x00, 0xfc,
+ 0x67, 0xd6, 0x13, 0x9d, 0xf8, 0x31, 0x7c, 0x35,
+ 0x79, 0x5b, 0x3e, 0x51, 0xf1, 0xbc, 0xfc, 0x53,
+ 0x0b, 0x7b, 0x03, 0xdf, 0x83, 0x3f, 0x68, 0x61,
+ 0xe1, 0xf1, 0x6d, 0xbd, 0x7f, 0x5f, 0x5f, 0xd6,
+ 0x37, 0x93, 0x7b, 0x35, 0x8f, 0x88, 0xff, 0x17,
+ 0x65, 0x95, 0x1f, 0xe6, 0x17, 0xfb, 0xef, 0x7b,
+ 0x73, 0xd3, 0xa0, 0x9b, 0xb8, 0xb7, 0x36, 0xb3,
+ 0x67, 0xc1, 0x73, 0xca, 0x58, 0x48, 0x59, 0xc2,
+ 0xba, 0x4d, 0xd9, 0x50, 0x0b, 0xe7, 0x7a, 0x68,
+ 0xfd, 0x69, 0xc4, 0x5d, 0xb7, 0x66, 0x8b, 0xe0,
+ 0x7e, 0x8e, 0x16, 0x3e, 0xf7, 0x3b, 0x08, 0x3f,
+ 0xbc, 0xc2, 0x82, 0x67, 0x45, 0xe9, 0x47, 0x2d,
+ 0xde, 0xff, 0x61, 0x7b, 0x74, 0xc2, 0x44, 0x13,
+ 0xef, 0xd5, 0xfc, 0x78, 0x8f, 0x99, 0xf1, 0x90,
+ 0xef, 0x9f, 0x24, 0xef, 0xcc, 0xc9, 0x37, 0x16,
+ 0xde, 0x6f, 0xf8, 0x02, 0x3e, 0x85, 0xb8, 0x1e,
+ 0xec, 0xbb, 0x42, 0x7f, 0x9f, 0x6d, 0x61, 0xf8,
+ 0xde, 0x02, 0x78, 0x4f, 0xe2, 0x68, 0x7a, 0xba,
+ 0xd6, 0x3b, 0xaa, 0xa9, 0x05, 0x9f, 0x59, 0xe8,
+ 0x8e, 0x09, 0xfd, 0x8b, 0xe7, 0x3f, 0xe4, 0x1e,
+ 0x4d, 0x6d, 0xe2, 0xab, 0x42, 0x8a, 0xe2, 0x63,
+ 0x03, 0xfb, 0x73, 0xaa, 0xa0, 0xf8, 0xcd, 0xfa,
+ 0x97, 0x85, 0x98, 0xeb, 0x49, 0xf0, 0x8c, 0x21,
+ 0x16, 0xe2, 0x32, 0x15, 0x7f, 0x05, 0x15, 0x2c,
+ 0x54, 0x1d, 0xfe, 0x5f, 0xb8, 0x4b, 0x0d, 0x0b,
+ 0xf1, 0xd1, 0xca, 0x17, 0x1f, 0x9e, 0xd7, 0x7c,
+ 0x26, 0x3a, 0x68, 0xce, 0xdf, 0xb1, 0x70, 0xab,
+ 0x5c, 0x36, 0xbc, 0xef, 0x49, 0x0b, 0xdd, 0x2e,
+ 0x3f, 0x87, 0xfb, 0x91, 0x8f, 0xae, 0xc7, 0xc9,
+ 0x2b, 0x33, 0x94, 0x73, 0xfe, 0xb9, 0xcf, 0x6b,
+ 0xf8, 0x97, 0xf0, 0x9a, 0x73, 0x1f, 0xc3, 0xab,
+ 0xc0, 0x0f, 0xfd, 0xa3, 0x17, 0x6b, 0x03, 0x2d,
+ 0x7c, 0xeb, 0xf5, 0x07, 0xfc, 0x19, 0xfb, 0x2c,
+ 0xfb, 0xc9, 0x4f, 0xf0, 0x1b, 0x9c, 0xbb, 0x47,
+ 0x9c, 0xd6, 0x5b, 0x8d, 0xef, 0x87, 0x2f, 0x51,
+ 0x3c, 0xd4, 0x42, 0x3f, 0x0a, 0x07, 0x13, 0xb7,
+ 0xa6, 0x3e, 0xeb, 0x2f, 0xac, 0x3c, 0x1c, 0xbe,
+ 0x95, 0x38, 0xac, 0x3a, 0x5b, 0xf9, 0x51, 0x89,
+ 0xfc, 0x3e, 0x30, 0x80, 0xba, 0x61, 0x52, 0x58,
+ 0x77, 0xef, 0x3b, 0x5a, 0xef, 0xdd, 0x74, 0x0b,
+ 0xab, 0x2a, 0x9e, 0x85, 0x7b, 0xb3, 0xcf, 0x56,
+ 0xab, 0xeb, 0xc3, 0xaf, 0x53, 0x67, 0xa6, 0xa7,
+ 0xfc, 0x08, 0xef, 0xc5, 0x73, 0xda, 0xdd, 0xdf,
+ 0x05, 0xff, 0xd8, 0xc3, 0x42, 0xf2, 0x30, 0x57,
+ 0xf8, 0xbf, 0x27, 0xf2, 0xfc, 0xaa, 0x33, 0xe1,
+ 0x5f, 0x71, 0xff, 0xaf, 0x4e, 0x46, 0xc1, 0xcf,
+ 0xf5, 0xb4, 0x30, 0xbe, 0x9e, 0xd6, 0x5b, 0x82,
+ 0xf8, 0x89, 0x0b, 0x91, 0x7e, 0xcc, 0xe7, 0x7c,
+ 0x6b, 0xbc, 0x75, 0x81, 0x37, 0x7c, 0x65, 0x21,
+ 0xe6, 0x07, 0xee, 0xd9, 0x78, 0x80, 0x03, 0x42,
+ 0x88, 0x13, 0xb3, 0x39, 0xc6, 0xc2, 0x91, 0x48,
+ 0x67, 0x78, 0x68, 0x2e, 0xfb, 0xad, 0x41, 0xdc,
+ 0x9a, 0x29, 0xb7, 0x2c, 0xf8, 0xb8, 0x53, 0xc7,
+ 0x4c, 0x71, 0x27, 0x0b, 0x43, 0x93, 0x74, 0x9e,
+ 0xe9, 0xec, 0xb3, 0xe5, 0xbb, 0xa9, 0xf0, 0x65,
+ 0x7d, 0x2d, 0xec, 0xab, 0xb8, 0x07, 0x1e, 0xcf,
+ 0xb9, 0xdc, 0x8c, 0xa3, 0xae, 0x99, 0x7c, 0xce,
+ 0xed, 0xf6, 0xad, 0x95, 0xf0, 0x0d, 0xe4, 0x45,
+ 0x9f, 0x71, 0xec, 0xc3, 0x78, 0x13, 0xa7, 0xae,
+ 0xaa, 0x8b, 0xa6, 0x7b, 0x33, 0x0b, 0x6f, 0x7a,
+ 0x70, 0x2e, 0x66, 0x36, 0xba, 0xd1, 0xbb, 0x89,
+ 0xf2, 0x65, 0x27, 0xbc, 0xf4, 0x22, 0xdd, 0x77,
+ 0x68, 0x23, 0x0b, 0x19, 0xf1, 0xa7, 0xe0, 0x09,
+ 0xe8, 0xe5, 0xa2, 0x79, 0xd7, 0xe1, 0xf5, 0x39,
+ 0xef, 0x80, 0x53, 0xab, 0xe1, 0xdd, 0x2a, 0x59,
+ 0x58, 0x53, 0x52, 0xfb, 0x2b, 0xc3, 0x7d, 0x25,
+ 0x3f, 0x27, 0xce, 0xcc, 0x25, 0xea, 0x51, 0xb9,
+ 0xc7, 0x17, 0xe1, 0x95, 0x77, 0x5b, 0x88, 0xf6,
+ 0xf3, 0x85, 0x77, 0xce, 0xb7, 0x10, 0xf7, 0x36,
+ 0x0d, 0x3e, 0x84, 0xfa, 0xf8, 0xe4, 0x60, 0x3f,
+ 0xf8, 0xf7, 0xb1, 0x16, 0xfc, 0x03, 0xfd, 0xe1,
+ 0x57, 0xd6, 0x5b, 0x68, 0x9c, 0xc8, 0xbe, 0xcc,
+ 0x5a, 0x74, 0xd9, 0x69, 0xbd, 0xf2, 0xb3, 0x23,
+ 0xfb, 0xce, 0x79, 0x42, 0x1e, 0x99, 0x92, 0xac,
+ 0xe7, 0x44, 0xa1, 0xfc, 0x43, 0x73, 0xea, 0x40,
+ 0xfc, 0xfa, 0xfd, 0xf0, 0xf3, 0xc4, 0xc7, 0x96,
+ 0xde, 0xfa, 0xfc, 0xdc, 0x5e, 0x16, 0xc2, 0x37,
+ 0x48, 0xdf, 0x1a, 0x13, 0x0f, 0xbb, 0x82, 0xc9,
+ 0x5b, 0x93, 0xf1, 0xb9, 0x85, 0x69, 0x77, 0xa4,
+ 0xbf, 0x7e, 0x9c, 0x6b, 0xb6, 0xbf, 0xea, 0xb5,
+ 0x37, 0x79, 0x5b, 0x6d, 0xb1, 0xf4, 0x27, 0xb7,
+ 0x9d, 0x85, 0x1e, 0x8b, 0x15, 0x1f, 0x27, 0x88,
+ 0x77, 0xaf, 0x26, 0x5f, 0xc2, 0x9f, 0xa3, 0x8b,
+ 0xd5, 0x9b, 0x50, 0x47, 0x4c, 0x2d, 0xd6, 0x3b,
+ 0xaf, 0xe5, 0x25, 0xf8, 0xf8, 0x79, 0x16, 0x8a,
+ 0xe6, 0x11, 0x67, 0x26, 0x81, 0xf8, 0x2a, 0xd5,
+ 0x4d, 0xf1, 0x58, 0x1c, 0x3d, 0x7b, 0xda, 0x34,
+ 0x4c, 0xef, 0xf3, 0xb1, 0x50, 0xb6, 0x6c, 0x65,
+ 0xf8, 0xc1, 0x7a, 0x3c, 0x7f, 0x78, 0x75, 0xf8,
+ 0x21, 0xea, 0xc9, 0xfc, 0x58, 0x6f, 0xf8, 0xf1,
+ 0x38, 0x0b, 0xbe, 0xcf, 0xfd, 0xe0, 0x25, 0xa8,
+ 0xcf, 0x8f, 0x23, 0x52, 0xf5, 0x3c, 0xf4, 0x38,
+ 0x73, 0xb7, 0x78, 0xf0, 0x17, 0x16, 0x9a, 0xff,
+ 0x26, 0x3f, 0x97, 0x54, 0x64, 0x61, 0x75, 0xd7,
+ 0xff, 0xc1, 0xfb, 0xe2, 0x07, 0xb7, 0xb4, 0xee,
+ 0x0e, 0xbf, 0xc5, 0xbd, 0xbf, 0xdf, 0x88, 0x38,
+ 0x30, 0xe9, 0xc1, 0x3c, 0x6f, 0x98, 0xfc, 0xc8,
+ 0xc7, 0xdc, 0xf7, 0x8b, 0x3e, 0xd3, 0xe1, 0xc3,
+ 0xa9, 0xbb, 0x51, 0x7f, 0x3d, 0x82, 0x17, 0x70,
+ 0x0e, 0x2e, 0xb3, 0xd0, 0x55, 0x13, 0x4d, 0x9c,
+ 0x3d, 0x2f, 0x3c, 0xaf, 0xf5, 0xb2, 0x8e, 0x65,
+ 0xa3, 0x54, 0x5f, 0x1c, 0xd0, 0xd5, 0x23, 0x99,
+ 0xaa, 0x7f, 0xfb, 0xa9, 0xb7, 0xc9, 0x71, 0x09,
+ 0xf0, 0xce, 0x9c, 0xe7, 0xd7, 0x13, 0xc5, 0x5f,
+ 0x6d, 0xb6, 0xd0, 0x3d, 0x55, 0xf1, 0x36, 0x96,
+ 0xf7, 0xfe, 0x6d, 0xb8, 0x67, 0x53, 0x0b, 0x1d,
+ 0x49, 0x0b, 0xc4, 0x27, 0x1a, 0x0f, 0xde, 0xfb,
+ 0xc7, 0xf4, 0xce, 0xfa, 0x3c, 0x79, 0x1e, 0x52,
+ 0x47, 0x7a, 0x32, 0x73, 0x9f, 0x05, 0xff, 0x49,
+ 0x3e, 0xe2, 0x3c, 0xb7, 0x5f, 0xd5, 0xae, 0xf0,
+ 0x3a, 0x9c, 0xe3, 0xbe, 0x58, 0xed, 0xa7, 0xdb,
+ 0x38, 0x0b, 0x73, 0x47, 0xa9, 0x1e, 0xf5, 0xc7,
+ 0x87, 0xb7, 0xe9, 0x80, 0x6e, 0x98, 0x65, 0xf8,
+ 0xcd, 0xa2, 0x04, 0xf9, 0x91, 0x66, 0xf8, 0xca,
+ 0xd5, 0xcb, 0x3b, 0xc2, 0x97, 0x12, 0xd7, 0xde,
+ 0xbe, 0xd2, 0x83, 0x40, 0xe2, 0xaa, 0xf3, 0x55,
+ 0xe9, 0xe9, 0x43, 0xea, 0xd4, 0xda, 0xa7, 0xdc,
+ 0xab, 0x49, 0x66, 0x1d, 0xf7, 0x22, 0xb5, 0xfe,
+ 0x1c, 0xe2, 0xae, 0x4e, 0xcb, 0xb7, 0xf0, 0x54,
+ 0xea, 0x70, 0xe9, 0x8e, 0x47, 0xe0, 0xcf, 0x22,
+ 0xf9, 0xfc, 0x10, 0xf1, 0x9b, 0xf8, 0xa1, 0xe1,
+ 0x8d, 0xa2, 0xb4, 0x5e, 0xf2, 0x64, 0xc4, 0x97,
+ 0x4b, 0xe1, 0x31, 0xe4, 0xe7, 0xc5, 0x8b, 0x5a,
+ 0x4f, 0x02, 0xf7, 0x92, 0x36, 0x5e, 0xf9, 0x30,
+ 0x8f, 0xf3, 0x7c, 0xf5, 0x4c, 0xfe, 0x34, 0x20,
+ 0xcd, 0xc2, 0x55, 0x27, 0x7c, 0x92, 0x69, 0xf1,
+ 0x89, 0x85, 0xbb, 0xfb, 0x5d, 0xe0, 0x19, 0xc9,
+ 0x16, 0xc6, 0x4c, 0x52, 0xbd, 0x9b, 0xcd, 0x3e,
+ 0x3e, 0x4f, 0x57, 0x3d, 0x5f, 0x15, 0x6f, 0x21,
+ 0x53, 0x76, 0xc2, 0x44, 0xa3, 0x17, 0x0b, 0xd6,
+ 0xc8, 0x4f, 0x4e, 0x45, 0x6f, 0x46, 0x17, 0xc8,
+ 0x5f, 0x85, 0x92, 0x0f, 0x83, 0x27, 0xab, 0x7f,
+ 0x59, 0x04, 0x6f, 0x9f, 0x24, 0xff, 0x13, 0x84,
+ 0x2e, 0x44, 0x24, 0xaa, 0xff, 0xa9, 0x8f, 0xbe,
+ 0x75, 0x0e, 0xdd, 0x09, 0xcf, 0xa6, 0x6f, 0x09,
+ 0x5b, 0x46, 0x1e, 0x9b, 0x6d, 0xd4, 0x81, 0x85,
+ 0x11, 0xaa, 0x9f, 0xbd, 0x89, 0xef, 0xc0, 0x10,
+ 0xad, 0xff, 0x5f, 0xf8, 0xfa, 0xf4, 0x1c, 0xc5,
+ 0x47, 0xbf, 0x11, 0x16, 0x76, 0x1d, 0x7e, 0x0a,
+ 0x8f, 0x23, 0xde, 0xe6, 0xaf, 0xf4, 0x84, 0x2f,
+ 0xa7, 0x5f, 0x89, 0x88, 0x53, 0x7f, 0x91, 0x5b,
+ 0xda, 0x82, 0xfb, 0x61, 0xf2, 0xca, 0xdc, 0x26,
+ 0x6f, 0xe6, 0xfd, 0xa4, 0xfd, 0x3d, 0x9e, 0x66,
+ 0x61, 0x71, 0x43, 0xe9, 0xd3, 0x7b, 0xe8, 0xd0,
+ 0x98, 0x3a, 0xf2, 0x2f, 0x8f, 0xc9, 0xbb, 0xb0,
+ 0x19, 0xac, 0xcb, 0x1c, 0xe5, 0x5e, 0xc2, 0xba,
+ 0xfe, 0x00, 0xff, 0x19, 0x1d, 0xa8, 0xe9, 0x2f,
+ 0xff, 0xf8, 0x5a, 0xf5, 0xa5, 0x8b, 0x3b, 0x7c,
+ 0x0c, 0xe7, 0xff, 0xc7, 0x5f, 0xd2, 0x9f, 0x91,
+ 0xf8, 0x86, 0xb2, 0xeb, 0xf4, 0xf7, 0x51, 0xec,
+ 0x27, 0xbe, 0xc3, 0x40, 0x78, 0x10, 0x7e, 0x29,
+ 0xb9, 0x97, 0xf2, 0x6b, 0x0e, 0xf9, 0xba, 0x7f,
+ 0xcc, 0xef, 0xf0, 0xb1, 0xac, 0xbb, 0xa9, 0xbb,
+ 0xf4, 0xb2, 0x0d, 0x71, 0xf7, 0xee, 0x5b, 0x74,
+ 0xc8, 0xc4, 0x13, 0x8f, 0x1f, 0x95, 0xd2, 0x7d,
+ 0xb9, 0x91, 0x47, 0xff, 0xeb, 0x24, 0x3f, 0x10,
+ 0x87, 0x1f, 0x2d, 0x95, 0xa0, 0xf3, 0xf4, 0x24,
+ 0xde, 0x7f, 0xaa, 0xa8, 0x7c, 0x9a, 0x40, 0xbf,
+ 0x56, 0x21, 0x12, 0x5f, 0x6f, 0x3e, 0x26, 0x9e,
+ 0x1b, 0x7f, 0xa9, 0xfd, 0x46, 0x72, 0x6f, 0x8f,
+ 0x8e, 0xaa, 0x1f, 0xe8, 0x43, 0x3c, 0x2f, 0x4c,
+ 0x2b, 0x06, 0xcf, 0x60, 0xfd, 0x89, 0x5f, 0x6d,
+ 0xd1, 0xfe, 0xf6, 0x58, 0x68, 0x71, 0x43, 0xfd,
+ 0xcc, 0xaf, 0xc4, 0xc3, 0x69, 0x97, 0x8a, 0xf0,
+ 0xdb, 0x3c, 0xa7, 0xb6, 0x87, 0xf2, 0x75, 0x0e,
+ 0xeb, 0x0a, 0xee, 0xe1, 0x05, 0xbf, 0x1a, 0x61,
+ 0x21, 0x6f, 0x2c, 0x79, 0x6e, 0x56, 0x8c, 0xb4,
+ 0x50, 0x31, 0x7c, 0x23, 0x3c, 0x97, 0xba, 0xd3,
+ 0xa5, 0x0e, 0x7d, 0x87, 0x69, 0x4d, 0xbf, 0xe6,
+ 0x58, 0x4b, 0x7a, 0x79, 0x15, 0xbd, 0xcd, 0xe9,
+ 0xa2, 0x7e, 0xba, 0x0c, 0x7a, 0x58, 0x78, 0x85,
+ 0x75, 0x9b, 0xf5, 0xc4, 0xc5, 0x57, 0x71, 0xd2,
+ 0x53, 0xa7, 0xd1, 0x16, 0x2e, 0xfc, 0xad, 0xf3,
+ 0xdb, 0xc6, 0xf7, 0x56, 0xb4, 0x52, 0xfe, 0xfd,
+ 0x8a, 0xef, 0x78, 0x7a, 0xf9, 0x32, 0xdc, 0x13,
+ 0x3f, 0xb2, 0x7c, 0xb7, 0xf2, 0x31, 0x86, 0x3e,
+ 0x65, 0xf6, 0x41, 0xad, 0x27, 0x85, 0xfe, 0x69,
+ 0x7c, 0x10, 0x7d, 0x83, 0xe9, 0x9e, 0x62, 0x61,
+ 0x77, 0x82, 0xf8, 0x52, 0xea, 0xce, 0xf9, 0x7a,
+ 0x8a, 0x5f, 0x37, 0x7c, 0xc8, 0x17, 0x6f, 0xa4,
+ 0x3f, 0x87, 0xf0, 0x6d, 0x71, 0xc3, 0xe9, 0x5b,
+ 0xcc, 0x37, 0x7c, 0x7e, 0xfb, 0x37, 0xe8, 0xae,
+ 0x29, 0x98, 0x63, 0x61, 0xbf, 0x63, 0x07, 0x78,
+ 0x34, 0xfa, 0x91, 0x92, 0xab, 0xbf, 0xaf, 0x66,
+ 0x1d, 0x09, 0x93, 0xa5, 0xa7, 0xc7, 0x4e, 0x58,
+ 0xf8, 0x64, 0x93, 0xfc, 0x44, 0x03, 0xde, 0x7b,
+ 0xae, 0x3e, 0x71, 0x61, 0x4a, 0xe3, 0x63, 0x52,
+ 0xdb, 0xd0, 0x07, 0x9a, 0x91, 0xf8, 0xfa, 0xb3,
+ 0x4d, 0x15, 0xff, 0xc7, 0xf1, 0xe3, 0x11, 0xe3,
+ 0x54, 0x5f, 0x86, 0x53, 0xaf, 0x27, 0x8e, 0xd0,
+ 0x79, 0x17, 0xc3, 0x67, 0xd5, 0x9b, 0x28, 0x3f,
+ 0xff, 0x07, 0xfe, 0xc2, 0xff, 0x91, 0xfc, 0xe7,
+ 0x95, 0xea, 0x16, 0x42, 0xda, 0xeb, 0x3e, 0xab,
+ 0x50, 0x67, 0xf3, 0x3e, 0x50, 0x3c, 0x5f, 0x44,
+ 0x37, 0xd2, 0x63, 0xa4, 0x1f, 0xab, 0xd9, 0xaf,
+ 0xff, 0x41, 0xf9, 0xc1, 0xe9, 0x3c, 0xff, 0x3b,
+ 0x23, 0x3f, 0x66, 0xc8, 0xaf, 0x9a, 0xc7, 0xf5,
+ 0xf7, 0x81, 0xe8, 0xc5, 0x9a, 0xe2, 0xfa, 0x7e,
+ 0x06, 0xe7, 0x5c, 0x39, 0x41, 0x7e, 0xe1, 0x02,
+ 0x79, 0xfd, 0x36, 0x40, 0xf7, 0x75, 0xfb, 0x9a,
+ 0x85, 0xcc, 0xc6, 0xf8, 0x32, 0x73, 0x9e, 0xfb,
+ 0xea, 0xe2, 0x3b, 0x17, 0xbe, 0x85, 0xfa, 0xfd,
+ 0xc3, 0x26, 0xf5, 0x1b, 0xa7, 0xd0, 0x31, 0x87,
+ 0xb6, 0xf2, 0xc7, 0xef, 0xf0, 0xf7, 0x67, 0x7f,
+ 0x47, 0x37, 0x4d, 0x03, 0xf2, 0x3c, 0xf3, 0xc4,
+ 0x7f, 0xe0, 0xae, 0xe8, 0xbe, 0xc3, 0x39, 0xe5,
+ 0x4b, 0x27, 0x9e, 0xd3, 0xf4, 0x72, 0x3e, 0xfc,
+ 0x21, 0x3a, 0xea, 0xda, 0x41, 0xfe, 0xdb, 0x91,
+ 0x7a, 0xb0, 0x60, 0xb4, 0xf2, 0x7f, 0x24, 0xfe,
+ 0x39, 0x6c, 0xbb, 0xfa, 0xcf, 0x4a, 0x63, 0x2c,
+ 0x04, 0x25, 0xca, 0x2f, 0x6c, 0x18, 0x6b, 0x61,
+ 0x6c, 0x7f, 0xf9, 0xb9, 0xd7, 0x87, 0x2c, 0x4c,
+ 0xde, 0x48, 0x9d, 0x36, 0xb3, 0x4a, 0xb0, 0xad,
+ 0x11, 0xea, 0x87, 0x26, 0xa1, 0x9b, 0xa3, 0x6f,
+ 0xc8, 0x8f, 0x6c, 0x27, 0xbf, 0x7d, 0xe7, 0xca,
+ 0xff, 0xc4, 0x12, 0x0f, 0xdd, 0x73, 0xe5, 0x27,
+ 0x52, 0x39, 0xef, 0x49, 0x67, 0xd5, 0xaf, 0xf4,
+ 0x44, 0xef, 0x7f, 0xff, 0x48, 0xf9, 0xdd, 0x66,
+ 0x95, 0x85, 0xa9, 0x17, 0x35, 0xdf, 0xd9, 0x81,
+ 0xbf, 0xea, 0x33, 0x50, 0xeb, 0x9f, 0x46, 0x3c,
+ 0xde, 0xcf, 0x97, 0x5f, 0x9e, 0x89, 0x3f, 0xfe,
+ 0x7c, 0xbe, 0xfc, 0xf1, 0x3b, 0xd6, 0x3d, 0x6b,
+ 0x57, 0x1a, 0xfc, 0xc1, 0x03, 0xb6, 0xdd, 0x08,
+ 0xdf, 0x61, 0xde, 0x10, 0x17, 0x6f, 0xfb, 0xc9,
+ 0x2f, 0x1f, 0xa7, 0x7e, 0xac, 0x3d, 0x2c, 0xbf,
+ 0x1d, 0xcc, 0x7d, 0xfe, 0xb2, 0x5e, 0xfa, 0xe2,
+ 0xcc, 0x7d, 0xdd, 0x8b, 0x51, 0x7c, 0x2d, 0x46,
+ 0x1f, 0x67, 0xf6, 0x93, 0xfe, 0xf5, 0x66, 0x3e,
+ 0xe4, 0x1d, 0x89, 0xef, 0x32, 0x87, 0xd1, 0x83,
+ 0x97, 0xb1, 0xdc, 0xbb, 0x09, 0x63, 0xdf, 0x8b,
+ 0x8b, 0x98, 0x0b, 0x98, 0x10, 0xfa, 0x8a, 0xd3,
+ 0x9b, 0xd4, 0x8f, 0x0e, 0xc1, 0x2f, 0x6e, 0x75,
+ 0x3e, 0x07, 0x6f, 0xcb, 0x3d, 0x3b, 0xba, 0x68,
+ 0x1e, 0xf7, 0x94, 0x39, 0x4d, 0x42, 0x05, 0xce,
+ 0xc1, 0x68, 0x2c, 0x35, 0x79, 0x69, 0x75, 0xfe,
+ 0xc3, 0x8d, 0xf3, 0xca, 0xf5, 0x96, 0x5f, 0x7e,
+ 0x82, 0xbf, 0x75, 0x98, 0xdb, 0x10, 0xde, 0x8b,
+ 0xf9, 0x4f, 0xfe, 0x0f, 0xd2, 0x7b, 0x83, 0xce,
+ 0xb8, 0x74, 0x54, 0x7f, 0xf2, 0x5d, 0x1f, 0xb6,
+ 0x3d, 0x43, 0xeb, 0x4d, 0xa5, 0x7e, 0xae, 0x8e,
+ 0x43, 0xb7, 0x4c, 0x5f, 0xd6, 0x3d, 0x24, 0x5b,
+ 0xe7, 0xe1, 0xce, 0xfe, 0x37, 0x26, 0x68, 0xbe,
+ 0xf5, 0x3d, 0xe7, 0x92, 0x91, 0xae, 0x7a, 0xb5,
+ 0x0a, 0xbd, 0x2d, 0x93, 0xa3, 0x7c, 0x18, 0x49,
+ 0x5c, 0x14, 0xcb, 0x50, 0x3e, 0x5f, 0x40, 0x4f,
+ 0x93, 0x0b, 0xd4, 0x3f, 0xed, 0xa0, 0xcf, 0x76,
+ 0x2e, 0x52, 0xbe, 0x1d, 0x61, 0x1e, 0xb1, 0xb6,
+ 0x0b, 0x71, 0x69, 0x7e, 0x21, 0x4e, 0x63, 0x8b,
+ 0xe9, 0x3c, 0xca, 0xb1, 0xfe, 0x4b, 0x5b, 0xdb,
+ 0xe9, 0x7d, 0xac, 0xc3, 0xe1, 0xfd, 0xaf, 0xb5,
+ 0x3d, 0xe2, 0x29, 0x67, 0xb9, 0xea, 0x75, 0x01,
+ 0xf7, 0x79, 0xf9, 0xad, 0xe2, 0xe9, 0x39, 0xf5,
+ 0xe8, 0xdb, 0x67, 0xc4, 0x91, 0xf1, 0xfd, 0xc6,
+ 0xc2, 0xc9, 0xa4, 0xa5, 0x70, 0x37, 0x74, 0xee,
+ 0xfb, 0x28, 0xf9, 0x87, 0x1a, 0xe8, 0x74, 0x93,
+ 0x28, 0xe9, 0x4d, 0x63, 0xe2, 0x27, 0xde, 0x41,
+ 0x7a, 0xed, 0xc8, 0x7d, 0x9d, 0xda, 0xab, 0xfe,
+ 0xe3, 0x20, 0x73, 0xa7, 0x51, 0x9d, 0x34, 0x1f,
+ 0x3b, 0x4b, 0xde, 0x2c, 0x9a, 0x20, 0xbd, 0xf0,
+ 0x64, 0xbd, 0xa1, 0xc1, 0xe8, 0x8c, 0x59, 0x49,
+ 0x3d, 0xc9, 0x6e, 0x25, 0xbf, 0xfb, 0x1e, 0x7d,
+ 0xf4, 0xc0, 0xa9, 0x8a, 0xcf, 0x85, 0xe8, 0x8e,
+ 0xc7, 0x1c, 0xf9, 0x9d, 0x86, 0xe8, 0x4a, 0x8d,
+ 0x29, 0xea, 0xd7, 0xab, 0xe2, 0x1f, 0x1c, 0x3a,
+ 0x7d, 0x0a, 0x2f, 0x45, 0xfd, 0x70, 0x6e, 0xc7,
+ 0x73, 0xcd, 0x55, 0x74, 0xfc, 0xcd, 0x75, 0xcd,
+ 0x5b, 0xbf, 0xc3, 0x5f, 0xb8, 0x25, 0x2b, 0x3e,
+ 0x8e, 0x11, 0x3f, 0x9b, 0x32, 0xf8, 0x9e, 0xd9,
+ 0x4a, 0x3f, 0x7f, 0xab, 0x89, 0xd6, 0x9f, 0xca,
+ 0xfe, 0x2e, 0xc6, 0xcb, 0x5f, 0xfd, 0xe8, 0x05,
+ 0xbf, 0xab, 0xf9, 0xe1, 0x2a, 0xea, 0x67, 0x4f,
+ 0x77, 0xc5, 0x4b, 0x0b, 0xf2, 0x71, 0xe4, 0x1d,
+ 0xcd, 0x17, 0x2e, 0xa1, 0x7b, 0x03, 0x82, 0xd2,
+ 0xe0, 0x17, 0xa8, 0x67, 0xe3, 0xf3, 0x74, 0x9f,
+ 0xcd, 0x99, 0x7b, 0xfd, 0x59, 0xf4, 0x4f, 0x3d,
+ 0xa1, 0x3e, 0x24, 0x5f, 0x09, 0x80, 0xbf, 0x40,
+ 0x17, 0xf3, 0xc2, 0xa4, 0xe7, 0xd9, 0xf8, 0xcd,
+ 0x4a, 0x6e, 0xf2, 0xff, 0x8f, 0xa8, 0x33, 0x39,
+ 0xfd, 0x55, 0x6f, 0xaa, 0xf3, 0xb9, 0xd3, 0x0e,
+ 0xc4, 0x91, 0xb9, 0x49, 0x5c, 0xc6, 0xce, 0x53,
+ 0x3f, 0xff, 0x27, 0xf3, 0x92, 0xad, 0xe7, 0xb5,
+ 0xdf, 0x5e, 0xf4, 0x3b, 0xb3, 0x3c, 0xe4, 0x1f,
+ 0xfe, 0xcb, 0xb9, 0x56, 0x4b, 0x55, 0x7d, 0xef,
+ 0x4f, 0x7d, 0xf5, 0x68, 0xac, 0xfc, 0x7d, 0x48,
+ 0x1d, 0x7a, 0xd0, 0x50, 0xf7, 0x39, 0x06, 0xdf,
+ 0x5e, 0xfb, 0xb6, 0xe6, 0xb3, 0xfe, 0xf8, 0x11,
+ 0x9f, 0xb3, 0x9a, 0x9f, 0x4f, 0xc7, 0xdf, 0x7f,
+ 0x18, 0x94, 0x03, 0x8f, 0x4e, 0xb4, 0x10, 0xa7,
+ 0xb9, 0xad, 0x29, 0x42, 0x5f, 0x42, 0xaa, 0x68,
+ 0xfd, 0x5f, 0x60, 0x9c, 0x9f, 0x64, 0xe9, 0x79,
+ 0x83, 0x88, 0xbf, 0xae, 0xc5, 0xe5, 0x4f, 0x4a,
+ 0x32, 0x37, 0xf2, 0x1b, 0xb8, 0x1d, 0xde, 0x10,
+ 0xbf, 0x51, 0xb7, 0xeb, 0xb7, 0xf0, 0x72, 0x70,
+ 0xe7, 0x48, 0x74, 0xde, 0x5c, 0x44, 0x57, 0x1e,
+ 0xb8, 0x2a, 0x5e, 0x13, 0xd8, 0xaf, 0x67, 0x5d,
+ 0x7c, 0xad, 0x19, 0xef, 0x65, 0xa1, 0x4d, 0xa0,
+ 0xf4, 0xde, 0xe7, 0x99, 0x85, 0xa3, 0xaf, 0xd4,
+ 0xcf, 0x05, 0x70, 0x7f, 0x0d, 0x76, 0xa9, 0x5f,
+ 0x8e, 0xe5, 0x1c, 0x0e, 0x9d, 0xd1, 0xfd, 0xf5,
+ 0xc6, 0xc7, 0x15, 0xb6, 0xd6, 0x79, 0x3e, 0x87,
+ 0x9f, 0xbe, 0x82, 0xee, 0x1a, 0x37, 0xe6, 0xfe,
+ 0x1b, 0xba, 0xa9, 0x7e, 0x4c, 0x61, 0x7f, 0x15,
+ 0x5e, 0xa9, 0xbf, 0x2a, 0xcd, 0x7d, 0x5f, 0x9b,
+ 0xac, 0xfe, 0xb7, 0x38, 0xf9, 0x39, 0xa2, 0x50,
+ 0xfe, 0xb5, 0x26, 0xf3, 0xaa, 0x32, 0x29, 0xd2,
+ 0x9f, 0x1d, 0xc4, 0x4f, 0xaf, 0x54, 0xe6, 0x84,
+ 0xa6, 0x99, 0x8b, 0x85, 0x03, 0xdb, 0xe5, 0xaf,
+ 0x87, 0x10, 0x77, 0xc5, 0xb2, 0xd4, 0x9f, 0x45,
+ 0x91, 0x67, 0xce, 0x49, 0x4d, 0xf4, 0x3c, 0xe2,
+ 0xc2, 0x39, 0x4f, 0xf1, 0xeb, 0xc7, 0x5c, 0x78,
+ 0xd8, 0x0b, 0x7d, 0x5e, 0xf3, 0xbe, 0xb5, 0x3f,
+ 0xca, 0x6f, 0x2c, 0xc0, 0xb7, 0x7e, 0x5d, 0x4e,
+ 0xf7, 0x3d, 0x10, 0x5f, 0x7b, 0xa9, 0x85, 0xfe,
+ 0x1e, 0x8a, 0x1f, 0xf2, 0xcf, 0x53, 0x7e, 0xad,
+ 0xe3, 0x9c, 0xe6, 0xdf, 0x96, 0x7e, 0xdd, 0xc7,
+ 0x2f, 0x94, 0x3f, 0xbd, 0x00, 0xbe, 0x8a, 0xba,
+ 0x59, 0xb0, 0x52, 0xf7, 0xf7, 0x80, 0x39, 0xf2,
+ 0xba, 0x03, 0xf4, 0x75, 0xa6, 0xff, 0x52, 0xd6,
+ 0xb7, 0x57, 0xbf, 0xff, 0xec, 0xc0, 0x47, 0xbe,
+ 0xca, 0xd6, 0x7d, 0x0e, 0x8e, 0xb0, 0x70, 0x75,
+ 0x89, 0xf2, 0x63, 0x0f, 0xe7, 0x7a, 0x20, 0x4b,
+ 0xf1, 0x75, 0x0d, 0x3d, 0xee, 0xdb, 0xb1, 0x1a,
+ 0xfc, 0x18, 0x75, 0xfd, 0xcd, 0x84, 0x7c, 0xf8,
+ 0x30, 0xce, 0xcd, 0x69, 0xb3, 0xfc, 0xc4, 0x60,
+ 0xf2, 0xb6, 0x81, 0xbb, 0xea, 0xeb, 0x36, 0xf2,
+ 0xe5, 0xa6, 0x13, 0x3a, 0x6a, 0xb6, 0xa0, 0xd3,
+ 0x45, 0x4b, 0x54, 0x5f, 0x93, 0xe8, 0x0b, 0x2b,
+ 0x4d, 0x94, 0x3e, 0xf9, 0x11, 0x77, 0x01, 0xbd,
+ 0x98, 0xdb, 0x98, 0x35, 0x9f, 0x59, 0x68, 0xb6,
+ 0x0f, 0x9f, 0x60, 0x46, 0xbd, 0x6f, 0xc1, 0xbb,
+ 0x19, 0xf7, 0x68, 0x6a, 0x31, 0xef, 0xeb, 0xdd,
+ 0x1b, 0xdf, 0x68, 0x7e, 0x47, 0x2f, 0xa7, 0x5d,
+ 0x93, 0xbe, 0x0d, 0x61, 0x6e, 0x34, 0x75, 0x8c,
+ 0xce, 0x77, 0x12, 0xfe, 0x23, 0x31, 0x5c, 0xfd,
+ 0x55, 0x2f, 0xe6, 0x91, 0x35, 0x3e, 0xed, 0x02,
+ 0x1f, 0xc7, 0x7d, 0xd7, 0x0b, 0x7a, 0xa6, 0xcf,
+ 0xb3, 0x8e, 0x26, 0xf9, 0x7a, 0x7f, 0x9d, 0x34,
+ 0x0b, 0xa9, 0x7b, 0x35, 0xcf, 0xfd, 0x04, 0x1d,
+ 0x8f, 0x68, 0xa1, 0x7a, 0xfe, 0xf2, 0x8c, 0x85,
+ 0xac, 0x53, 0xba, 0x8f, 0x20, 0xf6, 0xb7, 0xee,
+ 0xa1, 0xe6, 0x51, 0x57, 0x88, 0xc3, 0x2a, 0xe1,
+ 0xca, 0x97, 0x3d, 0xbc, 0xef, 0xc3, 0x0c, 0x17,
+ 0xf8, 0x04, 0x7c, 0x6d, 0x8f, 0x78, 0xfd, 0x9e,
+ 0xd0, 0x87, 0x7b, 0xbe, 0x70, 0x5d, 0xfd, 0x7b,
+ 0xd8, 0x75, 0x0b, 0x53, 0x2e, 0xea, 0xf7, 0x0a,
+ 0x4f, 0xce, 0xbf, 0xc7, 0x69, 0x74, 0xc5, 0xdc,
+ 0x43, 0xef, 0x67, 0x34, 0x41, 0x37, 0xcc, 0x51,
+ 0xea, 0xd4, 0xe0, 0x2f, 0xf0, 0xc5, 0xa6, 0x19,
+ 0xfd, 0xb2, 0x73, 0x6d, 0xcd, 0xe3, 0x67, 0x30,
+ 0x2f, 0x5a, 0x57, 0x46, 0xef, 0x6f, 0x49, 0x3d,
+ 0xcd, 0x6b, 0xa0, 0x7a, 0x95, 0x42, 0x9c, 0x0c,
+ 0x19, 0x88, 0x8f, 0x33, 0xc9, 0xc4, 0xa5, 0x63,
+ 0xe4, 0x52, 0xf8, 0x35, 0xe2, 0xea, 0xc0, 0x36,
+ 0xd5, 0xc3, 0x6c, 0xf4, 0xb2, 0xdd, 0x39, 0xf9,
+ 0xf1, 0xcf, 0xc8, 0x83, 0xc4, 0x18, 0xf5, 0x6b,
+ 0x81, 0xe8, 0xad, 0xe3, 0x63, 0x2f, 0x78, 0x06,
+ 0x7a, 0x7c, 0xc2, 0x4d, 0x7e, 0xcf, 0x3f, 0xca,
+ 0x42, 0xed, 0x92, 0xf2, 0xf7, 0xcb, 0xf1, 0x65,
+ 0x67, 0x22, 0x74, 0x3f, 0x2b, 0x99, 0xe3, 0x1e,
+ 0xa8, 0xab, 0x7e, 0xaf, 0x22, 0xf9, 0x12, 0x31,
+ 0x5f, 0x7e, 0xc9, 0x8b, 0xdf, 0x95, 0x2e, 0x4e,
+ 0x55, 0x7d, 0xbd, 0xc6, 0xef, 0x09, 0x23, 0x8b,
+ 0x69, 0xfe, 0x76, 0x9d, 0x38, 0x1b, 0xb8, 0x52,
+ 0xf9, 0xfa, 0x86, 0xfc, 0x72, 0xab, 0xae, 0xf5,
+ 0x75, 0xe2, 0x5c, 0xcf, 0x94, 0x56, 0x3f, 0x33,
+ 0x89, 0xfe, 0xfb, 0xdd, 0x06, 0xd5, 0xe3, 0x14,
+ 0x7c, 0x48, 0x2f, 0x47, 0xe9, 0xd5, 0x01, 0xf4,
+ 0x3a, 0xfa, 0xde, 0x1e, 0xf8, 0x22, 0x74, 0xae,
+ 0x54, 0x26, 0x73, 0xfd, 0x7f, 0xe6, 0x5d, 0x37,
+ 0xdc, 0xa5, 0x37, 0x4e, 0xdc, 0x53, 0xf3, 0x4a,
+ 0x8a, 0xbf, 0xed, 0xe8, 0xc8, 0x80, 0x10, 0xde,
+ 0x63, 0x02, 0xab, 0x58, 0xf0, 0xa9, 0x2b, 0x7f,
+ 0xf9, 0x01, 0xba, 0xf7, 0xf0, 0xb1, 0xea, 0x61,
+ 0x3e, 0x73, 0xd2, 0xa6, 0xcd, 0x75, 0x3e, 0x13,
+ 0xf1, 0x29, 0xd5, 0x37, 0xeb, 0xf7, 0x03, 0x47,
+ 0xe2, 0xbe, 0x7b, 0xb0, 0xfa, 0x8f, 0x59, 0xe8,
+ 0xd8, 0xcd, 0x8e, 0xea, 0x1f, 0xa3, 0x88, 0xa3,
+ 0xf0, 0x7d, 0xf8, 0x4c, 0x53, 0x88, 0x5e, 0xde,
+ 0x3a, 0x2a, 0x23, 0xe8, 0x87, 0xbe, 0x8d, 0xcd,
+ 0xf4, 0x82, 0xdf, 0xa2, 0xee, 0x54, 0xf8, 0x4c,
+ 0xfd, 0x54, 0x22, 0xfd, 0xc8, 0xaa, 0xad, 0xc4,
+ 0x89, 0x39, 0x48, 0x9d, 0xed, 0xb0, 0x53, 0xf5,
+ 0x79, 0x23, 0xf9, 0x5b, 0x39, 0x4d, 0xf9, 0x53,
+ 0x93, 0x7d, 0x44, 0xfb, 0x69, 0x5e, 0xdd, 0x93,
+ 0x3e, 0xfa, 0xcc, 0xa3, 0x29, 0xf0, 0x37, 0xcc,
+ 0xf7, 0x36, 0xbf, 0x46, 0xa7, 0x4d, 0x71, 0xe6,
+ 0x5d, 0x1e, 0xeb, 0x55, 0x5f, 0x97, 0x72, 0x7f,
+ 0x4f, 0x83, 0xa4, 0x77, 0xdd, 0xd3, 0x2c, 0xbc,
+ 0x1d, 0xa3, 0xfa, 0xe5, 0x4a, 0x5e, 0xf6, 0x9c,
+ 0x1e, 0x05, 0xef, 0x86, 0x2f, 0x5d, 0x11, 0xc3,
+ 0x9c, 0xc9, 0x34, 0x66, 0x3f, 0x77, 0x7f, 0xd3,
+ 0x3c, 0xdf, 0x15, 0x3f, 0x59, 0xc7, 0x53, 0xeb,
+ 0x39, 0x87, 0x4e, 0x0e, 0xf5, 0xd0, 0xef, 0xb9,
+ 0x4b, 0xa8, 0xa3, 0xbb, 0xf3, 0x42, 0xe1, 0x13,
+ 0xf8, 0xdd, 0xb7, 0x4b, 0x86, 0xf6, 0xdb, 0x95,
+ 0xb8, 0xfa, 0x28, 0x5a, 0xfd, 0xef, 0x7c, 0xf6,
+ 0x9b, 0x53, 0x5e, 0xf3, 0xb6, 0xf6, 0xdc, 0xc7,
+ 0x4e, 0x5f, 0xcd, 0x17, 0x23, 0x99, 0x93, 0x2f,
+ 0x1a, 0x72, 0x14, 0x3e, 0x89, 0xfb, 0x5e, 0xe4,
+ 0xa4, 0x7e, 0xb8, 0x11, 0xf5, 0x75, 0x6a, 0x67,
+ 0xd5, 0xc3, 0xad, 0xe8, 0x46, 0x72, 0x57, 0xd5,
+ 0xd7, 0x10, 0xfa, 0xf9, 0x87, 0xd3, 0x54, 0xdf,
+ 0x0f, 0x91, 0xdf, 0xe1, 0x71, 0xf2, 0x17, 0xad,
+ 0xf3, 0x2d, 0x38, 0x86, 0xcd, 0xd1, 0xfe, 0x78,
+ 0x5f, 0x70, 0x9c, 0xf2, 0xf7, 0x7e, 0xb7, 0xff,
+ 0x03
+};
+
+/** Length of the pre-compressed data using zlib algorithm */
+#define COMP_ZLIB_SIZE 3771
+
+/** Pre-compressed data using zlib algorithm */
+static uint8_t compressed_text_zlib[COMP_ZLIB_SIZE] = {
+ 0x78, 0x9c, 0x35, 0x99, 0x77, 0x5c, 0xd6, 0xe5,
+ 0x1a, 0xc6, 0x1f, 0x70, 0xa2, 0xe1, 0x0c, 0x50,
+ 0xb4, 0x70, 0xe7, 0x44, 0xc5, 0x34, 0x27, 0xe6,
+ 0x04, 0x15, 0x51, 0x73, 0x1e, 0x51, 0x8e, 0x60,
+ 0x42, 0xe2, 0x08, 0xf5, 0x68, 0x99, 0x24, 0xb8,
+ 0xd3, 0x24, 0xad, 0xe3, 0x9e, 0xb8, 0x11, 0x50,
+ 0x12, 0x15, 0xd2, 0xd0, 0xcc, 0x2d, 0x66, 0x28,
+ 0xe0, 0x20, 0x89, 0x5c, 0xe4, 0xc4, 0x81, 0x0b,
+ 0xc5, 0x71, 0xce, 0xe7, 0xf9, 0x5e, 0xf5, 0x47,
+ 0x57, 0x57, 0xbc, 0xef, 0xef, 0xf7, 0x8c, 0xfb,
+ 0xbe, 0xee, 0xeb, 0xbe, 0x5f, 0x63, 0xfe, 0xff,
+ 0x4f, 0x68, 0xb7, 0x6f, 0x8d, 0xfd, 0xe7, 0x78,
+ 0x6d, 0x0b, 0xa3, 0x57, 0xb4, 0x87, 0x4f, 0xf2,
+ 0xb5, 0xd0, 0x2f, 0xe7, 0x47, 0xf8, 0xdb, 0x61,
+ 0x16, 0x7a, 0x2e, 0xb9, 0x05, 0xbf, 0x37, 0xc4,
+ 0x42, 0x03, 0x8f, 0xef, 0xe0, 0x67, 0x22, 0x2c,
+ 0xec, 0x1f, 0x52, 0x0d, 0x1e, 0x7f, 0xd1, 0x42,
+ 0x41, 0xa7, 0x5d, 0xf0, 0xa2, 0xf7, 0x2d, 0x5c,
+ 0xdf, 0x7e, 0x0d, 0xde, 0xfd, 0xaa, 0x85, 0x08,
+ 0xf3, 0x6f, 0xf8, 0xe1, 0x60, 0x0b, 0x03, 0x4a,
+ 0xd7, 0x80, 0x17, 0x7e, 0x60, 0xa1, 0x5a, 0x6d,
+ 0x57, 0xf8, 0xad, 0xdb, 0x16, 0x6e, 0x9e, 0x59,
+ 0x0c, 0x8f, 0xee, 0x62, 0x61, 0x4a, 0xf9, 0x72,
+ 0xf0, 0xa1, 0x75, 0x2d, 0x78, 0xbe, 0x58, 0x03,
+ 0x1f, 0x39, 0xd1, 0x42, 0x8b, 0xc3, 0x39, 0xf0,
+ 0xf5, 0xac, 0xe7, 0xd4, 0xd1, 0x6d, 0xf0, 0x55,
+ 0xa5, 0x2c, 0x84, 0x14, 0x85, 0xc1, 0xbd, 0x86,
+ 0x5b, 0x70, 0xee, 0xbb, 0x1a, 0x9e, 0xc2, 0xe7,
+ 0x9b, 0xef, 0x6f, 0x02, 0x1f, 0x72, 0xc3, 0xc2,
+ 0xa6, 0x66, 0x13, 0xf4, 0xbe, 0x6c, 0x0b, 0x75,
+ 0x83, 0xc6, 0xc2, 0x87, 0xcf, 0xb7, 0x50, 0x62,
+ 0x57, 0x5b, 0xb8, 0xdb, 0x26, 0xbe, 0x16, 0xcb,
+ 0x3a, 0xcd, 0xed, 0xa1, 0x16, 0x02, 0x67, 0x8f,
+ 0x84, 0x67, 0xc5, 0x5a, 0x88, 0x3f, 0x90, 0x0f,
+ 0x4f, 0x5a, 0x68, 0xe1, 0xcc, 0xd9, 0xfe, 0xf0,
+ 0xe3, 0x57, 0x2c, 0xac, 0x9e, 0xb2, 0x0a, 0xbe,
+ 0x67, 0x1d, 0xe7, 0x53, 0xe7, 0x31, 0xbc, 0xe2,
+ 0x5c, 0x0b, 0xe5, 0x8e, 0xb6, 0x82, 0xcf, 0x7c,
+ 0xcf, 0xc2, 0x7f, 0x0a, 0x97, 0xc2, 0x3b, 0xf1,
+ 0xf7, 0x9a, 0x4f, 0x23, 0xe1, 0x1d, 0x0e, 0x58,
+ 0xc8, 0x89, 0x9e, 0x06, 0x9f, 0xf7, 0xd0, 0x42,
+ 0xb8, 0x7b, 0x2d, 0xad, 0xef, 0x4f, 0x0b, 0xde,
+ 0x27, 0x4b, 0xc2, 0x37, 0x64, 0xf1, 0x98, 0xc3,
+ 0x7a, 0xff, 0xd5, 0x42, 0x0b, 0xaf, 0x76, 0x94,
+ 0x85, 0x87, 0x12, 0x2f, 0xde, 0x33, 0x8b, 0xc3,
+ 0x1d, 0xb8, 0xc7, 0xe9, 0xc7, 0xeb, 0xc0, 0x97,
+ 0x37, 0xb4, 0x70, 0x67, 0x9f, 0xf6, 0x9b, 0xd3,
+ 0xd3, 0xc2, 0x98, 0xcc, 0x3b, 0xf0, 0x65, 0x5f,
+ 0x58, 0x58, 0x74, 0x51, 0xcf, 0x6b, 0xbd, 0xd9,
+ 0xc2, 0xb0, 0x21, 0x03, 0xe1, 0x0d, 0xfd, 0x2d,
+ 0xdc, 0xeb, 0xcc, 0x7b, 0xcc, 0x8f, 0xc4, 0x57,
+ 0xbb, 0x2c, 0xfe, 0xbf, 0x29, 0x62, 0x9d, 0x5e,
+ 0xab, 0x2e, 0xc1, 0x13, 0x7f, 0xb3, 0xd0, 0x3e,
+ 0x7a, 0x1c, 0x7c, 0xc0, 0x22, 0xbe, 0x3f, 0x5f,
+ 0xfb, 0xab, 0x42, 0x5c, 0x5e, 0x8e, 0xd4, 0x7d,
+ 0x0c, 0xe3, 0x7d, 0x8e, 0x2f, 0xde, 0xc2, 0xdd,
+ 0x89, 0xf7, 0x37, 0x5f, 0x2e, 0x81, 0x77, 0xf9,
+ 0x99, 0xd7, 0x7c, 0x7c, 0x18, 0xfe, 0xb2, 0x9e,
+ 0x85, 0xb8, 0xdc, 0x0d, 0xf0, 0xb6, 0x47, 0xf8,
+ 0xfb, 0x9c, 0xbf, 0xe1, 0x0d, 0xb6, 0x58, 0x38,
+ 0x97, 0xaf, 0x78, 0xfc, 0xbb, 0xa3, 0x85, 0x12,
+ 0x0f, 0xb5, 0xde, 0xf6, 0xbf, 0x5a, 0x88, 0x48,
+ 0x51, 0x7c, 0x2d, 0x39, 0x61, 0x61, 0xf2, 0xa8,
+ 0xd6, 0xf0, 0x4b, 0xe5, 0x2d, 0x1c, 0x74, 0x71,
+ 0x80, 0x07, 0x11, 0x1f, 0xf3, 0x17, 0xb6, 0x81,
+ 0xbf, 0x20, 0xfe, 0x87, 0x25, 0xee, 0x85, 0xff,
+ 0xcc, 0x7a, 0x62, 0x93, 0x3f, 0x84, 0xaf, 0x24,
+ 0x6f, 0xcb, 0x27, 0x2b, 0x9e, 0x97, 0x7e, 0x6c,
+ 0x61, 0xd7, 0xe0, 0x77, 0xe0, 0x77, 0x5b, 0x5a,
+ 0x78, 0x74, 0x43, 0xef, 0xdf, 0xdd, 0x97, 0xf5,
+ 0x4d, 0xe7, 0x5e, 0xcd, 0x03, 0xe2, 0xff, 0x79,
+ 0x59, 0xe5, 0x87, 0xf9, 0xc5, 0xfe, 0xfb, 0xf6,
+ 0xec, 0xb3, 0xd0, 0x0d, 0xdc, 0x5b, 0xdb, 0x99,
+ 0x33, 0xe0, 0x79, 0x65, 0x2c, 0xa4, 0x2d, 0x62,
+ 0xdd, 0xa6, 0x6c, 0xb8, 0x85, 0xd3, 0x3d, 0xb5,
+ 0xfe, 0xb3, 0xc4, 0x9d, 0x6f, 0xf3, 0x05, 0x70,
+ 0x1f, 0x47, 0x0b, 0x9f, 0xfa, 0xec, 0x83, 0x1f,
+ 0x58, 0x66, 0xc1, 0xa3, 0x92, 0xf4, 0xa3, 0x36,
+ 0xef, 0x7f, 0xbf, 0x23, 0x3a, 0x61, 0x62, 0x89,
+ 0xf7, 0xea, 0x3e, 0xbc, 0xc7, 0x4c, 0xbb, 0xcf,
+ 0xf7, 0x8f, 0x91, 0x77, 0xe6, 0xd8, 0x6b, 0x0b,
+ 0xef, 0x36, 0x7a, 0x0e, 0x9f, 0x44, 0x5c, 0x0f,
+ 0xf5, 0x5e, 0xa6, 0xbf, 0xcf, 0xb4, 0x10, 0xb8,
+ 0xab, 0x00, 0xde, 0x8b, 0x38, 0x9a, 0x9a, 0xa9,
+ 0xf5, 0x8e, 0x6c, 0x6a, 0xa1, 0xdd, 0x0c, 0x74,
+ 0xc7, 0x84, 0xff, 0xc5, 0xf3, 0xef, 0x73, 0x8f,
+ 0xa6, 0x0e, 0xf1, 0x55, 0x31, 0x4d, 0xf1, 0xb1,
+ 0x8e, 0xfd, 0x39, 0x55, 0x54, 0xfc, 0xe6, 0xfc,
+ 0xcb, 0x42, 0xdc, 0x95, 0x14, 0x78, 0x56, 0x80,
+ 0x85, 0x84, 0x6c, 0xc5, 0x5f, 0x41, 0x45, 0x0b,
+ 0xd5, 0x02, 0xbf, 0x81, 0x57, 0xa8, 0x69, 0x21,
+ 0x31, 0x56, 0xf9, 0xd2, 0x8e, 0xe7, 0xb5, 0x98,
+ 0x8e, 0x0e, 0x9a, 0x33, 0x37, 0x2d, 0x5c, 0x2f,
+ 0x97, 0x0b, 0xef, 0x7b, 0xcc, 0x82, 0xef, 0x85,
+ 0x67, 0x70, 0x1f, 0xf2, 0xd1, 0xe5, 0x08, 0x79,
+ 0x65, 0x86, 0x71, 0xce, 0x3f, 0xf7, 0x79, 0x05,
+ 0xff, 0x1c, 0x5e, 0x6b, 0xf6, 0x23, 0x78, 0x55,
+ 0xf8, 0xfe, 0x7f, 0xf4, 0x62, 0xf5, 0x60, 0x0b,
+ 0x5f, 0x37, 0xfb, 0x03, 0xfe, 0x94, 0x7d, 0x96,
+ 0xfd, 0xe8, 0x27, 0xf8, 0x55, 0xce, 0xdd, 0x3d,
+ 0x41, 0xeb, 0xad, 0xce, 0xf7, 0x23, 0x17, 0x29,
+ 0x1e, 0x6a, 0xa3, 0x1f, 0x85, 0x43, 0x89, 0x5b,
+ 0xd3, 0x80, 0xf5, 0x17, 0x56, 0x09, 0x84, 0x6f,
+ 0x26, 0x0e, 0xab, 0xcd, 0x54, 0x7e, 0x54, 0x26,
+ 0xbf, 0xf7, 0x0e, 0xa4, 0x6e, 0x98, 0x34, 0xd6,
+ 0xed, 0x7f, 0x53, 0xeb, 0xbd, 0x95, 0x69, 0x61,
+ 0x45, 0xa5, 0x53, 0x70, 0x2f, 0xf6, 0xd9, 0x7a,
+ 0x65, 0x03, 0xf8, 0x15, 0xea, 0xcc, 0xd4, 0xb4,
+ 0x1f, 0xe0, 0xbd, 0x79, 0x4e, 0x87, 0x3b, 0x3b,
+ 0xe0, 0x1f, 0xba, 0x5b, 0x48, 0x1d, 0xee, 0x02,
+ 0xff, 0xf7, 0x78, 0x9e, 0x5f, 0x6d, 0x3a, 0xfc,
+ 0x0b, 0xee, 0xff, 0xe5, 0xb1, 0x18, 0xf8, 0xe9,
+ 0x5e, 0x16, 0xc6, 0xd6, 0xd7, 0x7a, 0x4b, 0x10,
+ 0x3f, 0x09, 0x61, 0xd2, 0x8f, 0xb9, 0x9c, 0x6f,
+ 0xcd, 0x37, 0x15, 0xe0, 0x8d, 0x5e, 0x5a, 0x88,
+ 0xfb, 0x9e, 0x7b, 0x36, 0xee, 0xe0, 0xc0, 0x30,
+ 0xe2, 0xc4, 0x6c, 0x8c, 0xb3, 0x70, 0x30, 0xda,
+ 0x19, 0x1e, 0x7e, 0x8f, 0xfd, 0xd6, 0x24, 0x6e,
+ 0xcd, 0xa4, 0xeb, 0x16, 0xda, 0xb9, 0x51, 0xc7,
+ 0x4c, 0x71, 0x27, 0x0b, 0xc3, 0x52, 0x74, 0x9e,
+ 0x99, 0xec, 0xb3, 0xd5, 0xdb, 0xc9, 0xf0, 0x25,
+ 0xe4, 0xe3, 0xee, 0x4a, 0xaa, 0xb7, 0x89, 0x9c,
+ 0xcb, 0xb5, 0x04, 0xea, 0x9a, 0x79, 0xc8, 0xb9,
+ 0xdd, 0xb8, 0xbe, 0x1c, 0xbe, 0x8e, 0xbc, 0xe8,
+ 0x33, 0x86, 0x7d, 0x18, 0x2f, 0xe2, 0xd4, 0x45,
+ 0x75, 0xd1, 0xf4, 0x68, 0x6e, 0xe1, 0x75, 0x4f,
+ 0xce, 0xc5, 0xcc, 0x44, 0x37, 0xfc, 0x3d, 0x95,
+ 0x2f, 0xdb, 0xe1, 0xa5, 0x17, 0xe8, 0xbe, 0xc3,
+ 0x1b, 0x5b, 0xc8, 0x4a, 0x3c, 0x0e, 0x4f, 0x42,
+ 0x2f, 0x17, 0xcc, 0xb9, 0x02, 0x6f, 0xc0, 0x79,
+ 0x0f, 0x38, 0xbe, 0x12, 0xee, 0x5b, 0xd9, 0xc2,
+ 0xaa, 0x92, 0xda, 0x5f, 0x19, 0xee, 0x2b, 0xf5,
+ 0x19, 0x71, 0x66, 0xce, 0x53, 0x8f, 0xca, 0x3d,
+ 0xca, 0x80, 0x57, 0xd9, 0x69, 0x21, 0xd6, 0xc7,
+ 0x1b, 0xde, 0x95, 0xfa, 0x93, 0xf0, 0x46, 0xfa,
+ 0x12, 0x40, 0x7d, 0x7c, 0xbc, 0xaf, 0x1f, 0xfc,
+ 0xbb, 0x78, 0x0b, 0x7e, 0x83, 0xfd, 0xe0, 0x17,
+ 0xd7, 0x5a, 0x68, 0x92, 0xcc, 0xbe, 0xcc, 0x6a,
+ 0x74, 0xd9, 0x69, 0xad, 0xf2, 0xb3, 0x33, 0xfb,
+ 0xce, 0x7b, 0x4c, 0x1e, 0x99, 0x92, 0xac, 0xe7,
+ 0x68, 0xa1, 0xfc, 0x43, 0x0b, 0xea, 0x40, 0xe2,
+ 0xda, 0x3d, 0xf0, 0x33, 0xc4, 0xc7, 0x26, 0x7f,
+ 0x7d, 0x7e, 0x76, 0x6f, 0x0b, 0x91, 0xeb, 0xa4,
+ 0x6f, 0x4d, 0x88, 0x87, 0x1d, 0xa1, 0xe4, 0xad,
+ 0xc9, 0xfa, 0xd4, 0xc2, 0x94, 0x9b, 0xd2, 0x5f,
+ 0x1f, 0xce, 0x35, 0xd7, 0x4f, 0xf5, 0xda, 0x8b,
+ 0xbc, 0xad, 0xbe, 0x50, 0xfa, 0x73, 0xaf, 0x83,
+ 0x85, 0x9e, 0x0b, 0x15, 0x1f, 0x47, 0x89, 0xf7,
+ 0x66, 0x9e, 0x9f, 0xc3, 0x9f, 0xa1, 0x8b, 0x35,
+ 0x3c, 0xa9, 0x23, 0xa6, 0x36, 0xeb, 0x9d, 0xd3,
+ 0xea, 0x3c, 0x7c, 0xec, 0x1c, 0x0b, 0x45, 0x73,
+ 0x88, 0x33, 0x93, 0x44, 0x7c, 0x95, 0xf2, 0x55,
+ 0x3c, 0x16, 0x47, 0xcf, 0x9e, 0x34, 0x8d, 0xd0,
+ 0xfb, 0xda, 0x59, 0x28, 0x5b, 0xb6, 0x0a, 0x7c,
+ 0x5f, 0x7d, 0x9e, 0x1f, 0x58, 0x03, 0xbe, 0x9f,
+ 0x7a, 0x32, 0x37, 0xde, 0x0b, 0x7e, 0x24, 0xc1,
+ 0x82, 0xf7, 0x33, 0x1f, 0x78, 0x09, 0xea, 0xf3,
+ 0xa3, 0xa8, 0x74, 0x3d, 0x0f, 0x3d, 0xce, 0xde,
+ 0x29, 0x1e, 0xfa, 0x99, 0x85, 0x16, 0xbf, 0xc9,
+ 0xcf, 0xa5, 0x14, 0x59, 0x58, 0xd9, 0xfd, 0xbf,
+ 0xf0, 0xbe, 0xf8, 0xc1, 0x4d, 0x6d, 0x7a, 0xc0,
+ 0xaf, 0x73, 0xef, 0xef, 0x36, 0x26, 0x0e, 0x4c,
+ 0x66, 0x28, 0xcf, 0x1b, 0x2e, 0x3f, 0xf2, 0x21,
+ 0xf7, 0xfd, 0xbc, 0xcf, 0x54, 0x78, 0x20, 0x75,
+ 0x37, 0xe6, 0xaf, 0x07, 0xf0, 0x02, 0xce, 0xa1,
+ 0xc2, 0x0c, 0x74, 0xd5, 0xc4, 0x12, 0x67, 0xcf,
+ 0x0a, 0xcf, 0x68, 0xbd, 0xac, 0x63, 0xc9, 0x48,
+ 0xd5, 0x17, 0x07, 0x74, 0xf5, 0x60, 0xb6, 0xea,
+ 0xdf, 0x1e, 0xea, 0x6d, 0x6a, 0x42, 0x12, 0xbc,
+ 0x2b, 0xe7, 0xf9, 0xe5, 0x78, 0xf1, 0x97, 0x1b,
+ 0x2d, 0xf4, 0x48, 0x57, 0xbc, 0x8d, 0xe6, 0xbd,
+ 0x7f, 0x1b, 0xee, 0xd9, 0xd4, 0x46, 0x47, 0xce,
+ 0x0e, 0xc6, 0x27, 0x1a, 0x77, 0xde, 0xfb, 0xc7,
+ 0xd4, 0xae, 0xfa, 0x3c, 0x79, 0x1e, 0x56, 0x57,
+ 0x7a, 0x32, 0x7d, 0xb7, 0x05, 0xbf, 0x09, 0xed,
+ 0xc4, 0x79, 0x6e, 0xbf, 0x6a, 0xdd, 0xe1, 0x75,
+ 0x39, 0xc7, 0xdd, 0xf1, 0xda, 0x8f, 0xef, 0x18,
+ 0x0b, 0xb3, 0x47, 0xaa, 0x1e, 0xf5, 0xc7, 0x87,
+ 0xb7, 0xed, 0x84, 0x6e, 0x98, 0x25, 0xf8, 0xcd,
+ 0xa2, 0x24, 0xf9, 0x91, 0xe6, 0xf8, 0xca, 0x95,
+ 0x4b, 0x3b, 0xc3, 0x17, 0x13, 0xd7, 0x5e, 0xde,
+ 0xd2, 0x83, 0xc1, 0xc4, 0x55, 0xd7, 0x4b, 0xd2,
+ 0xd3, 0xfb, 0xd4, 0xa9, 0xd5, 0x4f, 0xb8, 0x57,
+ 0x93, 0xca, 0x3a, 0x6e, 0x47, 0x6b, 0xfd, 0x79,
+ 0xc4, 0x5d, 0xdd, 0x56, 0x6f, 0xe0, 0xe9, 0xd4,
+ 0xe1, 0xd2, 0x9d, 0x0f, 0xc2, 0x9f, 0x46, 0xf3,
+ 0xf9, 0x00, 0xf1, 0x6b, 0xf8, 0xa1, 0xc0, 0xc6,
+ 0x31, 0x5a, 0x2f, 0x79, 0x32, 0xe2, 0x73, 0xf9,
+ 0xf1, 0x38, 0xf2, 0x33, 0x23, 0x43, 0xeb, 0x49,
+ 0xe2, 0x5e, 0xce, 0x8e, 0x55, 0x3e, 0xcc, 0xe1,
+ 0x3c, 0x5f, 0x3e, 0x95, 0x3f, 0x1d, 0x40, 0xde,
+ 0x5f, 0x72, 0xc2, 0x27, 0x99, 0x96, 0x1f, 0x59,
+ 0xb8, 0xb5, 0x47, 0xfa, 0x9b, 0x95, 0x6a, 0x21,
+ 0x64, 0x82, 0xea, 0xdd, 0x4c, 0xf6, 0xf1, 0x69,
+ 0xa6, 0xea, 0xf9, 0x8a, 0x44, 0x0b, 0xd9, 0xb2,
+ 0x13, 0x26, 0x16, 0xbd, 0x98, 0xb7, 0x4a, 0x7e,
+ 0x72, 0x32, 0x7a, 0x33, 0xaa, 0x40, 0xfe, 0x2a,
+ 0x9c, 0x7c, 0x18, 0x3a, 0x51, 0xfd, 0xcb, 0x02,
+ 0x78, 0xc7, 0x14, 0xf9, 0x9f, 0x60, 0x74, 0x21,
+ 0x2a, 0x59, 0xfd, 0x4f, 0x03, 0xf4, 0xad, 0x6b,
+ 0xf8, 0x76, 0x78, 0x2e, 0x7d, 0x4b, 0xc4, 0x12,
+ 0xf2, 0xd8, 0x6c, 0xa1, 0x0e, 0xcc, 0x8f, 0x52,
+ 0xfd, 0xf4, 0x27, 0xbe, 0x07, 0x87, 0x69, 0xfd,
+ 0xff, 0xc2, 0xd7, 0x67, 0xe6, 0x29, 0x3e, 0xfa,
+ 0x8d, 0xb0, 0xb0, 0xe3, 0xc0, 0x13, 0x78, 0x02,
+ 0xf1, 0x36, 0x77, 0xb9, 0x07, 0x7c, 0x29, 0xfd,
+ 0x4a, 0x54, 0x82, 0xfa, 0x8b, 0x7b, 0xa5, 0x2d,
+ 0xb8, 0x1d, 0x20, 0xaf, 0xcc, 0x0d, 0xf2, 0x66,
+ 0xce, 0x4f, 0xda, 0xdf, 0xa3, 0x29, 0x16, 0x16,
+ 0x36, 0x92, 0x3e, 0xbd, 0x83, 0x0e, 0x85, 0xd4,
+ 0x95, 0x7f, 0x79, 0x44, 0xde, 0x45, 0x4c, 0x63,
+ 0x5d, 0xe6, 0x10, 0xf7, 0x12, 0xd1, 0xfd, 0x7b,
+ 0xf8, 0xcf, 0xe8, 0x40, 0x2d, 0x3f, 0xf9, 0xc7,
+ 0x57, 0xaa, 0x2f, 0xdd, 0xdc, 0xe0, 0x21, 0x9c,
+ 0xff, 0x1f, 0x7f, 0x49, 0x7f, 0x82, 0xf0, 0x0d,
+ 0x65, 0xd7, 0xe8, 0xef, 0x23, 0xd9, 0x4f, 0x62,
+ 0xa7, 0x41, 0xf0, 0x60, 0xfc, 0x52, 0x6a, 0x6f,
+ 0xe5, 0xd7, 0x2c, 0xf2, 0x75, 0x4f, 0xc8, 0xef,
+ 0xf0, 0xd1, 0xac, 0xbb, 0xa9, 0x9b, 0xf4, 0xb2,
+ 0x2d, 0x71, 0xf7, 0xf6, 0x6b, 0x74, 0xc8, 0x24,
+ 0x12, 0x8f, 0x1f, 0x94, 0xd2, 0x7d, 0xb9, 0x92,
+ 0x47, 0xff, 0xed, 0x22, 0x3f, 0x90, 0x80, 0x1f,
+ 0x2d, 0x95, 0xa4, 0xf3, 0xf4, 0x20, 0xde, 0x7f,
+ 0xaa, 0xa4, 0x7c, 0x1a, 0x47, 0xbf, 0x56, 0x31,
+ 0x1a, 0x5f, 0x6f, 0x3e, 0x24, 0x9e, 0x9b, 0x7c,
+ 0xae, 0xfd, 0x46, 0x73, 0x6f, 0x0f, 0x0e, 0xa9,
+ 0x1f, 0xe8, 0x43, 0x3c, 0xcf, 0x3f, 0x5b, 0x0c,
+ 0x9e, 0xc5, 0xfa, 0x93, 0xbf, 0xd8, 0xa4, 0xfd,
+ 0x51, 0x87, 0x5b, 0x5e, 0x55, 0x3f, 0xf3, 0x2b,
+ 0xf1, 0x70, 0xa2, 0x42, 0x25, 0xf8, 0x0d, 0x9e,
+ 0x53, 0xc7, 0x5d, 0xf9, 0x3a, 0x8b, 0x75, 0x85,
+ 0xf6, 0x6c, 0x06, 0xbf, 0x14, 0x65, 0x21, 0x7f,
+ 0x34, 0x79, 0x6e, 0x96, 0x05, 0x59, 0xa8, 0x14,
+ 0xb9, 0x1e, 0x7e, 0x8f, 0xba, 0xd3, 0xad, 0x2e,
+ 0x7d, 0x87, 0x69, 0x43, 0xbf, 0xe6, 0x58, 0x5b,
+ 0x7a, 0x79, 0x09, 0xbd, 0xcd, 0xeb, 0xa6, 0x7e,
+ 0xba, 0x0c, 0x7a, 0x58, 0x78, 0x91, 0x75, 0x9b,
+ 0xb5, 0xc4, 0xc5, 0x17, 0x09, 0xd2, 0x53, 0xa7,
+ 0x51, 0x16, 0xce, 0xfd, 0xad, 0xf3, 0xdb, 0xc2,
+ 0xf7, 0x96, 0xb5, 0x56, 0xfe, 0xfd, 0x8a, 0xef,
+ 0x78, 0x72, 0xe1, 0x02, 0xdc, 0x03, 0x3f, 0xb2,
+ 0x74, 0xa7, 0xf2, 0x31, 0x8e, 0x3e, 0x65, 0xe6,
+ 0x3e, 0xad, 0x27, 0x8d, 0xfe, 0x69, 0x6c, 0x30,
+ 0x7d, 0x83, 0xe9, 0x91, 0x66, 0x61, 0x67, 0x92,
+ 0xf8, 0x62, 0xea, 0xce, 0x99, 0xfa, 0x8a, 0x5f,
+ 0x57, 0x7c, 0xc8, 0x67, 0xaf, 0xa5, 0x3f, 0xfb,
+ 0xf1, 0x6d, 0x09, 0x81, 0xf4, 0x2d, 0xe6, 0x2b,
+ 0x3e, 0xbf, 0xf5, 0x2b, 0x74, 0xd7, 0x14, 0xcc,
+ 0xb2, 0xb0, 0xc7, 0xb1, 0x13, 0x3c, 0x16, 0xfd,
+ 0x48, 0xbb, 0xa7, 0xbf, 0xaf, 0x64, 0x1d, 0x49,
+ 0x13, 0xa5, 0xa7, 0x87, 0x8f, 0x5a, 0xf8, 0x68,
+ 0x83, 0xfc, 0x44, 0x43, 0xde, 0x7b, 0xba, 0x01,
+ 0x71, 0x61, 0x4a, 0xe3, 0x63, 0xd2, 0xdb, 0xd2,
+ 0x07, 0x9a, 0x20, 0x7c, 0xfd, 0xa9, 0xa6, 0x8a,
+ 0xff, 0x23, 0xf8, 0xf1, 0xa8, 0x31, 0xaa, 0x2f,
+ 0x81, 0xd4, 0xeb, 0xf1, 0x23, 0x74, 0xde, 0xc5,
+ 0xf0, 0x59, 0xf5, 0xc7, 0xcb, 0xcf, 0xff, 0x81,
+ 0xbf, 0xf0, 0x7b, 0x20, 0xff, 0x79, 0xb1, 0x86,
+ 0x85, 0xb0, 0x8e, 0xba, 0xcf, 0xaa, 0xd4, 0xd9,
+ 0xfc, 0xf7, 0x14, 0xcf, 0x19, 0xe8, 0x46, 0x66,
+ 0x9c, 0xf4, 0x63, 0x25, 0xfb, 0xf5, 0xdb, 0x27,
+ 0x3f, 0x38, 0x95, 0xe7, 0x7f, 0x6b, 0xe4, 0xc7,
+ 0x0c, 0xf9, 0x55, 0xeb, 0x88, 0xfe, 0x3e, 0x08,
+ 0xbd, 0x58, 0x55, 0x5c, 0xdf, 0xcf, 0xe2, 0x9c,
+ 0xab, 0x24, 0xc9, 0x2f, 0x9c, 0x23, 0xaf, 0xdf,
+ 0x0c, 0xd0, 0x7d, 0xdd, 0xb8, 0x6c, 0x21, 0xbb,
+ 0x09, 0xbe, 0xcc, 0x9c, 0xe1, 0xbe, 0xba, 0x79,
+ 0xcf, 0x86, 0x6f, 0xa2, 0x7e, 0x7f, 0xbf, 0x41,
+ 0xfd, 0xc6, 0x71, 0x74, 0xcc, 0xa1, 0xbd, 0xfc,
+ 0xf1, 0x5b, 0xfc, 0xfd, 0xa9, 0xdf, 0xd1, 0x4d,
+ 0xd3, 0x90, 0x3c, 0xcf, 0x3e, 0xfa, 0x1f, 0xb8,
+ 0x0b, 0xba, 0xef, 0x70, 0x5a, 0xf9, 0xd2, 0x85,
+ 0xe7, 0x34, 0xbd, 0x80, 0x6f, 0x33, 0xf7, 0xd1,
+ 0x51, 0x97, 0x4e, 0xf2, 0xdf, 0x8e, 0xd4, 0x83,
+ 0x79, 0xa3, 0x94, 0xff, 0x41, 0xf8, 0xe7, 0x88,
+ 0xad, 0xea, 0x3f, 0x2b, 0x87, 0x58, 0x08, 0x4e,
+ 0x96, 0x5f, 0x58, 0x37, 0xda, 0xc2, 0xe8, 0xfe,
+ 0xf2, 0x73, 0xaf, 0xf6, 0x5b, 0x98, 0xb8, 0x9e,
+ 0x3a, 0x6d, 0x66, 0x94, 0x60, 0x5b, 0x23, 0xd4,
+ 0x0f, 0x4d, 0x40, 0x37, 0x47, 0x5d, 0x95, 0x1f,
+ 0xd9, 0x4a, 0x7e, 0x7b, 0xcf, 0x96, 0xff, 0x89,
+ 0x27, 0x1e, 0x7a, 0xdc, 0x93, 0x9f, 0x48, 0xe7,
+ 0xbc, 0x27, 0x9c, 0x52, 0xbf, 0xd2, 0x0b, 0xbd,
+ 0xff, 0xfd, 0x03, 0xe5, 0x77, 0xdb, 0x15, 0x16,
+ 0x26, 0x67, 0x68, 0xbe, 0xb3, 0x0d, 0x7f, 0xd5,
+ 0x67, 0x90, 0xd6, 0x3f, 0x85, 0x78, 0xbc, 0xf3,
+ 0x50, 0x7e, 0x79, 0x3a, 0xfe, 0xf8, 0xd3, 0xb9,
+ 0xf2, 0xc7, 0x6f, 0x59, 0xf7, 0x8c, 0x1d, 0xf2,
+ 0xaf, 0x77, 0xef, 0xb2, 0xed, 0xc6, 0xf8, 0x0e,
+ 0xf3, 0x9a, 0xb8, 0x78, 0xd3, 0x4f, 0x7e, 0xf9,
+ 0x08, 0xf5, 0x63, 0xf5, 0x01, 0xf9, 0xed, 0x50,
+ 0xee, 0xf3, 0x97, 0xb5, 0xd2, 0x17, 0x67, 0xee,
+ 0xeb, 0x76, 0x9c, 0xe2, 0x6b, 0x21, 0xfa, 0x38,
+ 0xbd, 0x9f, 0xf4, 0xcf, 0x9f, 0xf9, 0x90, 0x57,
+ 0x34, 0xbe, 0xcb, 0x1c, 0x40, 0x0f, 0x5e, 0xc4,
+ 0x73, 0xef, 0x26, 0x82, 0x7d, 0x2f, 0x2c, 0x62,
+ 0x2e, 0x60, 0xc2, 0xe8, 0x2b, 0x4e, 0x6c, 0x50,
+ 0x3f, 0x1a, 0x80, 0x5f, 0xdc, 0xec, 0x7c, 0x1a,
+ 0xde, 0x9e, 0x7b, 0x76, 0xac, 0xa0, 0x79, 0xdc,
+ 0x13, 0xe6, 0x34, 0x49, 0x15, 0x39, 0x07, 0xa3,
+ 0xb1, 0xd4, 0xc4, 0xc5, 0x35, 0xf8, 0x0f, 0x57,
+ 0xce, 0xeb, 0x9e, 0x97, 0xfc, 0xf2, 0x63, 0xfc,
+ 0xad, 0xc3, 0xec, 0x46, 0xf0, 0xde, 0xcc, 0x7f,
+ 0x1e, 0x7e, 0x2f, 0xbd, 0x37, 0xe8, 0x4c, 0x85,
+ 0xce, 0xea, 0x4f, 0xbe, 0xed, 0xc3, 0xb6, 0xa7,
+ 0x69, 0xbd, 0xe9, 0xd4, 0xcf, 0x95, 0x09, 0xe8,
+ 0x96, 0xe9, 0xcb, 0xba, 0x03, 0x72, 0x75, 0x1e,
+ 0x6e, 0xec, 0x7f, 0x7d, 0x92, 0xe6, 0x5b, 0xdf,
+ 0x71, 0x2e, 0x59, 0x99, 0xaa, 0x57, 0x2b, 0xd0,
+ 0xdb, 0x32, 0x79, 0xca, 0x87, 0x20, 0xe2, 0xa2,
+ 0x58, 0x96, 0xf2, 0xf9, 0x1c, 0x7a, 0x9a, 0x5a,
+ 0xa0, 0xfe, 0x69, 0x1b, 0x7d, 0xb6, 0x73, 0x91,
+ 0xf2, 0xed, 0x20, 0xf3, 0x88, 0xd5, 0xdd, 0x88,
+ 0x4b, 0xf3, 0x0b, 0x71, 0x1a, 0x5f, 0x4c, 0xe7,
+ 0x51, 0x8e, 0xf5, 0x9f, 0xdf, 0xdc, 0x41, 0xef,
+ 0x63, 0x1d, 0x0e, 0xef, 0x7e, 0xa9, 0xed, 0x11,
+ 0x4f, 0x79, 0x4b, 0x55, 0xaf, 0x0b, 0xb8, 0xcf,
+ 0x0b, 0x6f, 0x14, 0x4f, 0xcf, 0xa8, 0x47, 0x5f,
+ 0x3f, 0x25, 0x8e, 0x8c, 0xf7, 0x57, 0x16, 0x8e,
+ 0xa5, 0xc8, 0x2f, 0xb9, 0xa2, 0x73, 0xdf, 0xc5,
+ 0xc8, 0x3f, 0xd4, 0x44, 0xa7, 0x3d, 0x63, 0xa4,
+ 0x37, 0x4d, 0x88, 0x9f, 0x44, 0x07, 0xe9, 0xb5,
+ 0x23, 0xf7, 0x75, 0x7c, 0x97, 0xfa, 0x8f, 0x7d,
+ 0xcc, 0x9d, 0x46, 0x76, 0xd1, 0x7c, 0xec, 0x14,
+ 0x79, 0xb3, 0x60, 0x9c, 0xf4, 0xc2, 0x83, 0xf5,
+ 0x86, 0x87, 0xa2, 0x33, 0x66, 0x39, 0xf5, 0x24,
+ 0xb7, 0xb5, 0xfc, 0xee, 0x3b, 0xf4, 0xd1, 0x83,
+ 0x26, 0x2b, 0x3e, 0xe7, 0xa3, 0x3b, 0xee, 0xb3,
+ 0xe4, 0x77, 0x1a, 0xa1, 0x2b, 0x35, 0x27, 0xa9,
+ 0x5f, 0xaf, 0x86, 0x7f, 0x70, 0xe8, 0xf2, 0x31,
+ 0xbc, 0x14, 0xf5, 0xc3, 0xb9, 0x03, 0xcf, 0x35,
+ 0x97, 0xd0, 0xf1, 0xd7, 0x57, 0x34, 0x6f, 0xfd,
+ 0x16, 0x7f, 0xe1, 0x9a, 0xaa, 0xf8, 0x38, 0x4c,
+ 0xfc, 0x6c, 0xc8, 0xe2, 0x7b, 0x66, 0x33, 0xfd,
+ 0xfc, 0x75, 0x4f, 0xad, 0x3f, 0x9d, 0xfd, 0x65,
+ 0x24, 0xca, 0x5f, 0xfd, 0x40, 0x1d, 0xcd, 0xb8,
+ 0xa5, 0xf9, 0xe1, 0x0a, 0xea, 0x67, 0x2f, 0x37,
+ 0xc5, 0x4b, 0x4b, 0xf2, 0x31, 0xe8, 0xa6, 0xe6,
+ 0x0b, 0xe7, 0xd1, 0xbd, 0x81, 0xc1, 0xca, 0xc7,
+ 0x73, 0xd4, 0xb3, 0xb1, 0xf9, 0xba, 0xcf, 0x16,
+ 0xcc, 0xbd, 0xfe, 0x2c, 0xfa, 0xa7, 0x9e, 0x50,
+ 0x1f, 0x52, 0x2f, 0x0e, 0x80, 0x3f, 0x47, 0x17,
+ 0xf3, 0x23, 0xa4, 0xe7, 0xb9, 0xf8, 0xcd, 0xca,
+ 0xae, 0xf2, 0xff, 0x0f, 0xa8, 0x33, 0x79, 0xfd,
+ 0x55, 0x6f, 0x6a, 0xf0, 0xb9, 0x13, 0x0e, 0xc4,
+ 0x91, 0xb9, 0x46, 0x5c, 0xc6, 0xcf, 0x51, 0x3f,
+ 0xff, 0x27, 0xf3, 0x92, 0xcd, 0x67, 0xb4, 0xdf,
+ 0xde, 0xf4, 0x3b, 0x33, 0xdc, 0xe5, 0x1f, 0xbe,
+ 0xe1, 0x5c, 0xab, 0xa7, 0xab, 0xbe, 0xf7, 0xa7,
+ 0xbe, 0xba, 0x37, 0x51, 0xfe, 0xde, 0xa7, 0x0e,
+ 0xdd, 0x6d, 0xa4, 0xfb, 0x0c, 0xc1, 0xb7, 0xd7,
+ 0xb9, 0xa1, 0xf9, 0xac, 0x1f, 0x7e, 0xa4, 0xdd,
+ 0x29, 0xcd, 0xcf, 0xa7, 0xe2, 0xef, 0xdf, 0x0f,
+ 0xce, 0x83, 0xc7, 0x26, 0x5b, 0x48, 0xd0, 0xdc,
+ 0xd6, 0x14, 0xa1, 0x2f, 0x61, 0x55, 0xb5, 0xfe,
+ 0xcf, 0x30, 0xce, 0x8f, 0x73, 0xf4, 0xbc, 0x21,
+ 0xc4, 0x5f, 0xf7, 0xe2, 0xf2, 0x27, 0x25, 0x99,
+ 0x1b, 0xf9, 0x0c, 0xda, 0x0a, 0x6f, 0x84, 0xdf,
+ 0xa8, 0xd7, 0xfd, 0x6b, 0x78, 0x39, 0xb8, 0x73,
+ 0x34, 0x3a, 0x6f, 0x32, 0xd0, 0x95, 0xbb, 0x2e,
+ 0x8a, 0xd7, 0x24, 0xf6, 0xeb, 0x51, 0x0f, 0x5f,
+ 0x6b, 0xc6, 0x72, 0x9f, 0x6d, 0x07, 0x4b, 0xef,
+ 0xdb, 0x3d, 0xb5, 0x70, 0xe8, 0xa5, 0xfa, 0xb9,
+ 0x01, 0xdc, 0x5f, 0xc3, 0x1d, 0xea, 0x97, 0xe3,
+ 0x39, 0x87, 0xfd, 0x27, 0x75, 0x7f, 0xfe, 0xf8,
+ 0xb8, 0xc2, 0x36, 0x3a, 0xcf, 0x67, 0xf0, 0x13,
+ 0x17, 0xd1, 0x5d, 0xe3, 0xca, 0xdc, 0x7f, 0x9d,
+ 0xaf, 0xea, 0xc7, 0x24, 0xf6, 0x57, 0xf1, 0xa5,
+ 0xfa, 0xab, 0xd2, 0xdc, 0xf7, 0xe5, 0x89, 0xea,
+ 0x7f, 0x8b, 0x93, 0x9f, 0x23, 0x0a, 0xe5, 0x5f,
+ 0x6b, 0x31, 0xaf, 0x2a, 0x93, 0x26, 0xfd, 0xd9,
+ 0x46, 0xfc, 0xf4, 0x4e, 0x67, 0x4e, 0x68, 0x9a,
+ 0xe3, 0xeb, 0xf7, 0x6e, 0x95, 0xbf, 0x0e, 0x20,
+ 0xee, 0x8a, 0xe5, 0xa8, 0x3f, 0x8b, 0x21, 0xcf,
+ 0x9c, 0x53, 0x3c, 0xf5, 0x3c, 0xe2, 0xc2, 0x39,
+ 0x5f, 0xf1, 0xeb, 0xc3, 0x5c, 0x78, 0xf8, 0x73,
+ 0x7d, 0x5e, 0xf3, 0xbe, 0xd5, 0x3f, 0xc8, 0x6f,
+ 0xcc, 0xc3, 0xb7, 0x7e, 0x59, 0x4e, 0xf7, 0x3d,
+ 0x08, 0x5f, 0x7b, 0xbe, 0xa5, 0xfe, 0x1e, 0x8e,
+ 0x1f, 0xf2, 0xcb, 0x57, 0x7e, 0xad, 0xe1, 0x9c,
+ 0xe6, 0xde, 0x90, 0x7e, 0xdd, 0xc1, 0x2f, 0x94,
+ 0x3f, 0x31, 0x0f, 0xbe, 0x82, 0xba, 0x59, 0xb0,
+ 0x5c, 0xf7, 0x77, 0x97, 0x39, 0xf2, 0x9a, 0xbd,
+ 0xf4, 0x75, 0xa6, 0x3f, 0x3a, 0xe4, 0xbc, 0x4b,
+ 0xbf, 0xff, 0x6c, 0xc3, 0x47, 0xbe, 0xcc, 0xd5,
+ 0x7d, 0x0e, 0x8d, 0xb2, 0x70, 0x69, 0x91, 0xf2,
+ 0xe3, 0x47, 0xce, 0x75, 0x6f, 0x8e, 0xe2, 0xeb,
+ 0x32, 0x7a, 0xdc, 0xb7, 0x73, 0x75, 0xf8, 0x61,
+ 0xea, 0xfa, 0xeb, 0x71, 0xf2, 0x07, 0xc3, 0x39,
+ 0x37, 0xa7, 0x8d, 0xf2, 0x13, 0x43, 0xc9, 0xdb,
+ 0x86, 0x6e, 0xaa, 0xaf, 0x5b, 0xc8, 0x97, 0x6b,
+ 0x4e, 0xe8, 0xa8, 0xd9, 0x84, 0x4e, 0x17, 0x2d,
+ 0x52, 0x7d, 0x4d, 0xa1, 0x2f, 0xac, 0x3c, 0x5e,
+ 0xfa, 0xe4, 0x43, 0xdc, 0x0d, 0xe8, 0xcd, 0xdc,
+ 0xc6, 0xac, 0xfa, 0xc4, 0x42, 0xf3, 0xdd, 0xf8,
+ 0x04, 0x33, 0xf2, 0x5d, 0x0b, 0x5e, 0xcd, 0xb9,
+ 0x47, 0x53, 0x9b, 0x79, 0x9f, 0xbf, 0x3f, 0xbe,
+ 0xd1, 0xfc, 0x8e, 0x5e, 0x4e, 0xb9, 0x2c, 0x7d,
+ 0x0b, 0x60, 0x6e, 0x34, 0x39, 0x44, 0xe7, 0x3b,
+ 0x01, 0xff, 0x91, 0x1c, 0xa9, 0xfe, 0xaa, 0x37,
+ 0xf3, 0xc8, 0x9a, 0x1f, 0x77, 0x83, 0x8f, 0xe1,
+ 0xbe, 0xeb, 0x07, 0x3f, 0xd5, 0xe7, 0x59, 0x87,
+ 0xe7, 0x43, 0xbd, 0xbf, 0x2e, 0xba, 0x93, 0xbe,
+ 0x4b, 0xf3, 0xdc, 0x8f, 0xd0, 0xf1, 0xa8, 0x96,
+ 0xaa, 0xe7, 0x2f, 0x4e, 0x5a, 0xc8, 0x39, 0xae,
+ 0xfb, 0x08, 0x66, 0x7f, 0x6b, 0xee, 0x6b, 0x1e,
+ 0x75, 0x91, 0x38, 0xac, 0x1a, 0xa9, 0x7c, 0xf9,
+ 0x91, 0xf7, 0xbd, 0x9f, 0xa5, 0x7e, 0x72, 0x1c,
+ 0xbe, 0xb6, 0x67, 0xa2, 0x7e, 0x4f, 0xe8, 0xc3,
+ 0x3d, 0x9f, 0xbb, 0xa2, 0xfe, 0x3d, 0xe2, 0x8a,
+ 0x85, 0x49, 0x19, 0xfa, 0xbd, 0xc2, 0x83, 0xf3,
+ 0xef, 0x79, 0x02, 0x5d, 0x31, 0xb7, 0xd1, 0xfb,
+ 0x69, 0x9e, 0xe8, 0x86, 0x39, 0x44, 0x9d, 0x1a,
+ 0xfa, 0x19, 0xbe, 0xd8, 0x34, 0xa7, 0x5f, 0x76,
+ 0xae, 0xa3, 0x79, 0xfc, 0x34, 0xe6, 0x45, 0x6b,
+ 0xca, 0xe8, 0xfd, 0xad, 0xa8, 0xa7, 0xf9, 0x0d,
+ 0x55, 0xaf, 0xd2, 0x88, 0x93, 0x80, 0x41, 0xf8,
+ 0x38, 0x93, 0x4a, 0x5c, 0x3a, 0x46, 0xab, 0xbe,
+ 0x5d, 0x26, 0xae, 0xf6, 0x6e, 0x51, 0x3d, 0xcc,
+ 0x45, 0x2f, 0x3b, 0x9c, 0x96, 0x1f, 0xff, 0x84,
+ 0x3c, 0x48, 0x8e, 0x53, 0xbf, 0x36, 0x18, 0xbd,
+ 0x75, 0x7c, 0xa4, 0xfe, 0x29, 0x0b, 0x3d, 0x3e,
+ 0xea, 0x2a, 0xbf, 0xe7, 0x17, 0x63, 0xa1, 0x4e,
+ 0x49, 0xf9, 0xfb, 0xa5, 0xf8, 0xb2, 0x93, 0x51,
+ 0xba, 0x9f, 0xe5, 0xcc, 0x71, 0xf7, 0xd6, 0x53,
+ 0xbf, 0x57, 0x89, 0x7c, 0x89, 0x9a, 0x2b, 0xbf,
+ 0xd4, 0x8c, 0xdf, 0x95, 0x32, 0x26, 0xab, 0xbe,
+ 0x5e, 0xe6, 0xf7, 0x84, 0xa0, 0x62, 0x9a, 0xbf,
+ 0x5d, 0x21, 0xce, 0x06, 0x2d, 0x57, 0xbe, 0xbe,
+ 0x26, 0xbf, 0x5c, 0x6b, 0x68, 0x7d, 0x5d, 0x38,
+ 0xd7, 0x93, 0xa5, 0xd5, 0xcf, 0x4c, 0xa0, 0xff,
+ 0x7e, 0xbb, 0x4e, 0xf5, 0x38, 0x0d, 0x1f, 0xd2,
+ 0xdb, 0x51, 0x7a, 0xb5, 0x17, 0xbd, 0x8e, 0xbd,
+ 0xad, 0x79, 0xee, 0x02, 0x74, 0xae, 0x54, 0x36,
+ 0x73, 0xfd, 0x7f, 0xe6, 0x5d, 0x57, 0xdd, 0xa4,
+ 0x37, 0x4e, 0xdc, 0x53, 0x8b, 0xca, 0x8a, 0xbf,
+ 0xad, 0xe8, 0xc8, 0xc0, 0xb0, 0x29, 0x3a, 0x9f,
+ 0xaa, 0x16, 0xda, 0xd5, 0x93, 0xbf, 0x7c, 0x0f,
+ 0xdd, 0xbb, 0xff, 0x48, 0xf5, 0xf0, 0x21, 0x73,
+ 0xd2, 0xa6, 0x2d, 0x74, 0x3e, 0xe3, 0xf1, 0x29,
+ 0x35, 0x36, 0xea, 0xf7, 0x03, 0x47, 0xe2, 0xbe,
+ 0x47, 0xa8, 0xfa, 0x8f, 0x19, 0xe8, 0xd8, 0xb5,
+ 0xce, 0xea, 0x1f, 0x63, 0x88, 0xa3, 0xc8, 0xdd,
+ 0xf8, 0x4c, 0x53, 0x88, 0x5e, 0x5e, 0x3f, 0x24,
+ 0x23, 0xe8, 0x83, 0xbe, 0x8d, 0xce, 0xd6, 0xfd,
+ 0x5c, 0xa7, 0xee, 0x54, 0xfc, 0x44, 0xfd, 0x54,
+ 0x32, 0xfd, 0xc8, 0x8a, 0xcd, 0xc4, 0x89, 0xd9,
+ 0x47, 0x9d, 0xed, 0xb4, 0x5d, 0xf5, 0x79, 0x3d,
+ 0xf9, 0x5b, 0xe5, 0xac, 0xf2, 0xa7, 0x16, 0xfb,
+ 0x88, 0xf5, 0xd1, 0xbc, 0xba, 0x17, 0x7d, 0xf4,
+ 0xc9, 0x07, 0x93, 0xe0, 0xaf, 0x99, 0xef, 0x6d,
+ 0x7c, 0x85, 0x4e, 0x9b, 0xe2, 0xcc, 0xbb, 0xdc,
+ 0xd7, 0xaa, 0xbe, 0x2e, 0xe6, 0xfe, 0x9e, 0x04,
+ 0x4b, 0xef, 0x7a, 0x90, 0x7f, 0x6f, 0x42, 0x54,
+ 0xbf, 0x5c, 0xc8, 0xcb, 0x5e, 0x53, 0x63, 0xe0,
+ 0xbe, 0xf8, 0xd2, 0x65, 0x71, 0xcc, 0x99, 0x4c,
+ 0x13, 0xf6, 0x73, 0xeb, 0x37, 0xcd, 0xf3, 0x5d,
+ 0xf0, 0x93, 0x75, 0x3d, 0xb4, 0x9e, 0xd3, 0xe8,
+ 0xe4, 0x30, 0x77, 0xfd, 0x9e, 0xbb, 0x88, 0x3a,
+ 0xba, 0x33, 0x9f, 0xdf, 0x3d, 0xcc, 0x38, 0x7e,
+ 0xf7, 0xed, 0x96, 0xa5, 0xfd, 0x76, 0x27, 0xae,
+ 0x3e, 0x88, 0x55, 0xff, 0x3b, 0x97, 0xfd, 0xe6,
+ 0x95, 0xd7, 0xbc, 0xad, 0x23, 0xf7, 0xb1, 0xdd,
+ 0x5b, 0xf3, 0xc5, 0x68, 0xe6, 0xe4, 0x0b, 0x02,
+ 0x0e, 0xc1, 0x27, 0x70, 0xdf, 0x0b, 0x9c, 0xd4,
+ 0x0f, 0x37, 0xa6, 0xbe, 0x4e, 0xee, 0xaa, 0x7a,
+ 0xb8, 0x19, 0xdd, 0x48, 0xed, 0xae, 0xfa, 0x1a,
+ 0x46, 0x3f, 0x7f, 0x7f, 0x8a, 0xea, 0xfb, 0x7e,
+ 0xf2, 0x3b, 0x32, 0x41, 0xfe, 0xa2, 0x0d, 0x3a,
+ 0xec, 0x18, 0x31, 0x4b, 0xfb, 0xe3, 0x7d, 0xa1,
+ 0x09, 0xca, 0xdf, 0x3b, 0xbe, 0xff, 0x03, 0x4e,
+ 0x9a, 0x03, 0x4d
+};
+
+#endif
diff --git a/test/common_plat/validation/api/cpumask/.gitignore b/test/validation/api/cpumask/.gitignore
index 655a1640f..655a1640f 100644
--- a/test/common_plat/validation/api/cpumask/.gitignore
+++ b/test/validation/api/cpumask/.gitignore
diff --git a/test/validation/api/cpumask/Makefile.am b/test/validation/api/cpumask/Makefile.am
new file mode 100644
index 000000000..3872c1bd4
--- /dev/null
+++ b/test/validation/api/cpumask/Makefile.am
@@ -0,0 +1,5 @@
+include ../Makefile.inc
+
+test_PROGRAMS = cpumask_main
+cpumask_main_SOURCES = cpumask.c
+LDADD += $(LIBCPUMASK_COMMON)
diff --git a/test/validation/api/cpumask/cpumask.c b/test/validation/api/cpumask/cpumask.c
new file mode 100644
index 000000000..db500df3a
--- /dev/null
+++ b/test/validation/api/cpumask/cpumask.c
@@ -0,0 +1,200 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+
+#include "odp_cunit_common.h"
+#include "mask_common.h"
+
+static int cpumask_max_count(void)
+{
+ odp_cpumask_t mask;
+
+ odp_cpumask_setall(&mask);
+
+ return odp_cpumask_count(&mask);
+}
+
+static void cpumask_test_odp_cpumask_def_control(void)
+{
+ odp_cpumask_t mask;
+ int num, count, all;
+ int max = cpumask_max_count();
+ int request = 7;
+
+ CU_ASSERT_FATAL(max > 1);
+
+ if (request > max)
+ request = max - 1;
+
+ all = odp_cpumask_default_control(&mask, 0);
+ num = all;
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+
+ num = odp_cpumask_default_control(&mask, max);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+ CU_ASSERT(num == all);
+
+ num = odp_cpumask_default_control(&mask, 1);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num == 1);
+ CU_ASSERT(num == count);
+
+ num = odp_cpumask_default_control(&mask, request);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= request);
+ CU_ASSERT(num == count);
+
+ CU_ASSERT(odp_cpumask_default_control(NULL, request) == num);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 0) == all);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 1) == 1);
+}
+
+static void cpumask_test_odp_cpumask_def_worker(void)
+{
+ odp_cpumask_t mask;
+ int num, count, all;
+ int max = cpumask_max_count();
+ int request = 7;
+
+ CU_ASSERT_FATAL(max > 1);
+
+ if (request > max)
+ request = max - 1;
+
+ all = odp_cpumask_default_worker(&mask, 0);
+ num = all;
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+
+ num = odp_cpumask_default_worker(&mask, max);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+ CU_ASSERT(num == all);
+
+ num = odp_cpumask_default_worker(&mask, 1);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num == 1);
+ CU_ASSERT(num == count);
+
+ num = odp_cpumask_default_worker(&mask, request);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= request);
+ CU_ASSERT(num == count);
+
+ CU_ASSERT(odp_cpumask_default_worker(NULL, request) == num);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 0) == all);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 1) == 1);
+}
+
+static void cpumask_test_odp_cpumask_def(void)
+{
+ odp_cpumask_t mask, all_mask, overlap;
+ int count, all, num_worker, num_control, request;
+ int max = cpumask_max_count();
+ int cpu_count = odp_cpu_count();
+
+ all = odp_cpumask_all_available(&all_mask);
+ count = odp_cpumask_count(&all_mask);
+
+ CU_ASSERT_FATAL(cpu_count > 0);
+ CU_ASSERT_FATAL(all > 0);
+ CU_ASSERT(all == cpu_count);
+ CU_ASSERT(all <= max);
+ CU_ASSERT(all == count);
+
+ request = all - 1;
+ if (request == 0)
+ request = 1;
+
+ num_worker = odp_cpumask_default_worker(&mask, request);
+ count = odp_cpumask_count(&mask);
+ CU_ASSERT(num_worker > 0);
+ CU_ASSERT(num_worker <= request);
+ CU_ASSERT(num_worker == count);
+
+ /* Check that CPUs are in the all CPUs mask */
+ odp_cpumask_zero(&overlap);
+ odp_cpumask_and(&overlap, &mask, &all_mask);
+ CU_ASSERT(odp_cpumask_count(&overlap) == num_worker);
+
+ num_control = odp_cpumask_default_control(&mask, 1);
+ count = odp_cpumask_count(&mask);
+ CU_ASSERT(num_control == 1);
+ CU_ASSERT(num_control == count);
+
+ odp_cpumask_zero(&overlap);
+ odp_cpumask_and(&overlap, &mask, &all_mask);
+ CU_ASSERT(odp_cpumask_count(&overlap) == num_control);
+
+ CU_ASSERT(odp_cpumask_default_worker(NULL, request) == num_worker);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 0) <= all);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 0) <= all);
+}
+
+odp_testinfo_t cpumask_suite[] = {
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_to_from_str),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_equal),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_zero),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_set),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_clr),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_isset),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_count),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_and),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_or),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_xor),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_copy),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_first),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_last),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_next),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_setall),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def_control),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def_worker),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t cpumask_suites[] = {
+ {"Cpumask", NULL, NULL, cpumask_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(cpumask_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/crypto/.gitignore b/test/validation/api/crypto/.gitignore
index 0ac55e35e..0ac55e35e 100644
--- a/test/common_plat/validation/api/crypto/.gitignore
+++ b/test/validation/api/crypto/.gitignore
diff --git a/test/validation/api/crypto/Makefile.am b/test/validation/api/crypto/Makefile.am
new file mode 100644
index 000000000..ead21a336
--- /dev/null
+++ b/test/validation/api/crypto/Makefile.am
@@ -0,0 +1,14 @@
+include ../Makefile.inc
+
+test_PROGRAMS = crypto_main
+crypto_main_SOURCES = \
+ odp_crypto_test_inp.c \
+ crypto_op_test.c \
+ crypto_op_test.h \
+ test_vectors.h \
+ test_vectors_len.h \
+ test_vector_defs.h \
+ util.h \
+ util.c
+
+PRELDADD += $(LIBPACKET_COMMON)
diff --git a/test/validation/api/crypto/crypto_op_test.c b/test/validation/api/crypto/crypto_op_test.c
new file mode 100644
index 000000000..ae1465581
--- /dev/null
+++ b/test/validation/api/crypto/crypto_op_test.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <packet_common.h>
+#include "crypto_op_test.h"
+#include "util.h"
+
+#define MAX_FAILURE_PRINTS 20
+
+#define MAX_IGNORED_RANGES 3
+
+/*
+ * Output packet parts that we ignore since they have undefined values
+ */
+typedef struct ignore_t {
+ uint32_t byte_offset; /* offset to a byte which has bits to be ignored */
+ uint8_t byte_mask; /* mask of ignored bits in the byte */
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ } ranges[MAX_IGNORED_RANGES]; /* byte ranges to be ignored */
+ uint32_t num_ranges;
+} ignore_t;
+
+/* Add room for bytes that are not included in ref->length */
+#define MAX_EXP_DATA_LEN (MAX_DATA_LEN + 200)
+
+/*
+ * Expected packet data
+ */
+typedef struct expected_t {
+ uint8_t data[MAX_EXP_DATA_LEN];
+ uint32_t len;
+ ignore_t ignore;
+} expected_t;
+
+int crypto_op(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ odp_bool_t *ok,
+ const odp_crypto_packet_op_param_t *op_params,
+ odp_crypto_op_type_t session_op_type,
+ odp_crypto_op_type_t op_type)
+{
+ int rc;
+ odp_event_t event, event2;
+ odp_crypto_packet_result_t result;
+ odp_event_subtype_t subtype;
+ odp_packet_t orig_pkt_out;
+
+ if (op_type == ODP_CRYPTO_OP_TYPE_LEGACY)
+ *pkt_out = pkt_in;
+ else if (op_type == ODP_CRYPTO_OP_TYPE_BASIC)
+ *pkt_out = ODP_PACKET_INVALID;
+ orig_pkt_out = *pkt_out;
+
+ if (suite_context.op_mode == ODP_CRYPTO_SYNC) {
+ rc = odp_crypto_op(&pkt_in, pkt_out, op_params, 1);
+ if (rc <= 0) {
+ CU_FAIL("Failed odp_crypto_packet_op()");
+ goto fail;
+ }
+ } else {
+ odp_packet_t *out_param = pkt_out;
+
+ if (session_op_type == ODP_CRYPTO_OP_TYPE_BASIC)
+ out_param = NULL;
+
+ rc = odp_crypto_op_enq(&pkt_in, out_param, op_params, 1);
+ if (rc <= 0) {
+ CU_FAIL("Failed odp_crypto_op_enq()");
+ goto fail;
+ }
+
+ /* Get crypto completion event from compl_queue. */
+ CU_ASSERT_FATAL(NULL != suite_context.compl_queue_deq);
+ do {
+ event = suite_context.compl_queue_deq();
+ } while (event == ODP_EVENT_INVALID);
+
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(event));
+ CU_ASSERT(ODP_EVENT_PACKET_CRYPTO == odp_event_subtype(event));
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(event, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_CRYPTO == subtype);
+
+ *pkt_out = odp_crypto_packet_from_event(event);
+ }
+
+ if (op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ CU_ASSERT(*pkt_out == orig_pkt_out);
+
+ event = odp_packet_to_event(*pkt_out);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(event));
+ CU_ASSERT(ODP_EVENT_PACKET_CRYPTO == odp_event_subtype(event));
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(event, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_CRYPTO == subtype);
+ CU_ASSERT(odp_packet_subtype(*pkt_out) == ODP_EVENT_PACKET_CRYPTO);
+
+ event2 = odp_crypto_packet_to_event(*pkt_out);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(event2));
+ CU_ASSERT(ODP_EVENT_PACKET_CRYPTO == odp_event_subtype(event2));
+ CU_ASSERT(odp_event_to_u64(event) == odp_event_to_u64(event2));
+
+ rc = odp_crypto_result(&result, *pkt_out);
+ if (rc < -1)
+ CU_FAIL("Failed odp_crypto_result()");
+ CU_ASSERT(rc == 0 || rc == -1);
+
+ if (op_type == ODP_CRYPTO_OP_TYPE_OOP &&
+ suite_context.op_mode == ODP_CRYPTO_ASYNC)
+ CU_ASSERT(result.pkt_in == pkt_in);
+
+ *ok = (rc == 0);
+
+ return 0;
+fail:
+ odp_packet_free(pkt_in);
+ if (op_type == ODP_CRYPTO_OP_TYPE_OOP)
+ odp_packet_free(*pkt_out);
+ return -1;
+}
+
+/*
+ * Try to adjust packet so that the first segment holds 'first_seg_len' bytes
+ * of packet data (+ tailroom if first_seg_len is longer than the packet).
+ *
+ * If 'first_seg_len' is zero, do not try to add segments but make headroom
+ * zero.
+ *
+ * Packet data bytes are not preserved.
+ */
+static void adjust_segments(odp_packet_t *pkt, uint32_t first_seg_len)
+{
+ uint32_t shift;
+
+ shift = odp_packet_headroom(*pkt) + first_seg_len;
+
+ if (odp_packet_extend_head(pkt, shift, NULL, NULL) < 0) {
+ CU_FAIL("odp_packet_extend_head() failed\n");
+ return;
+ }
+ if (odp_packet_trunc_tail(pkt, shift, NULL, NULL) < 0) {
+ CU_FAIL("odp_packet_trunc_tail() failed\n");
+ return;
+ }
+ /*
+ * ODP API does not seem to guarantee that we ever have a multi-segment
+ * packet at this point, but we can print a message about it.
+ */
+ if (first_seg_len == 1 &&
+ first_seg_len != odp_packet_seg_len(*pkt))
+ printf("Could not create a segmented packet for testing.\n");
+}
+
+static void write_header_and_trailer(odp_packet_t pkt,
+ uint32_t header_len, uint32_t trailer_len)
+{
+ uint32_t trailer_offset = odp_packet_len(pkt) - trailer_len;
+ uint32_t max_len = header_len > trailer_len ? header_len : trailer_len;
+ uint8_t buffer[max_len];
+ int rc;
+
+ fill_with_pattern(buffer, sizeof(buffer));
+
+ rc = odp_packet_copy_from_mem(pkt, 0, header_len, buffer);
+ CU_ASSERT(rc == 0);
+ rc = odp_packet_copy_from_mem(pkt, trailer_offset, trailer_len, buffer);
+ CU_ASSERT(rc == 0);
+}
+
+static void prepare_crypto_ranges(const crypto_op_test_param_t *param,
+ odp_packet_data_range_t *cipher_range,
+ odp_packet_data_range_t *auth_range)
+{
+ uint32_t c_scale = param->session.cipher_range_in_bits ? 8 : 1;
+ uint32_t a_scale = param->session.auth_range_in_bits ? 8 : 1;
+
+ *cipher_range = param->cipher_range;
+ *auth_range = param->auth_range;
+ cipher_range->offset += c_scale * param->header_len;
+ auth_range->offset += a_scale * param->header_len;
+}
+
+static int prepare_input_packet(const crypto_op_test_param_t *param,
+ odp_packet_t *pkt_in)
+{
+ crypto_test_reference_t *ref = param->ref;
+ uint32_t reflength = ref_length_in_bytes(ref);
+ odp_packet_t pkt;
+ uint32_t digest_offset = param->digest_offset;
+ uint32_t pkt_len;
+
+ pkt_len = param->header_len + reflength + param->trailer_len;
+ if (param->digest_offset == param->header_len + reflength)
+ pkt_len += ref->digest_length;
+ if (pkt_len == 0)
+ pkt_len = 1;
+
+ pkt = odp_packet_alloc(suite_context.pool, pkt_len);
+
+ CU_ASSERT(pkt != ODP_PACKET_INVALID);
+ if (pkt == ODP_PACKET_INVALID)
+ return -1;
+
+ if (param->adjust_segmentation)
+ adjust_segments(&pkt, param->first_seg_len);
+
+ write_header_and_trailer(pkt, param->header_len, param->trailer_len);
+
+ if (param->session.op == ODP_CRYPTO_OP_ENCODE) {
+ odp_packet_copy_from_mem(pkt, param->header_len,
+ reflength, ref->plaintext);
+ } else {
+ odp_packet_copy_from_mem(pkt, param->header_len,
+ reflength, ref->ciphertext);
+ odp_packet_copy_from_mem(pkt, digest_offset,
+ ref->digest_length,
+ ref->digest);
+ if (param->wrong_digest) {
+ uint8_t byte = ~ref->digest[0];
+
+ odp_packet_copy_from_mem(pkt, digest_offset, 1, &byte);
+ }
+ }
+ *pkt_in = pkt;
+ return 0;
+}
+
+static void prepare_oop_output_packet(const crypto_op_test_param_t *param,
+ odp_packet_t *pkt_out,
+ uint32_t pkt_len)
+{
+ uint32_t reflength = ref_length_in_bytes(param->ref);
+ const uint32_t oop_extra_len = 5;
+ uint32_t trl_len;
+ uint32_t hdr_len;
+ uint32_t oop_len;
+
+ oop_len = pkt_len + param->oop_shift + oop_extra_len;
+ *pkt_out = odp_packet_alloc(suite_context.pool, oop_len);
+ CU_ASSERT_FATAL(*pkt_out != ODP_PACKET_INVALID);
+
+ uint8_t buf[oop_len];
+
+ memset(buf, 0x55, sizeof(buf));
+ odp_packet_copy_from_mem(*pkt_out, 0, sizeof(buf), buf);
+
+ hdr_len = param->header_len + param->oop_shift;
+ trl_len = oop_len - hdr_len - reflength;
+
+ write_header_and_trailer(*pkt_out, hdr_len, trl_len);
+
+ /* have different metadata than in the input packet */
+ memset(odp_packet_user_area(*pkt_out), 0xab,
+ odp_packet_user_area_size(*pkt_out));
+}
+
+static int is_packet_data_equal(odp_packet_t pkt_1, odp_packet_t pkt_2)
+{
+ uint32_t len = odp_packet_len(pkt_1);
+ uint8_t buf_1[len];
+ uint8_t buf_2[len];
+
+ if (len != odp_packet_len(pkt_2) ||
+ odp_packet_copy_to_mem(pkt_1, 0, len, buf_1) ||
+ odp_packet_copy_to_mem(pkt_2, 0, len, buf_2))
+ return 0;
+
+ return !memcmp(buf_1, buf_2, len);
+}
+
+static int is_in_range(uint32_t offs, uint32_t range_offs, uint32_t range_len)
+{
+ return offs >= range_offs && offs < range_offs + range_len;
+}
+
+static void add_ignored_range(ignore_t *ign, uint32_t offs, uint32_t len)
+{
+ if (len == 0)
+ return;
+ CU_ASSERT_FATAL(ign->num_ranges < MAX_IGNORED_RANGES);
+ ign->ranges[ign->num_ranges].offset = offs;
+ ign->ranges[ign->num_ranges].length = len;
+ ign->num_ranges++;
+}
+
+static void clear_ignored_data(const ignore_t *ign, uint8_t *data, uint32_t data_len)
+{
+ CU_ASSERT_FATAL(ign->byte_offset < data_len);
+ data[ign->byte_offset] &= ~ign->byte_mask;
+
+ for (uint32_t n = 0; n < ign->num_ranges; n++) {
+ uint32_t offset = ign->ranges[n].offset;
+ uint32_t length = ign->ranges[n].length;
+
+ CU_ASSERT(offset + length <= data_len);
+ memset(data + offset, 0, length);
+ }
+}
+
+static void prepare_ignore_info(const crypto_op_test_param_t *param,
+ uint32_t shift,
+ uint32_t cipher_offset,
+ uint32_t cipher_len,
+ uint32_t auth_offset,
+ uint32_t auth_len,
+ ignore_t *ignore)
+{
+ memset(ignore, 0, sizeof(*ignore));
+
+ /*
+ * Leftover bits in the last byte of the cipher range of bit mode
+ * ciphers have undefined values.
+ */
+ if (param->session.cipher_range_in_bits &&
+ param->ref->cipher != ODP_CIPHER_ALG_NULL) {
+ uint8_t leftover_bits = ref_length_in_bits(param->ref) % 8;
+
+ ignore->byte_offset = cipher_offset + cipher_len - 1 + shift;
+ if (leftover_bits > 0)
+ ignore->byte_mask = ~(0xff << (8 - leftover_bits));
+ else
+ ignore->byte_mask = 0;
+ }
+
+ /*
+ * In decode sessions the bytes in the hash location have
+ * undefined values.
+ */
+ if (param->ref->auth != ODP_AUTH_ALG_NULL &&
+ param->session.op == ODP_CRYPTO_OP_DECODE) {
+ uint32_t offs = param->digest_offset;
+
+ if (param->op_type != ODP_CRYPTO_OP_TYPE_OOP ||
+ is_in_range(offs, cipher_offset, cipher_len) ||
+ is_in_range(offs, auth_offset, auth_len)) {
+ add_ignored_range(ignore,
+ param->digest_offset + shift,
+ param->ref->digest_length);
+ }
+ }
+
+ /* Decrypted bytes are undefined if authentication fails. */
+ if (param->session.op == ODP_CRYPTO_OP_DECODE &&
+ param->wrong_digest) {
+ add_ignored_range(ignore, cipher_offset + shift, cipher_len);
+ /* In OOP case, auth range may not get copied */
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP)
+ add_ignored_range(ignore, auth_offset + shift, auth_len);
+ }
+}
+
+static void prepare_expected_data(const crypto_op_test_param_t *param,
+ const odp_packet_data_range_t *cipher_range,
+ const odp_packet_data_range_t *auth_range,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out,
+ expected_t *ex)
+{
+ uint32_t digest_offset = param->digest_offset;
+ uint32_t cipher_offset = cipher_range->offset;
+ uint32_t cipher_len = cipher_range->length;
+ uint32_t auth_offset = auth_range->offset;
+ uint32_t auth_len = auth_range->length;
+ const int32_t shift = param->op_type == ODP_CRYPTO_OP_TYPE_OOP ? param->oop_shift
+ : 0;
+ const odp_packet_t base_pkt = param->op_type == ODP_CRYPTO_OP_TYPE_OOP ? pkt_out
+ : pkt_in;
+ int rc;
+ uint32_t cipher_offset_in_ref = param->cipher_range.offset;
+
+ if (param->session.op == ODP_CRYPTO_OP_ENCODE)
+ digest_offset += shift;
+
+ if (param->session.cipher_range_in_bits) {
+ cipher_offset_in_ref /= 8;
+ cipher_offset /= 8;
+ cipher_len = (cipher_len + 7) / 8;
+ }
+ if (param->session.auth_range_in_bits) {
+ auth_offset /= 8;
+ auth_len = (auth_len + 7) / 8;
+ }
+ if (param->ref->auth == ODP_AUTH_ALG_AES_GCM ||
+ param->ref->auth == ODP_AUTH_ALG_AES_CCM ||
+ param->ref->auth == ODP_AUTH_ALG_CHACHA20_POLY1305) {
+ /* auth range is ignored with AEAD algorithms */
+ auth_len = 0;
+ }
+
+ /* copy all data from base packet */
+ ex->len = odp_packet_len(base_pkt);
+ CU_ASSERT_FATAL(ex->len <= sizeof(ex->data));
+ rc = odp_packet_copy_to_mem(base_pkt, 0, ex->len, ex->data);
+ CU_ASSERT(rc == 0);
+
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP && auth_len > 0) {
+ /* copy auth range from input packet */
+ rc = odp_packet_copy_to_mem(pkt_in, auth_offset, auth_len,
+ ex->data + auth_offset + shift);
+ CU_ASSERT(rc == 0);
+ }
+
+ if (param->session.op == ODP_CRYPTO_OP_ENCODE) {
+ /* copy hash first */
+ memcpy(ex->data + digest_offset,
+ param->ref->digest,
+ param->ref->digest_length);
+ /*
+ * Copy ciphertext, possibly overwriting hash.
+ * The other order (hash overwriting some cipher
+ * text) does not work in any real use case anyway.
+ */
+ memcpy(ex->data + cipher_offset + shift,
+ param->ref->ciphertext + cipher_offset_in_ref,
+ cipher_len);
+ } else {
+ memcpy(ex->data + cipher_offset + shift,
+ param->ref->plaintext + cipher_offset_in_ref,
+ cipher_len);
+ }
+
+ prepare_ignore_info(param, shift,
+ cipher_offset, cipher_len,
+ auth_offset, auth_len,
+ &ex->ignore);
+}
+
+static void print_data(const char *title, uint8_t *data, uint32_t len)
+{
+ static uint64_t limit;
+
+ if (limit++ > MAX_FAILURE_PRINTS)
+ return;
+
+ printf("%s\n", title);
+ for (uint32_t n = 0; n < len ; n++) {
+ printf(" %02x", data[n]);
+ if ((n + 1) % 16 == 0)
+ printf("\n");
+ }
+ printf("\n");
+}
+
+static void check_output_packet_data(odp_packet_t pkt, expected_t *ex)
+{
+ int rc;
+ uint8_t pkt_data[ex->len];
+
+ CU_ASSERT(odp_packet_len(pkt) == ex->len);
+ rc = odp_packet_copy_to_mem(pkt, 0, ex->len, pkt_data);
+ CU_ASSERT(rc == 0);
+
+ clear_ignored_data(&ex->ignore, pkt_data, sizeof(pkt_data));
+ clear_ignored_data(&ex->ignore, ex->data, sizeof(ex->data));
+
+ if (memcmp(pkt_data, ex->data, ex->len)) {
+ CU_FAIL("packet data does not match expected data");
+ print_data("packet:", pkt_data, ex->len);
+ print_data("expected:", ex->data, ex->len);
+ }
+}
+
+static int is_digest_in_cipher_range(const crypto_op_test_param_t *param,
+ const odp_crypto_packet_op_param_t *op_params)
+{
+ /*
+ * Do not use op_params.hash_result_offset here as it refers to
+ * the output packet which (in the OOP case) might be shifted
+ * relative to the input packet.
+ */
+ uint32_t d_offset = param->digest_offset;
+
+ if (param->session.cipher_range_in_bits)
+ d_offset *= 8;
+
+ return d_offset >= op_params->cipher_range.offset &&
+ d_offset < op_params->cipher_range.offset + op_params->cipher_range.length;
+}
+
/*
 * Run one crypto operation described by 'param' and verify the result:
 * packet data against the precomputed expected data, packet metadata
 * preservation, and (for OOP) that the input packet is left untouched.
 */
static void do_test_crypto_op(const crypto_op_test_param_t *param)
{
	odp_bool_t ok = false;
	odp_packet_t pkt;
	odp_packet_t pkt_copy = ODP_PACKET_INVALID;
	odp_packet_t pkt_out = ODP_PACKET_INVALID;
	test_packet_md_t md_in, md_out, md_out_orig;
	expected_t expected;
	odp_crypto_packet_op_param_t op_params = {
		.session = param->session.session,
		.cipher_iv_ptr = param->ref->cipher_iv,
		.auth_iv_ptr = param->ref->auth_iv,
		.hash_result_offset = param->digest_offset,
		.aad_ptr = param->ref->aad,
		.dst_offset_shift = param->oop_shift,
		.null_crypto = param->null_crypto,
	};
	/* set when the implementation is allowed to fail the operation */
	odp_bool_t failure_allowed = false;

	/*
	 * Test detection of wrong digest value in input packet
	 * only when decoding and using non-null auth algorithm.
	 */
	if (param->wrong_digest &&
	    (param->ref->auth == ODP_AUTH_ALG_NULL ||
	     param->session.op == ODP_CRYPTO_OP_ENCODE))
		return;

	prepare_crypto_ranges(param, &op_params.cipher_range, &op_params.auth_range);
	if (prepare_input_packet(param, &pkt))
		return;

	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP) {
		prepare_oop_output_packet(param, &pkt_out, odp_packet_len(pkt));

		/* Keep a copy so we can check the input stays unmodified. */
		pkt_copy = odp_packet_copy(pkt, suite_context.pool);
		CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
		test_packet_get_md(pkt_out, &md_out_orig);

		/* Non-zero-length ranges do not have to be supported. */
		if ((param->ref->cipher == ODP_CIPHER_ALG_NULL &&
		     op_params.cipher_range.length != 0))
			failure_allowed = true;
		if ((param->ref->auth == ODP_AUTH_ALG_NULL &&
		     op_params.auth_range.length != 0))
			failure_allowed = true;
	}

	prepare_expected_data(param, &op_params.cipher_range, &op_params.auth_range,
			      pkt, pkt_out, &expected);

	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP &&
	    param->session.op == ODP_CRYPTO_OP_ENCODE) {
		/*
		 * In this type of sessions digest offset is an offset to the output
		 * packet, so apply the shift.
		 */
		op_params.hash_result_offset += param->oop_shift;
	}

	test_packet_set_md(pkt);
	test_packet_get_md(pkt, &md_in);

	/* NOTE(review): on failure pkt_copy/pkt_out are not freed here;
	 * presumably crypto_op() reports the error fatally — confirm. */
	if (crypto_op(pkt, &pkt_out, &ok, &op_params,
		      param->session.op_type, param->op_type))
		return;

	test_packet_get_md(pkt_out, &md_out);

	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP) {
		test_packet_md_t md;

		/* check that input packet has not changed */
		CU_ASSERT(is_packet_data_equal(pkt, pkt_copy));
		odp_packet_free(pkt_copy);
		test_packet_get_md(pkt, &md);
		CU_ASSERT(test_packet_is_md_equal(&md, &md_in));
		odp_packet_free(pkt);

		/* check that metadata of output packet has not changed */
		CU_ASSERT(test_packet_is_md_equal(&md_out, &md_out_orig));
	} else {
		CU_ASSERT(test_packet_is_md_equal(&md_out, &md_in));
	}

	if (param->ref->cipher != ODP_CIPHER_ALG_NULL &&
	    param->ref->auth != ODP_AUTH_ALG_NULL &&
	    is_digest_in_cipher_range(param, &op_params)) {
		/*
		 * Not all implementations support digest offset in cipher
		 * range, so allow crypto op failure without further checks
		 * in this case.
		 */
		failure_allowed = true;
	}

	if (!ok && failure_allowed)
		goto out;

	/* wrong-digest tests must fail; everything else must succeed */
	if (param->wrong_digest) {
		CU_ASSERT(!ok);
	} else {
		CU_ASSERT(ok);
	}

	check_output_packet_data(pkt_out, &expected);
out:
	odp_packet_free(pkt_out);
}
+
+void test_crypto_op(const crypto_op_test_param_t *param)
+{
+ crypto_op_test_param_t null_param = *param;
+ crypto_test_reference_t ref = *param->ref;
+
+ if (param->session.null_crypto_enable && param->null_crypto) {
+ null_param = *param;
+ null_param.ref = &ref;
+ ref = *param->ref;
+ ref.cipher = ODP_CIPHER_ALG_NULL;
+ ref.auth = ODP_AUTH_ALG_NULL;
+ ref.digest_length = 0;
+ memcpy(ref.ciphertext, ref.plaintext, sizeof(ref.ciphertext));
+ param = &null_param;
+ }
+ do_test_crypto_op(param);
+}
diff --git a/test/validation/api/crypto/crypto_op_test.h b/test/validation/api/crypto/crypto_op_test.h
new file mode 100644
index 000000000..9805457ad
--- /dev/null
+++ b/test/validation/api/crypto/crypto_op_test.h
@@ -0,0 +1,49 @@
/*
 * Copyright (c) 2021-2023, Nokia
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CRYPTO_OP_TEST_H
#define CRYPTO_OP_TEST_H

#include <odp_api.h>
#include <stdint.h>
#include "test_vectors.h"

/* An already created crypto session and the parameters it was created with */
typedef struct crypto_session_t {
	odp_crypto_session_t session;
	odp_crypto_op_t op;		/* ODP_CRYPTO_OP_ENCODE or _DECODE */
	odp_crypto_op_type_t op_type;	/* op type of the session */
	odp_bool_t cipher_range_in_bits;
	odp_bool_t auth_range_in_bits;
	odp_bool_t null_crypto_enable;
} crypto_session_t;

/* Full description of one crypto operation test case */
typedef struct crypto_op_test_param_t {
	crypto_session_t session;
	odp_crypto_op_type_t op_type;	/* op type of this operation */
	int32_t oop_shift;		/* dst_offset_shift for out-of-place ops */
	crypto_test_reference_t *ref;	/* reference vector to test against */
	odp_packet_data_range_t cipher_range;
	odp_packet_data_range_t auth_range;
	uint32_t digest_offset;		/* used as hash_result_offset */
	odp_bool_t null_crypto;		/* request null crypto in the op */
	odp_bool_t adjust_segmentation;	/* force specific packet segmentation */
	odp_bool_t wrong_digest;	/* test detection of a wrong digest */
	uint32_t first_seg_len;		/* first segment length when adjusted */
	uint32_t header_len;		/* extra bytes before the test data */
	uint32_t trailer_len;		/* extra bytes after the test data */
} crypto_op_test_param_t;

/* Run one crypto operation test case described by 'param'. */
void test_crypto_op(const crypto_op_test_param_t *param);

/*
 * Perform a single crypto packet operation. Returns nonzero on failure
 * to perform the operation; the operation result is reported via 'ok'.
 */
int crypto_op(odp_packet_t pkt_in,
	      odp_packet_t *pkt_out,
	      odp_bool_t *ok,
	      const odp_crypto_packet_op_param_t *op_params,
	      odp_crypto_op_type_t session_op_type,
	      odp_crypto_op_type_t op_type);

#endif
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
new file mode 100644
index 000000000..532aaf525
--- /dev/null
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -0,0 +1,2414 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+#include "test_vectors.h"
+#include "test_vector_defs.h"
+#include "crypto_op_test.h"
+#include "util.h"
+
/*
 * If nonzero, run time consuming tests too.
 * Set through FULL_TEST environment variable.
 */
static int full_test;

/* Cap on per-failure diagnostic printouts to keep CUnit logs readable */
#define MAX_FAILURE_PRINTS 20

/* Packet pool dimensioning */
#define PKT_POOL_NUM 64
#define PKT_POOL_LEN 1200 /* enough for a test packet and some headroom */
#define UAREA_SIZE 8 /* presumably packet user area size — confirm at pool setup */
+
/*
 * Check that odp_crypto_session_param_init() sets the documented default
 * values regardless of the memory's prior contents ('fill' byte).
 */
static void test_defaults(uint8_t fill)
{
	odp_crypto_session_param_t param;

	/* poison the struct first so stale bytes would be detected */
	memset(&param, fill, sizeof(param));
	odp_crypto_session_param_init(&param);

	CU_ASSERT_EQUAL(param.op, ODP_CRYPTO_OP_ENCODE);
	CU_ASSERT_EQUAL(param.op_type, ODP_CRYPTO_OP_TYPE_LEGACY);
	CU_ASSERT_EQUAL(param.cipher_range_in_bits, false);
	CU_ASSERT_EQUAL(param.auth_range_in_bits, false);
	CU_ASSERT_EQUAL(param.auth_cipher_text, false);
	CU_ASSERT_EQUAL(param.null_crypto_enable, false);
	CU_ASSERT_EQUAL(param.op_mode, ODP_CRYPTO_SYNC);
	CU_ASSERT_EQUAL(param.cipher_alg, ODP_CIPHER_ALG_NULL);
	CU_ASSERT_EQUAL(param.cipher_iv_len, 0);
	CU_ASSERT_EQUAL(param.auth_alg, ODP_AUTH_ALG_NULL);
	CU_ASSERT_EQUAL(param.auth_iv_len, 0);
	CU_ASSERT_EQUAL(param.auth_aad_len, 0);
}
+
/* Run the session-param default check with both all-zero and all-one fill. */
static void test_default_values(void)
{
	const uint8_t fills[] = {0x00, 0xff};

	for (size_t i = 0; i < sizeof(fills) / sizeof(fills[0]); i++)
		test_defaults(fills[i]);
}
+
/*
 * Print all parameters of a failed test case in human readable form.
 * Used by alg_test_execute_and_print() for failure diagnostics.
 */
static void print_alg_test_param(const crypto_op_test_param_t *p)
{
	const char *cipher_mode = p->session.cipher_range_in_bits ? "bit" : "byte";
	const char *auth_mode = p->session.auth_range_in_bits ? "bit" : "byte";

	switch (p->session.op_type) {
	case ODP_CRYPTO_OP_TYPE_LEGACY:
		printf("legacy ");
		break;
	case ODP_CRYPTO_OP_TYPE_BASIC:
		printf("basic ");
		break;
	case ODP_CRYPTO_OP_TYPE_OOP:
		printf("out-of-place ");
		break;
	case ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP:
		/* report which of the two op types this run used */
		printf("basic-and-out-of-place (%s)",
		       p->op_type == ODP_CRYPTO_OP_TYPE_BASIC ? "basic" : "oop");
		break;
	default:
		printf("unknown (internal error) ");
		break;
	}
	printf("%s\n", p->session.op == ODP_CRYPTO_OP_ENCODE ? "encode" : "decode");

	printf("cipher: %s, %s mode\n", cipher_alg_name(p->ref->cipher), cipher_mode);
	printf("  key length: %d, iv length: %d\n",
	       p->ref->cipher_key_length, p->ref->cipher_iv_length);
	printf("  range: offset %d, length %d\n",
	       p->cipher_range.offset, p->cipher_range.length);

	printf("auth: %s, %s mode\n", auth_alg_name(p->ref->auth), auth_mode);
	printf("  key length: %d, iv length: %d\n",
	       p->ref->auth_key_length, p->ref->auth_iv_length);
	printf("  range: offset %d, length %d; aad length: %d\n",
	       p->auth_range.offset, p->auth_range.length, p->ref->aad_length);
	printf("  digest offset: %d, digest length %d\n",
	       p->digest_offset, p->ref->digest_length);

	if (p->wrong_digest)
		printf("wrong digest test\n");
	printf("header length: %d, trailer length: %d\n", p->header_len, p->trailer_len);
	if (p->adjust_segmentation)
		printf("segmentation adjusted, first_seg_len: %d\n", p->first_seg_len);
	if (p->op_type == ODP_CRYPTO_OP_TYPE_OOP)
		printf("oop_shift: %d\n", p->oop_shift);
	if (p->session.null_crypto_enable)
		printf("null crypto enabled in session\n");
	if (p->null_crypto)
		printf("null crypto requested\n");
}
+
+static void alg_test_execute_and_print(crypto_op_test_param_t *param)
+{
+ static int print_limit = MAX_FAILURE_PRINTS;
+ unsigned int num = CU_get_number_of_failures();
+
+ test_crypto_op(param);
+
+ if (CU_get_number_of_failures() > num) {
+ if (print_limit > 0) {
+ printf("\nTest failed:\n");
+ print_alg_test_param(param);
+ printf("\n");
+ print_limit--;
+ if (print_limit == 0)
+ printf("Suppressing further failure output\n");
+ }
+ }
+}
+
/*
 * Run the test case with a number of operation variants: several OOP
 * destination offset shifts, correct digest, null crypto requested,
 * and a wrong digest. In non-full testing some combinations are run
 * fewer times or skipped to keep the run time bounded.
 */
static void alg_test_op2(crypto_op_test_param_t *param)
{
	int32_t oop_shifts[] = {0, 3, 130, -10};

	for (uint32_t n = 0; n < ODPH_ARRAY_SIZE(oop_shifts); n++) {
		/* nonzero shifts are meaningful only for out-of-place ops */
		if (oop_shifts[n] != 0 &&
		    param->op_type != ODP_CRYPTO_OP_TYPE_OOP)
			continue;
		/* a negative shift must not move data before the packet start */
		if ((int32_t)param->header_len + oop_shifts[n] < 0)
			continue;
		param->oop_shift = oop_shifts[n];

		param->wrong_digest = false;
		alg_test_execute_and_print(param);

		/* same case again but requesting null crypto */
		param->null_crypto = true;
		alg_test_execute_and_print(param);
		param->null_crypto = false;

		if (full_test)
			alg_test_execute_and_print(param); /* rerun with the same parameters */

		/* limit the number of variants in non-full testing */
		if (!full_test && param->session.null_crypto_enable)
			break;
		if (!full_test && param->session.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP)
			break;

		param->wrong_digest = true;
		alg_test_execute_and_print(param);
	}
}
+
+static void alg_test_op(crypto_op_test_param_t *param)
+{
+ param->op_type = param->session.op_type;
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
+ param->op_type = ODP_CRYPTO_OP_TYPE_BASIC;
+ alg_test_op2(param);
+ param->op_type = ODP_CRYPTO_OP_TYPE_OOP;
+ }
+ alg_test_op2(param);
+}
+
/* Set once an unsupported cipher/auth combination has been reported */
static int combo_warning_shown;
/* Set once unsupported out-of-place sessions have been reported */
static int oop_warning_shown;

/* Whether the hash result offset lies within the auth range */
typedef enum {
	HASH_NO_OVERLAP,
	HASH_OVERLAP,
} hash_test_mode_t;

/* Order of cipher and auth: maps to session param auth_cipher_text */
typedef enum {
	AUTH_CIPHERTEXT,
	AUTH_PLAINTEXT
} alg_order_t;
+
/*
 * Create a crypto session from the given parameters and store the handle
 * in session->session.
 *
 * Returns 0 on success and -1 when no session was created: either
 * creation was expected to fail (must_fail), or the implementation
 * rejected the parameters in a way this test suite tolerates
 * (unsupported algorithm combination, unsupported algorithm order in
 * async mode, or unsupported out-of-place op types).
 */
static int session_create(crypto_session_t *session,
			  alg_order_t order,
			  crypto_test_reference_t *ref,
			  hash_test_mode_t hash_mode,
			  odp_bool_t must_fail)
{
	int rc;
	odp_crypto_ses_create_err_t status;
	odp_crypto_session_param_t ses_params;
	/* local key copies so the originals can be cleared after creation */
	uint8_t cipher_key_data[ref->cipher_key_length];
	uint8_t auth_key_data[ref->auth_key_length];
	odp_crypto_key_t cipher_key = {
		.data = cipher_key_data,
		.length = ref->cipher_key_length
	};
	odp_crypto_key_t auth_key = {
		.data = auth_key_data,
		.length = ref->auth_key_length
	};

	memcpy(cipher_key_data, ref->cipher_key, ref->cipher_key_length);
	memcpy(auth_key_data, ref->auth_key, ref->auth_key_length);

	/* Create a crypto session */
	odp_crypto_session_param_init(&ses_params);
	ses_params.op = session->op;
	ses_params.op_type = session->op_type;
	ses_params.cipher_range_in_bits = session->cipher_range_in_bits;
	ses_params.auth_range_in_bits = session->auth_range_in_bits;
	ses_params.auth_cipher_text = (order == AUTH_CIPHERTEXT);
	ses_params.null_crypto_enable = session->null_crypto_enable;
	ses_params.op_mode = suite_context.op_mode;
	ses_params.cipher_alg = ref->cipher;
	ses_params.auth_alg = ref->auth;
	ses_params.compl_queue = suite_context.queue;
	ses_params.output_pool = suite_context.pool;
	ses_params.cipher_key = cipher_key;
	ses_params.cipher_iv_len = ref->cipher_iv_length;
	ses_params.auth_iv_len = ref->auth_iv_length;
	ses_params.auth_key = auth_key;
	ses_params.auth_digest_len = ref->digest_length;
	ses_params.auth_aad_len = ref->aad_length;
	ses_params.hash_result_in_auth_range = (hash_mode == HASH_OVERLAP);
	rc = odp_crypto_session_create(&ses_params, &session->session, &status);

	/* Creation was expected to fail; destroy the session if it didn't. */
	if (must_fail) {
		CU_ASSERT(rc < 0);
		if (rc == 0) {
			rc = odp_crypto_session_destroy(session->session);
			CU_ASSERT(rc == 0);
		}
		return -1;
	}

	/* Unsupported cipher/auth combination: skip, warn only once. */
	if (rc < 0 && status == ODP_CRYPTO_SES_ERR_ALG_COMBO) {
		if (!combo_warning_shown) {
			combo_warning_shown = 1;
			printf("\n    Unsupported algorithm combination: %s, %s\n",
			       cipher_alg_name(ref->cipher),
			       auth_alg_name(ref->auth));
		}
		return -1;
	}

	/*
	 * Allow ODP_CRYPTO_SES_ERR_ALG_ORDER only in async op mode.
	 * In sync mode an implementation should be able to support both
	 * orders without much difficulty.
	 */
	if (rc < 0 && status == ODP_CRYPTO_SES_ERR_ALG_ORDER &&
	    ses_params.op_mode == ODP_CRYPTO_ASYNC) {
		printf("\n    Unsupported algorithm order: %s, %s, auth_cipher_text: %d\n",
		       cipher_alg_name(ref->cipher),
		       auth_alg_name(ref->auth),
		       ses_params.auth_cipher_text);
		return -1;
	}

	/* For now, allow out-of-place sessions not to be supported. */
	if (rc < 0 && status == ODP_CRYPTO_SES_ERR_PARAMS &&
	    (ses_params.op_type == ODP_CRYPTO_OP_TYPE_OOP ||
	     ses_params.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP)) {
		if (!oop_warning_shown)
			printf("\n    Skipping out-of-place tests\n");
		oop_warning_shown = 1;
		return -1;
	}

	CU_ASSERT_FATAL(!rc);
	CU_ASSERT(status == ODP_CRYPTO_SES_ERR_NONE);
	CU_ASSERT(odp_crypto_session_to_u64(session->session) !=
		  odp_crypto_session_to_u64(ODP_CRYPTO_SESSION_INVALID));

	/*
	 * Clear session creation parameters so that we might notice if
	 * the implementation still tried to use them.
	 */
	memset(cipher_key_data, 0, sizeof(cipher_key_data));
	memset(auth_key_data, 0, sizeof(auth_key_data));
	memset(&ses_params, 0, sizeof(ses_params));

	return 0;
}
+
/*
 * Create one session with the given parameters, run the test case with
 * it using various packet segmentations and data alignments, and
 * finally destroy the session.
 */
static void alg_test_ses(odp_crypto_op_t op,
			 odp_crypto_op_type_t op_type,
			 alg_order_t order,
			 crypto_test_reference_t *ref,
			 odp_packet_data_range_t cipher_range,
			 odp_packet_data_range_t auth_range,
			 uint32_t digest_offset,
			 odp_bool_t cipher_range_in_bits,
			 odp_bool_t auth_range_in_bits,
			 odp_bool_t null_crypto_enable,
			 odp_bool_t session_creation_must_fail)
{
	unsigned int initial_num_failures = CU_get_number_of_failures();
	const uint32_t reflength = ref_length_in_bytes(ref);
	/* digest_offset is in bytes; scale when auth range is in bits */
	const uint32_t auth_scale = auth_range_in_bits ? 8 : 1;
	hash_test_mode_t hash_mode = HASH_NO_OVERLAP;
	int rc;
	uint32_t seg_len;
	uint32_t max_shift;
	crypto_op_test_param_t test_param;

	/* null crypto sessions are tested only in async op mode */
	if (null_crypto_enable && suite_context.op_mode == ODP_CRYPTO_SYNC)
		return;

	/* hash result inside the auth range needs a special session flag */
	if (digest_offset * auth_scale >= auth_range.offset &&
	    digest_offset * auth_scale < auth_range.offset + auth_range.length)
		hash_mode = HASH_OVERLAP;

	memset(&test_param, 0, sizeof(test_param));
	test_param.session.op = op;
	test_param.session.op_type = op_type;
	test_param.session.cipher_range_in_bits = cipher_range_in_bits;
	test_param.session.auth_range_in_bits = auth_range_in_bits;
	test_param.session.null_crypto_enable = null_crypto_enable;
	if (session_create(&test_param.session, order, ref, hash_mode, session_creation_must_fail))
		return;
	test_param.ref = ref;
	test_param.cipher_range = cipher_range;
	test_param.auth_range = auth_range;
	test_param.digest_offset = digest_offset;

	/* first run with default (unadjusted) segmentation */
	alg_test_op(&test_param);

	max_shift = reflength + ref->digest_length;
	seg_len = 0;

	/* in non-full testing run some combinations with one seg_len only */
	if (!full_test)
		if ((ref->cipher != ODP_CIPHER_ALG_NULL &&
		     ref->auth != ODP_AUTH_ALG_NULL) ||
		    test_param.session.null_crypto_enable ||
		    test_param.session.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
			/* run the loop body just once */
			seg_len = max_shift / 2;
			max_shift = seg_len;
		}

	/*
	 * Test with segmented packets with all possible segment boundaries
	 * within the packet data
	 */
	for (; seg_len <= max_shift; seg_len++) {
		/*
		 * CUnit chokes on too many assertion failures, so bail
		 * out if this test has already failed.
		 */
		if (CU_get_number_of_failures() > initial_num_failures)
			break;

		test_param.adjust_segmentation = true;
		test_param.first_seg_len = seg_len;
		test_param.header_len = 0;
		test_param.trailer_len = 0;
		test_param.digest_offset = digest_offset;
		alg_test_op(&test_param);

		/* Test partial packet crypto with odd alignment. */
		test_param.header_len = 13;
		test_param.trailer_len = 32;
		test_param.digest_offset = test_param.header_len + digest_offset;
		alg_test_op(&test_param);
	}

	rc = odp_crypto_session_destroy(test_param.session.session);
	CU_ASSERT(!rc);
}
+
+static void alg_test_op_types(odp_crypto_op_t op,
+ alg_order_t order,
+ crypto_test_reference_t *ref,
+ odp_packet_data_range_t cipher_range,
+ odp_packet_data_range_t auth_range,
+ uint32_t digest_offset,
+ odp_bool_t cipher_range_in_bits,
+ odp_bool_t auth_range_in_bits,
+ odp_bool_t session_creation_must_fail)
+{
+ odp_crypto_op_type_t op_types[] = {
+ ODP_CRYPTO_OP_TYPE_LEGACY,
+ ODP_CRYPTO_OP_TYPE_BASIC,
+ ODP_CRYPTO_OP_TYPE_OOP,
+ ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP,
+ };
+
+ for (unsigned int n = 0; n < ODPH_ARRAY_SIZE(op_types); n++) {
+ for (unsigned int null_crypto = 0 ; null_crypto <= 1; null_crypto++)
+ alg_test_ses(op,
+ op_types[n],
+ order,
+ ref,
+ cipher_range,
+ auth_range,
+ digest_offset,
+ cipher_range_in_bits,
+ auth_range_in_bits,
+ null_crypto,
+ session_creation_must_fail);
+ }
+}
+
/*
 * Run the test case with all byte/bit mode combinations of the cipher
 * and auth ranges. The ranges are given in bits; for byte-mode
 * iterations they are converted to bytes, and combinations that would
 * need a sub-byte range in byte mode are skipped. Session creation is
 * expected to fail when bit mode is requested but the capability does
 * not advertise it.
 */
static void alg_test(odp_crypto_op_t op,
		     alg_order_t order,
		     crypto_test_reference_t *ref,
		     odp_packet_data_range_t cipher_bit_range,
		     odp_packet_data_range_t auth_bit_range,
		     uint32_t digest_offset,
		     odp_bool_t is_bit_mode_cipher,
		     odp_bool_t is_bit_mode_auth)
{
	odp_packet_data_range_t cipher_range;
	odp_packet_data_range_t auth_range;

	for (int cr_in_bits = 0; cr_in_bits <= 1; cr_in_bits++) {
		/* byte mode requires a whole number of bytes */
		if (!cr_in_bits && cipher_bit_range.length % 8 != 0)
			continue;
		for (int ar_in_bits = 0; ar_in_bits <= 1; ar_in_bits++) {
			odp_bool_t session_creation_must_fail;

			if (!ar_in_bits && auth_bit_range.length % 8 != 0)
				continue;

			cipher_range = cipher_bit_range;
			auth_range = auth_bit_range;
			/* convert bit ranges to byte ranges for byte mode */
			if (!cr_in_bits) {
				cipher_range.offset /= 8;
				cipher_range.length /= 8;
			}
			if (!ar_in_bits) {
				auth_range.offset /= 8;
				auth_range.length /= 8;
			}
			session_creation_must_fail = ((ar_in_bits && !is_bit_mode_auth) ||
						      (cr_in_bits && !is_bit_mode_cipher));
			alg_test_op_types(op, order, ref, cipher_range, auth_range,
					  digest_offset, cr_in_bits, ar_in_bits,
					  session_creation_must_fail);
		}
	}
}
+
+static odp_bool_t aad_len_ok(const odp_crypto_auth_capability_t *capa, uint32_t len)
+{
+ if (len < capa->aad_len.min || len > capa->aad_len.max)
+ return false;
+
+ if (len == capa->aad_len.min)
+ return true;
+ if (capa->aad_len.inc == 0)
+ return false;
+
+ return ((len - capa->aad_len.min) % capa->aad_len.inc) == 0;
+}
+
/*
 * Test the cipher/auth algorithm pair of 'ref' with all 'count' test
 * vectors, in both authenticate-plaintext and authenticate-ciphertext
 * order. Vectors whose key/IV/digest/AAD lengths are not advertised in
 * the capabilities are skipped with a printout, as are capabilities no
 * vector covers. Finally verify that at least one vector was run for
 * both algorithms.
 */
static void check_alg(odp_crypto_op_t op,
		      crypto_test_reference_t *ref,
		      size_t count)
{
	int rc, i;
	const odp_cipher_alg_t cipher_alg = ref->cipher;
	const odp_auth_alg_t auth_alg = ref->auth;
	int cipher_num = odp_crypto_cipher_capability(cipher_alg, NULL, 0);
	int auth_num = odp_crypto_auth_capability(auth_alg, NULL, 0);
	odp_bool_t cipher_ok = false;
	odp_bool_t auth_ok = false;
	size_t idx;

	CU_ASSERT_FATAL(cipher_num > 0);
	CU_ASSERT_FATAL(auth_num > 0);

	init_reference(ref, count);

	odp_crypto_cipher_capability_t cipher_capa[cipher_num];
	odp_crypto_auth_capability_t auth_capa[auth_num];
	/* track which capabilities were covered by at least one vector */
	odp_bool_t cipher_tested[cipher_num];
	odp_bool_t auth_tested[auth_num];

	rc = odp_crypto_cipher_capability(cipher_alg, cipher_capa, cipher_num);
	CU_ASSERT_FATAL(rc == cipher_num);

	rc = odp_crypto_auth_capability(auth_alg, auth_capa, auth_num);
	CU_ASSERT_FATAL(rc == auth_num);

	memset(cipher_tested, 0, sizeof(cipher_tested));
	memset(auth_tested, 0, sizeof(auth_tested));

	oop_warning_shown = 0; /* allow OOP-unsupported warning again */

	for (idx = 0; idx < count; idx++) {
		int cipher_idx = -1, auth_idx = -1;
		odp_bool_t is_bit_mode_cipher = false;
		odp_bool_t is_bit_mode_auth = false;
		/* by default put the digest right after the message */
		uint32_t digest_offs = ref_length_in_bytes(&ref[idx]);
		odp_packet_data_range_t cipher_bit_range = {.offset = 0};
		odp_packet_data_range_t auth_bit_range = {.offset = 0};

		/* find a cipher capability matching this vector */
		for (i = 0; i < cipher_num; i++) {
			if (cipher_capa[i].key_len ==
			    ref[idx].cipher_key_length &&
			    cipher_capa[i].iv_len ==
			    ref[idx].cipher_iv_length) {
				cipher_idx = i;
				is_bit_mode_cipher = cipher_capa[i].bit_mode;
				break;
			}
		}

		if (cipher_idx < 0) {
			printf("\n    Unsupported: alg=%s, key_len=%" PRIu32
			       ", iv_len=%" PRIu32 "\n",
			       cipher_alg_name(cipher_alg),
			       ref[idx].cipher_key_length,
			       ref[idx].cipher_iv_length);
			continue;
		}

		/* find an auth capability matching this vector */
		for (i = 0; i < auth_num; i++) {
			if (auth_capa[i].digest_len ==
			    ref[idx].digest_length &&
			    auth_capa[i].iv_len ==
			    ref[idx].auth_iv_length &&
			    auth_capa[i].key_len ==
			    ref[idx].auth_key_length &&
			    aad_len_ok(&auth_capa[i], ref[idx].aad_length)) {
				auth_idx = i;
				is_bit_mode_auth = auth_capa[i].bit_mode;
				break;
			}
		}

		if (auth_idx < 0) {
			printf("\n    Unsupported: alg=%s, key_len=%" PRIu32
			       ", iv_len=%" PRIu32 ", digest_len=%" PRIu32
			       "\n",
			       auth_alg_name(auth_alg),
			       ref[idx].auth_key_length,
			       ref[idx].auth_iv_length,
			       ref[idx].digest_length);
			continue;
		}

		/* cover the whole reference data with both ranges */
		cipher_bit_range.length = ref_length_in_bits(&ref[idx]);
		auth_bit_range.length = ref_length_in_bits(&ref[idx]);

		alg_test(op, AUTH_PLAINTEXT, &ref[idx],
			 cipher_bit_range, auth_bit_range, digest_offs,
			 is_bit_mode_cipher, is_bit_mode_auth);
		alg_test(op, AUTH_CIPHERTEXT, &ref[idx],
			 cipher_bit_range, auth_bit_range, digest_offs,
			 is_bit_mode_cipher, is_bit_mode_auth);

		cipher_tested[cipher_idx] = true;
		auth_tested[auth_idx] = true;
	}

	for (i = 0; i < cipher_num; i++) {
		cipher_ok |= cipher_tested[i];
		if (!cipher_tested[i] && cipher_alg != ODP_CIPHER_ALG_NULL)
			printf("\n    Untested: alg=%s, key_len=%" PRIu32 ", "
			       "iv_len=%" PRIu32 "%s\n",
			       cipher_alg_name(cipher_alg),
			       cipher_capa[i].key_len,
			       cipher_capa[i].iv_len,
			       cipher_capa[i].bit_mode ? ", bit mode" : "");
	}

	for (i = 0; i < auth_num; i++) {
		auth_ok |= auth_tested[i];
		if (!auth_tested[i] && auth_alg != ODP_AUTH_ALG_NULL)
			printf("\n    Untested: alg=%s, key_len=%" PRIu32 ", "
			       "digest_len=%" PRIu32 "%s\n",
			       auth_alg_name(auth_alg),
			       auth_capa[i].key_len,
			       auth_capa[i].digest_len,
			       auth_capa[i].bit_mode ? ", bit mode" : "");
	}

	/* Verify that we were able to run at least one test */
	CU_ASSERT(cipher_ok);
	CU_ASSERT(auth_ok);
}
+
+static void test_capability(void)
+{
+ odp_crypto_capability_t capa = {.max_sessions = 1};
+ int rc;
+
+ rc = odp_crypto_capability(&capa);
+ CU_ASSERT(!rc);
+ if (capa.max_sessions > 0)
+ CU_ASSERT(capa.sync_mode || capa.async_mode);
+ CU_ASSERT((~capa.ciphers.all_bits & capa.hw_ciphers.all_bits) == 0);
+ CU_ASSERT((~capa.auths.all_bits & capa.hw_auths.all_bits) == 0);
+}
+
/*
 * Create a test reference, which can be used in tests where the hash
 * result is within auth_range.
 *
 * The ciphertext packet and the hash are calculated using an encode
 * operation with hash_result_offset outside the auth_range and by
 * copying the hash in the ciphertext packet.
 *
 * 'digest_fill' is the byte value written over the digest location in
 * the plaintext. Returns 0 on success, -1 when the session could not be
 * created or the operation failed.
 */
static int create_hash_test_reference(odp_auth_alg_t auth,
				      const odp_crypto_auth_capability_t *capa,
				      crypto_test_reference_t *ref,
				      uint32_t digest_offset,
				      uint8_t digest_fill)
{
	crypto_session_t session;
	int rc;
	odp_packet_t pkt;
	odp_bool_t ok;
	const uint32_t auth_bytes = 100;
	/* digest location during encode: right after the auth range */
	uint32_t enc_digest_offset = auth_bytes;

	ref->cipher = ODP_CIPHER_ALG_NULL;
	ref->auth = auth;
	ref->auth_key_length = capa->key_len;
	ref->auth_iv_length = capa->iv_len;
	ref->digest_length = capa->digest_len;
	ref->is_length_in_bits = false;
	ref->length = auth_bytes;

	if (ref->auth_key_length > MAX_KEY_LEN ||
	    ref->auth_iv_length > MAX_IV_LEN ||
	    auth_bytes > MAX_DATA_LEN ||
	    digest_offset + ref->digest_length > MAX_DATA_LEN)
		CU_FAIL_FATAL("Internal error\n");

	fill_with_pattern(ref->auth_key, ref->auth_key_length);
	fill_with_pattern(ref->auth_iv, ref->auth_iv_length);
	fill_with_pattern(ref->plaintext, auth_bytes);

	/* overwrite the digest location in the plaintext */
	memset(ref->plaintext + digest_offset, digest_fill, ref->digest_length);

	/* room for the data plus the digest appended at the end */
	pkt = odp_packet_alloc(suite_context.pool, auth_bytes + ref->digest_length);
	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);

	rc = odp_packet_copy_from_mem(pkt, 0, auth_bytes, ref->plaintext);
	CU_ASSERT(rc == 0);

	session.op = ODP_CRYPTO_OP_ENCODE;
	session.op_type = ODP_CRYPTO_OP_TYPE_BASIC;
	session.cipher_range_in_bits = false;
	session.auth_range_in_bits = false;
	session.null_crypto_enable = false;
	if (session_create(&session, AUTH_PLAINTEXT, ref, HASH_NO_OVERLAP, false))
		return -1;

	odp_crypto_packet_op_param_t op_params = {
		.session = session.session,
		.cipher_iv_ptr = ref->cipher_iv,
		.auth_iv_ptr = ref->auth_iv,
		.hash_result_offset = enc_digest_offset,
		.aad_ptr = ref->aad,
		.cipher_range = {.offset = 0, .length = 0},
		.auth_range = { .offset = 0, .length = auth_bytes },
		.dst_offset_shift = 0,
	};
	rc = crypto_op(pkt, &pkt, &ok, &op_params,
		       ODP_CRYPTO_OP_TYPE_BASIC, ODP_CRYPTO_OP_TYPE_BASIC);

	CU_ASSERT(rc == 0);
	if (rc) {
		(void)odp_crypto_session_destroy(session.session);
		return -1;
	}
	CU_ASSERT(ok);

	rc = odp_crypto_session_destroy(session.session);
	CU_ASSERT(rc == 0);

	/* copy the processed packet to the ciphertext packet in ref */
	rc = odp_packet_copy_to_mem(pkt, 0, auth_bytes, ref->ciphertext);
	CU_ASSERT(rc == 0);

	/* copy the calculated digest in the ciphertext packet in ref */
	rc = odp_packet_copy_to_mem(pkt, enc_digest_offset, ref->digest_length,
				    &ref->ciphertext[digest_offset]);
	CU_ASSERT(rc == 0);

	/* copy the calculated digest the digest field in ref */
	rc = odp_packet_copy_to_mem(pkt, enc_digest_offset, ref->digest_length,
				    &ref->digest);
	CU_ASSERT(rc == 0);

	odp_packet_free(pkt);

	return 0;
}
+
/*
 * Test hash computation when hash_result_offset lies within the auth
 * range, for both decode (hash bytes must be cleared before hashing)
 * and encode (hash bytes must not be cleared), using test references
 * generated by create_hash_test_reference().
 */
static void test_auth_hash_in_auth_range(odp_auth_alg_t auth,
					 const odp_crypto_auth_capability_t *capa,
					 odp_bool_t is_bit_mode_cipher,
					 alg_order_t order)
{
	/* static: large struct, avoid stack allocation */
	static crypto_test_reference_t ref = {.length = 0};
	uint32_t digest_offset = 13;
	const odp_packet_data_range_t cipher_bit_range = {.offset = 0, .length = 0};
	odp_packet_data_range_t auth_bit_range;

	/* in non-full testing try only a subset of digest lengths */
	if (!full_test && capa->digest_len % 4 != 0)
		return;

	/*
	 * Create test packets with auth hash in the authenticated range and
	 * zeroes in the hash location in the plaintext packet.
	 */
	if (create_hash_test_reference(auth, capa, &ref, digest_offset, 0))
		return;

	auth_bit_range.offset = 0;
	auth_bit_range.length = ref_length_in_bits(&ref);

	/*
	 * Decode the ciphertext packet.
	 *
	 * Check that auth hash verification works even if hash_result_offset
	 * is within the auth range. The ODP implementation must clear the
	 * hash bytes in the ciphertext packet before calculating the hash.
	 */
	alg_test(ODP_CRYPTO_OP_DECODE,
		 order,
		 &ref,
		 cipher_bit_range, auth_bit_range,
		 digest_offset,
		 is_bit_mode_cipher,
		 capa->bit_mode);

	/*
	 * Create test packets with auth hash in the authenticated range and
	 * ones in the hash location in the plaintext packet.
	 */
	if (create_hash_test_reference(auth, capa, &ref, digest_offset, 1))
		return;

	auth_bit_range.offset = 0;
	auth_bit_range.length = ref_length_in_bits(&ref);

	/*
	 * Encode the plaintext packet.
	 *
	 * Check that auth hash generation works even if hash_result_offset
	 * is within the auth range. The ODP implementation must not clear
	 * the hash bytes in the plaintext packet before calculating the hash.
	 */
	alg_test(ODP_CRYPTO_OP_ENCODE,
		 order,
		 &ref,
		 cipher_bit_range, auth_bit_range,
		 digest_offset,
		 is_bit_mode_cipher,
		 capa->bit_mode);
}
+
/*
 * Cipher algorithms that are not AEAD algorithms
 */
static odp_cipher_alg_t cipher_algs[] = {
	ODP_CIPHER_ALG_NULL,
	ODP_CIPHER_ALG_DES,
	ODP_CIPHER_ALG_3DES_CBC,
	ODP_CIPHER_ALG_3DES_ECB,
	ODP_CIPHER_ALG_AES_CBC,
	ODP_CIPHER_ALG_AES_CTR,
	ODP_CIPHER_ALG_AES_ECB,
	ODP_CIPHER_ALG_AES_CFB128,
	ODP_CIPHER_ALG_AES_XTS,
	ODP_CIPHER_ALG_KASUMI_F8,
	ODP_CIPHER_ALG_SNOW3G_UEA2,
	ODP_CIPHER_ALG_AES_EEA2,
	ODP_CIPHER_ALG_ZUC_EEA3,
};

/*
 * Authentication algorithms and hashes that may use auth_range
 * parameter. AEAD algorithms are excluded.
 */
static odp_auth_alg_t auth_algs[] = {
	/* HMACs and MACs */
	ODP_AUTH_ALG_NULL,
	ODP_AUTH_ALG_MD5_HMAC,
	ODP_AUTH_ALG_SHA1_HMAC,
	ODP_AUTH_ALG_SHA224_HMAC,
	ODP_AUTH_ALG_SHA256_HMAC,
	ODP_AUTH_ALG_SHA384_HMAC,
	ODP_AUTH_ALG_SHA512_HMAC,
	ODP_AUTH_ALG_AES_GMAC,
	ODP_AUTH_ALG_AES_CMAC,
	ODP_AUTH_ALG_AES_XCBC_MAC,
	ODP_AUTH_ALG_KASUMI_F9,
	ODP_AUTH_ALG_SNOW3G_UIA2,
	ODP_AUTH_ALG_AES_EIA2,
	ODP_AUTH_ALG_ZUC_EIA3,
	/* plain (unkeyed) hashes */
	ODP_AUTH_ALG_MD5,
	ODP_AUTH_ALG_SHA1,
	ODP_AUTH_ALG_SHA224,
	ODP_AUTH_ALG_SHA256,
	ODP_AUTH_ALG_SHA384,
	ODP_AUTH_ALG_SHA512,
};
+
/*
 * Run the hash-in-auth-range test for every supported auth algorithm
 * and every advertised auth capability, in both algorithm orders.
 * The cipher side is always the null cipher.
 */
static void test_auth_hashes_in_auth_range(void)
{
	for (size_t n = 0; n < ODPH_ARRAY_SIZE(auth_algs); n++) {
		odp_auth_alg_t auth = auth_algs[n];
		odp_crypto_cipher_capability_t c_capa;
		int num;

		/* skip algorithms not enabled for this test run */
		if (check_alg_support(ODP_CIPHER_ALG_NULL, auth) == ODP_TEST_INACTIVE)
			continue;

		num = odp_crypto_cipher_capability(ODP_CIPHER_ALG_NULL, &c_capa, 1);
		CU_ASSERT_FATAL(num == 1);

		num = odp_crypto_auth_capability(auth, NULL, 0);
		CU_ASSERT_FATAL(num > 0);

		odp_crypto_auth_capability_t capa[num];

		num = odp_crypto_auth_capability(auth, capa, num);

		for (int i = 0; i < num; i++) {
			test_auth_hash_in_auth_range(auth, &capa[i], c_capa.bit_mode,
						     AUTH_PLAINTEXT);
			test_auth_hash_in_auth_range(auth, &capa[i], c_capa.bit_mode,
						     AUTH_CIPHERTEXT);
		}
	}
}
+
/*
 * Encode ref->plaintext and save result in ref->ciphertext.
 *
 * Returns nonzero when the session could not be created or the
 * operation failed.
 */
static int crypto_encode_ref(crypto_test_reference_t *ref,
			     odp_packet_data_range_t cipher_range,
			     odp_packet_data_range_t auth_range,
			     uint32_t hash_result_offset)
{
	odp_packet_data_range_t zero_range = {.offset = 0, .length = 0};
	odp_packet_t pkt;
	int rc;
	crypto_session_t session;
	odp_bool_t ok;

	pkt = odp_packet_alloc(suite_context.pool, ref->length);
	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);

	rc = odp_packet_copy_from_mem(pkt, 0, ref->length, ref->plaintext);
	CU_ASSERT(rc == 0);

	session.op = ODP_CRYPTO_OP_ENCODE;
	session.op_type = ODP_CRYPTO_OP_TYPE_BASIC;
	session.cipher_range_in_bits = false;
	session.auth_range_in_bits = false;
	session.null_crypto_enable = false;
	if (session_create(&session, AUTH_PLAINTEXT, ref, HASH_OVERLAP, false)) {
		odp_packet_free(pkt);
		/* NOTE(review): returns 1 here, -1 below; callers should
		 * test for nonzero only */
		return 1;
	}

	/* null algorithms ignore their ranges; use empty ones */
	if (ref->cipher == ODP_CIPHER_ALG_NULL)
		cipher_range = zero_range;
	if (ref->auth == ODP_AUTH_ALG_NULL) {
		auth_range = zero_range;
		hash_result_offset = 0;
	}

	/* the digest must fit inside the packet */
	CU_ASSERT_FATAL(hash_result_offset + ref->digest_length <= ref->length);

	odp_crypto_packet_op_param_t op_params = {
		.session = session.session,
		.cipher_iv_ptr = ref->cipher_iv,
		.auth_iv_ptr = ref->auth_iv,
		.hash_result_offset = hash_result_offset,
		.aad_ptr = ref->aad,
		.cipher_range = cipher_range,
		.auth_range = auth_range,
		.dst_offset_shift = 0,
	};
	rc = crypto_op(pkt, &pkt, &ok, &op_params,
		       ODP_CRYPTO_OP_TYPE_BASIC, ODP_CRYPTO_OP_TYPE_BASIC);
	CU_ASSERT(rc == 0);
	if (rc) {
		(void)odp_crypto_session_destroy(session.session);
		return -1;
	}
	CU_ASSERT(ok);

	rc = odp_crypto_session_destroy(session.session);
	CU_ASSERT(rc == 0);

	/* store the encoded packet as the reference ciphertext */
	rc = odp_packet_copy_to_mem(pkt, 0, ref->length, ref->ciphertext);
	CU_ASSERT(rc == 0);

	odp_packet_free(pkt);
	return 0;
}
+
+/* One cipher/auth algorithm-variant combination under test. */
+typedef struct crypto_suite_t {
+ odp_cipher_alg_t cipher; /* cipher algorithm */
+ odp_auth_alg_t auth; /* auth algorithm */
+ alg_order_t order; /* auth over plaintext or ciphertext */
+ const odp_crypto_cipher_capability_t *cipher_capa; /* selected cipher variant */
+ const odp_crypto_auth_capability_t *auth_capa; /* selected auth variant */
+} crypto_suite_t;
+
+/*
+ * Create test reference for combined auth & cipher by doing authentication
+ * and ciphering through separate ODP crypto operations.
+ */
+static int create_combined_ref(const crypto_suite_t *suite,
+ crypto_test_reference_t *ref,
+ const odp_packet_data_range_t *cipher_range,
+ const odp_packet_data_range_t *auth_range,
+ uint32_t digest_offset)
+{
+ uint32_t total_len;
+ int rc;
+ crypto_test_reference_t ref_cipher_only;
+ crypto_test_reference_t ref_auth_only;
+ crypto_test_reference_t *first_ref, *second_ref;
+
+ /* Packet must cover both ranges and the digest location. */
+ total_len = cipher_range->offset + cipher_range->length;
+ if (auth_range->offset + auth_range->length > total_len)
+ total_len = auth_range->offset + auth_range->length;
+ if (digest_offset + suite->auth_capa->digest_len > total_len)
+ total_len = digest_offset + suite->auth_capa->digest_len;
+
+ ref->cipher = suite->cipher;
+ ref->auth = suite->auth;
+ ref->cipher_key_length = suite->cipher_capa->key_len;
+ ref->cipher_iv_length = suite->cipher_capa->iv_len;
+ ref->auth_key_length = suite->auth_capa->key_len;
+ ref->auth_iv_length = suite->auth_capa->iv_len;
+ ref->digest_length = suite->auth_capa->digest_len;
+ ref->aad_length = 0;
+ ref->is_length_in_bits = false;
+ ref->length = total_len;
+
+ if (ref->auth_key_length > MAX_KEY_LEN ||
+ ref->auth_iv_length > MAX_IV_LEN ||
+ total_len > MAX_DATA_LEN ||
+ digest_offset + ref->digest_length > MAX_DATA_LEN)
+ CU_FAIL_FATAL("Internal error\n");
+
+ fill_with_pattern(ref->cipher_key, ref->cipher_key_length);
+ fill_with_pattern(ref->cipher_iv, ref->cipher_iv_length);
+ fill_with_pattern(ref->auth_key, ref->auth_key_length);
+ fill_with_pattern(ref->auth_iv, ref->auth_iv_length);
+ fill_with_pattern(ref->plaintext, ref->length);
+ /* Zero the digest location so both passes see the same bytes there. */
+ memset(ref->plaintext + digest_offset, 0, ref->digest_length);
+
+ /* Derive single-algorithm references from the combined one. */
+ ref_cipher_only = *ref;
+ ref_cipher_only.auth = ODP_AUTH_ALG_NULL;
+ ref_cipher_only.auth_key_length = 0;
+ ref_cipher_only.auth_iv_length = 0;
+ ref_cipher_only.aad_length = 0;
+ ref_cipher_only.digest_length = 0;
+
+ ref_auth_only = *ref;
+ ref_auth_only.cipher = ODP_CIPHER_ALG_NULL;
+ ref_auth_only.cipher_key_length = 0;
+ ref_auth_only.cipher_iv_length = 0;
+
+ /* Run the two operations in the order implied by suite->order. */
+ if (suite->order == AUTH_CIPHERTEXT) {
+ first_ref = &ref_cipher_only;
+ second_ref = &ref_auth_only;
+ } else {
+ first_ref = &ref_auth_only;
+ second_ref = &ref_cipher_only;
+ }
+ rc = crypto_encode_ref(first_ref,
+ *cipher_range, *auth_range,
+ digest_offset);
+ if (rc)
+ return 1;
+ /* Feed the output of the first pass into the second pass. */
+ memcpy(second_ref->plaintext, first_ref->ciphertext, ref->length);
+ rc = crypto_encode_ref(second_ref,
+ *cipher_range, *auth_range,
+ digest_offset);
+ if (rc)
+ return 1;
+ memcpy(ref->ciphertext, second_ref->ciphertext, ref->length);
+ /*
+ * These may be encrypted bytes, but that is what alg_test wants if
+ * the digest is encrypted in the input packet.
+ */
+ memcpy(ref->digest, second_ref->ciphertext + digest_offset, ref->digest_length);
+
+ return 0;
+}
+
+/*
+ * Return a cipher range length that is at least min_len bytes long, a
+ * multiple of the (maximum assumed) block size and at least 3 blocks.
+ */
+static uint32_t get_cipher_range_len(uint32_t min_len)
+{
+#define MAX_BLOCK_SIZE 16
+ uint32_t bs = MAX_BLOCK_SIZE;
+ uint32_t len = 3 * bs;
+
+ if (min_len > len)
+ len = ((min_len + bs - 1) / bs) * bs; /* round up to block size */
+ return len;
+}
+
+/* How the cipher range and the auth range relate to each other. */
+typedef enum range_overlap_t {
+ SEPARATE_AUTH_AND_CIPHER_RANGES,
+ SAME_AUTH_AND_CIPHER_RANGE,
+ RANGES_PARTIALLY_OVERLAP,
+ AUTH_RANGE_IN_CIPHER_RANGE,
+ CIPHER_RANGE_IN_AUTH_RANGE,
+} range_overlap_t;
+/* Must match the number of range_overlap_t enumerators above. */
+#define NUM_RANGE_OVERLAPS 5
+
+/* Where the hash/digest location lies relative to the two ranges. */
+typedef enum hash_location_t {
+ HASH_SEPARATE,
+ HASH_IN_AUTH_RANGE_ONLY,
+ HASH_IN_CIPHER_RANGE_ONLY,
+ HASH_IN_AUTH_AND_CIPHER_RANGE,
+} hash_location_t;
+/* Must match the number of hash_location_t enumerators above. */
+#define NUM_HASH_LOCATIONS 4
+
+/*
+ * Construct cipher/auth byte ranges and a digest offset matching the
+ * requested overlap and hash location. The diagrams below show the packet
+ * layout: c = cipher range, a = auth range, d = digest, _ = padding.
+ * Returns nonzero when the requested combination is not possible.
+ */
+static int make_byte_ranges(range_overlap_t overlap,
+ hash_location_t hash_location,
+ uint32_t hash_len,
+ odp_packet_data_range_t *cipher_range,
+ odp_packet_data_range_t *auth_range,
+ uint32_t *digest_offset)
+{
+ const uint32_t padding = 5; /* padding between parts, could also be zero */
+ const uint32_t nonzero_len = 3;
+ uint32_t c_offs = 0, c_len = 0, a_offs = 0, a_len = 0, digest_offs = 0;
+
+ switch (overlap) {
+ case SEPARATE_AUTH_AND_CIPHER_RANGES:
+ switch (hash_location) {
+ case HASH_SEPARATE:
+ /* |cccc_aaaa_dd| */
+ c_offs = 0;
+ c_len = get_cipher_range_len(nonzero_len);
+ a_offs = c_offs + c_len + padding;
+ a_len = nonzero_len;
+ digest_offs = a_offs + a_len + padding;
+ break;
+ case HASH_IN_AUTH_RANGE_ONLY:
+ /*
+ * |cccc_aaaa|
+ * | _dd_|
+ */
+ c_offs = 0;
+ c_len = get_cipher_range_len(nonzero_len);
+ a_offs = c_offs + c_len + padding;
+ a_len = hash_len + 2 * padding;
+ digest_offs = a_offs + padding;
+ break;
+ case HASH_IN_CIPHER_RANGE_ONLY:
+ /*
+ * |cccc_aaaa|
+ * |_dd_ |
+ */
+ c_offs = 0;
+ c_len = get_cipher_range_len(hash_len + 2 * padding);
+ a_offs = c_offs + c_len + padding;
+ a_len = nonzero_len;
+ digest_offs = c_offs + padding;
+ break;
+ case HASH_IN_AUTH_AND_CIPHER_RANGE:
+ /* not possible when ranges are separate */
+ return 1;
+ }
+ break;
+ case SAME_AUTH_AND_CIPHER_RANGE:
+ c_offs = 0;
+ a_offs = 0;
+ switch (hash_location) {
+ case HASH_SEPARATE:
+ /*
+ * |cccc_dd|
+ * |aaaa |
+ */
+ c_len = get_cipher_range_len(nonzero_len);
+ a_len = c_len;
+ digest_offs = c_len + padding;
+ break;
+ case HASH_IN_AUTH_RANGE_ONLY:
+ case HASH_IN_CIPHER_RANGE_ONLY:
+ /* not possible when ranges are the same */
+ return 1;
+ case HASH_IN_AUTH_AND_CIPHER_RANGE:
+ /*
+ * |cccc|
+ * |aaaa|
+ * |_dd_|
+ */
+ c_len = get_cipher_range_len(hash_len + 2 * padding);
+ a_len = c_len;
+ digest_offs = padding;
+ break;
+ }
+ break;
+ case RANGES_PARTIALLY_OVERLAP:
+ a_offs = 0;
+ switch (hash_location) {
+ case HASH_SEPARATE:
+ /*
+ * |aaaa |
+ * | cccc_dd|
+ */
+ a_len = 2 * nonzero_len;
+ c_offs = nonzero_len;
+ c_len = get_cipher_range_len(a_len);
+ digest_offs = c_offs + c_len + padding;
+ break;
+ case HASH_IN_AUTH_RANGE_ONLY:
+ /*
+ * |aaaaa |
+ * |_dd_ccc|
+ */
+ digest_offs = padding;
+ a_len = hash_len + 2 * padding + nonzero_len;
+ c_offs = hash_len + 2 * padding;
+ c_len = get_cipher_range_len(2 * nonzero_len);
+ break;
+ case HASH_IN_CIPHER_RANGE_ONLY:
+ /* PDCP case when AUTH_PLAINTEXT */
+ /*
+ * |aaaadd|
+ * | ccccc|
+ */
+ c_offs = nonzero_len;
+ c_len = get_cipher_range_len(nonzero_len + hash_len);
+ a_len = nonzero_len + c_len - hash_len;
+ digest_offs = c_offs + c_len - hash_len;
+ break;
+ case HASH_IN_AUTH_AND_CIPHER_RANGE:
+ /*
+ * |aaaaaa |
+ * | cccccc|
+ * | _dd_ |
+ */
+ c_offs = nonzero_len;
+ c_len = get_cipher_range_len(hash_len + 2 * padding + nonzero_len);
+ a_len = c_offs + hash_len + 2 * padding;
+ digest_offs = c_offs + padding;
+ break;
+ }
+ break;
+ case AUTH_RANGE_IN_CIPHER_RANGE:
+ c_offs = 0;
+ a_offs = nonzero_len;
+ switch (hash_location) {
+ case HASH_SEPARATE:
+ /*
+ * |cccc_dd|
+ * | aa_ |
+ */
+ a_len = nonzero_len;
+ c_len = get_cipher_range_len(a_offs + a_len + padding);
+ digest_offs = c_len + padding;
+ break;
+ case HASH_IN_AUTH_RANGE_ONLY:
+ /* not possible since auth range is in cipher range */
+ return 1;
+ case HASH_IN_CIPHER_RANGE_ONLY:
+ /*
+ * |ccccccc|
+ * | aa_dd_|
+ */
+ a_len = nonzero_len;
+ digest_offs = a_offs + a_len + padding;
+ c_len = get_cipher_range_len(digest_offs + hash_len + padding);
+ break;
+ case HASH_IN_AUTH_AND_CIPHER_RANGE:
+ /*
+ * |cccccc|
+ * | aaaa_|
+ * | _dd_ |
+ */
+ a_len = /**/ hash_len + 2 * padding;
+ c_len = get_cipher_range_len(a_offs + a_len + padding);
+ digest_offs = a_offs + /**/ padding;
+ break;
+ }
+ break;
+ case CIPHER_RANGE_IN_AUTH_RANGE:
+ a_offs = 0;
+ c_offs = nonzero_len;
+ switch (hash_location) {
+ case HASH_SEPARATE:
+ /*
+ * |aaaa_dd|
+ * | cc_ |
+ */
+ c_len = get_cipher_range_len(nonzero_len);
+ a_len = c_offs + c_len + padding;
+ digest_offs = a_len + padding;
+ break;
+ case HASH_IN_AUTH_RANGE_ONLY:
+ /*
+ * |aaaaaaa|
+ * | cc_dd_|
+ */
+ c_len = get_cipher_range_len(nonzero_len);
+ digest_offs = c_offs + c_len + padding;
+ a_len = digest_offs + hash_len + padding;
+ break;
+ case HASH_IN_CIPHER_RANGE_ONLY:
+ /* not possible since cipher range is in auth range */
+ return 1;
+ case HASH_IN_AUTH_AND_CIPHER_RANGE:
+ /*
+ * |aaaaaa|
+ * | cccc_|
+ * | _dd_ |
+ */
+ c_len = get_cipher_range_len(hash_len + 2 * padding);
+ a_len = c_offs + c_len + padding;
+ digest_offs = c_offs + padding;
+ break;
+ }
+ break;
+ }
+ cipher_range->offset = c_offs;
+ cipher_range->length = c_len;
+ auth_range->offset = a_offs;
+ auth_range->length = a_len;
+ *digest_offset = digest_offs;
+ return 0;
+}
+
+/*
+ * Build a combined cipher+auth reference for one overlap/location case and
+ * run it through alg_test() in both encode and decode directions.
+ */
+static void test_combo(const crypto_suite_t *suite,
+ range_overlap_t overlap,
+ hash_location_t location)
+{
+ int rc;
+
+ odp_packet_data_range_t cipher_range = {0, 0};
+ odp_packet_data_range_t auth_range = {0, 0};
+ uint32_t digest_offset = 0;
+ crypto_test_reference_t ref;
+
+ rc = make_byte_ranges(overlap,
+ location,
+ suite->auth_capa->digest_len,
+ &cipher_range,
+ &auth_range,
+ &digest_offset);
+ if (rc)
+ return; /* combination not possible, silently skip */
+
+ rc = create_combined_ref(suite, &ref,
+ &cipher_range, &auth_range,
+ digest_offset);
+ if (rc)
+ return;
+
+ /*
+ * Convert byte ranges to bits; alg_test() presumably interprets them
+ * according to the bit_mode flags passed below — NOTE(review): confirm
+ * against the alg_test() definition.
+ */
+ cipher_range.offset *= 8;
+ cipher_range.length *= 8;
+ auth_range.offset *= 8;
+ auth_range.length *= 8;
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ suite->order,
+ &ref,
+ cipher_range, auth_range,
+ digest_offset,
+ suite->cipher_capa->bit_mode,
+ suite->auth_capa->bit_mode);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ suite->order,
+ &ref,
+ cipher_range, auth_range,
+ digest_offset,
+ suite->cipher_capa->bit_mode,
+ suite->auth_capa->bit_mode);
+}
+
+/* Iterate and test different cipher/auth range and hash locations */
+static void test_combo_ranges(const crypto_suite_t *suite)
+{
+ /* In quick mode, limit the work to digest lengths divisible by four. */
+ if (!full_test && suite->auth_capa->digest_len % 4 != 0)
+ return;
+
+ for (int overlap = 0; overlap < NUM_RANGE_OVERLAPS; overlap++)
+ for (int location = 0; location < NUM_HASH_LOCATIONS; location++) {
+ if (suite->order == AUTH_CIPHERTEXT &&
+ (location == HASH_IN_CIPHER_RANGE_ONLY ||
+ location == HASH_IN_AUTH_AND_CIPHER_RANGE)) {
+ /*
+ * This combination is not valid since
+ * the generated hash would overwrite some
+ * ciphertext, preventing decryption.
+ */
+ continue;
+ }
+ test_combo(suite, overlap, location);
+ }
+}
+
+/* Iterate and test all variants (key sizes etc) of an alg combo */
+static void test_combo_variants(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
+{
+ int num, num_ciphers, num_auths;
+
+ /* ODP API says AES-GMAC can be combined with the null cipher only */
+ if (auth == ODP_AUTH_ALG_AES_GMAC &&
+ cipher != ODP_CIPHER_ALG_NULL)
+ return;
+
+ if (check_alg_support(cipher, auth) == ODP_TEST_INACTIVE)
+ return;
+
+ printf(" %s, %s\n",
+ cipher_alg_name(cipher),
+ auth_alg_name(auth));
+
+ /* First query the variant counts, then fetch them into VLAs. */
+ num_ciphers = odp_crypto_cipher_capability(cipher, NULL, 0);
+ num_auths = odp_crypto_auth_capability(auth, NULL, 0);
+ CU_ASSERT_FATAL(num_ciphers > 0);
+ CU_ASSERT_FATAL(num_auths > 0);
+
+ odp_crypto_cipher_capability_t cipher_capa[num_ciphers];
+ odp_crypto_auth_capability_t auth_capa[num_auths];
+
+ num = odp_crypto_cipher_capability(cipher, cipher_capa, num_ciphers);
+ CU_ASSERT(num == num_ciphers);
+ num = odp_crypto_auth_capability(auth, auth_capa, num_auths);
+ CU_ASSERT(num == num_auths);
+
+ /* Reset the once-per-combo warning flag for this combination. */
+ combo_warning_shown = 0;
+
+ /* Cross product of cipher variants x auth variants, both orders. */
+ for (int n = 0; n < num_ciphers; n++)
+ for (int i = 0; i < num_auths; i++) {
+ crypto_suite_t suite = {.cipher = cipher,
+ .auth = auth,
+ .cipher_capa = &cipher_capa[n],
+ .auth_capa = &auth_capa[i]};
+ suite.order = AUTH_PLAINTEXT;
+ test_combo_ranges(&suite);
+ suite.order = AUTH_CIPHERTEXT;
+ test_combo_ranges(&suite);
+ }
+}
+
+/* Test every supported cipher/auth algorithm combination. */
+static void test_all_combinations(void)
+{
+ /* Skipped in partial test mode to keep the run time short. */
+ if (suite_context.partial_test) {
+ printf("skipped ");
+ return;
+ }
+
+ printf("\n");
+ for (size_t n = 0; n < ODPH_ARRAY_SIZE(cipher_algs); n++)
+ for (size_t i = 0; i < ODPH_ARRAY_SIZE(auth_algs); i++)
+ test_combo_variants(cipher_algs[n], auth_algs[i]);
+}
+
+/*
+ * Per-algorithm test wrappers. For each cipher algorithm there is a trio:
+ * check_alg_<alg>() tells CUnit whether the algorithm is supported, and
+ * crypto_test_enc/dec_alg_<alg>() run the reference test vectors through
+ * check_alg() in the encode and decode directions respectively.
+ */
+static int check_alg_null(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_null(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ null_reference,
+ ODPH_ARRAY_SIZE(null_reference));
+}
+
+static void crypto_test_dec_alg_null(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ null_reference,
+ ODPH_ARRAY_SIZE(null_reference));
+}
+
+static int check_alg_3des_cbc(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_3DES_CBC, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_3des_cbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ tdes_cbc_reference,
+ ODPH_ARRAY_SIZE(tdes_cbc_reference));
+}
+
+static void crypto_test_dec_alg_3des_cbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ tdes_cbc_reference,
+ ODPH_ARRAY_SIZE(tdes_cbc_reference));
+}
+
+static int check_alg_3des_ecb(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_3DES_ECB, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_3des_ecb(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ tdes_ecb_reference,
+ ODPH_ARRAY_SIZE(tdes_ecb_reference));
+}
+
+static void crypto_test_dec_alg_3des_ecb(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ tdes_ecb_reference,
+ ODPH_ARRAY_SIZE(tdes_ecb_reference));
+}
+
+static int check_alg_chacha20_poly1305(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_CHACHA20_POLY1305,
+ ODP_AUTH_ALG_CHACHA20_POLY1305);
+}
+
+static void crypto_test_enc_alg_chacha20_poly1305(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ chacha20_poly1305_reference,
+ ODPH_ARRAY_SIZE(chacha20_poly1305_reference));
+}
+
+static void crypto_test_dec_alg_chacha20_poly1305(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ chacha20_poly1305_reference,
+ ODPH_ARRAY_SIZE(chacha20_poly1305_reference));
+}
+
+static int check_alg_aes_gcm(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_GCM, ODP_AUTH_ALG_AES_GCM);
+}
+
+static void crypto_test_enc_alg_aes_gcm(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_gcm_reference,
+ ODPH_ARRAY_SIZE(aes_gcm_reference));
+}
+
+static void crypto_test_dec_alg_aes_gcm(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_gcm_reference,
+ ODPH_ARRAY_SIZE(aes_gcm_reference));
+}
+
+static int check_alg_aes_ccm(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_CCM, ODP_AUTH_ALG_AES_CCM);
+}
+
+static void crypto_test_enc_alg_aes_ccm(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_ccm_reference,
+ ODPH_ARRAY_SIZE(aes_ccm_reference));
+}
+
+static void crypto_test_dec_alg_aes_ccm(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_ccm_reference,
+ ODPH_ARRAY_SIZE(aes_ccm_reference));
+}
+
+static int check_alg_aes_cbc(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_CBC, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_cbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_cbc_reference,
+ ODPH_ARRAY_SIZE(aes_cbc_reference));
+}
+
+static void crypto_test_dec_alg_aes_cbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_cbc_reference,
+ ODPH_ARRAY_SIZE(aes_cbc_reference));
+}
+
+static int check_alg_aes_ctr(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_CTR, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_ctr(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_ctr_reference,
+ ODPH_ARRAY_SIZE(aes_ctr_reference));
+}
+
+static void crypto_test_dec_alg_aes_ctr(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_ctr_reference,
+ ODPH_ARRAY_SIZE(aes_ctr_reference));
+}
+
+static int check_alg_aes_ecb(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_ECB, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_ecb(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_ecb_reference,
+ ODPH_ARRAY_SIZE(aes_ecb_reference));
+}
+
+static void crypto_test_dec_alg_aes_ecb(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_ecb_reference,
+ ODPH_ARRAY_SIZE(aes_ecb_reference));
+}
+
+static int check_alg_aes_cfb128(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_CFB128, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_cfb128(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_cfb128_reference,
+ ODPH_ARRAY_SIZE(aes_cfb128_reference));
+}
+
+static void crypto_test_dec_alg_aes_cfb128(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_cfb128_reference,
+ ODPH_ARRAY_SIZE(aes_cfb128_reference));
+}
+
+static int check_alg_aes_xts(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_XTS, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_xts(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_xts_reference,
+ ODPH_ARRAY_SIZE(aes_xts_reference));
+}
+
+static void crypto_test_dec_alg_aes_xts(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_xts_reference,
+ ODPH_ARRAY_SIZE(aes_xts_reference));
+}
+
+static int check_alg_kasumi_f8(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_KASUMI_F8, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_kasumi_f8(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ kasumi_f8_reference,
+ ODPH_ARRAY_SIZE(kasumi_f8_reference));
+}
+
+static void crypto_test_dec_alg_kasumi_f8(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ kasumi_f8_reference,
+ ODPH_ARRAY_SIZE(kasumi_f8_reference));
+}
+
+static int check_alg_snow3g_uea2(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_SNOW3G_UEA2, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_snow3g_uea2(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ snow3g_uea2_reference,
+ ODPH_ARRAY_SIZE(snow3g_uea2_reference));
+}
+
+static void crypto_test_dec_alg_snow3g_uea2(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ snow3g_uea2_reference,
+ ODPH_ARRAY_SIZE(snow3g_uea2_reference));
+}
+
+static int check_alg_aes_eea2(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_AES_EEA2,
+ ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_aes_eea2(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_eea2_reference,
+ ODPH_ARRAY_SIZE(aes_eea2_reference));
+}
+
+static void crypto_test_dec_alg_aes_eea2(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_eea2_reference,
+ ODPH_ARRAY_SIZE(aes_eea2_reference));
+}
+
+static int check_alg_zuc_eea3(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_ZUC_EEA3, ODP_AUTH_ALG_NULL);
+}
+
+static void crypto_test_enc_alg_zuc_eea3(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ zuc_eea3_reference,
+ ODPH_ARRAY_SIZE(zuc_eea3_reference));
+}
+
+static void crypto_test_dec_alg_zuc_eea3(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ zuc_eea3_reference,
+ ODPH_ARRAY_SIZE(zuc_eea3_reference));
+}
+
+/*
+ * Per-algorithm wrappers for authentication-only algorithms. For each auth
+ * algorithm: check_alg_<alg>() reports support to CUnit, while
+ * crypto_test_gen/check_alg_<alg>() run the reference vectors through
+ * check_alg() for digest generation (encode) and verification (decode).
+ */
+static int check_alg_hmac_md5(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_MD5_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_md5(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_md5_reference,
+ ODPH_ARRAY_SIZE(hmac_md5_reference));
+}
+
+static void crypto_test_check_alg_hmac_md5(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_md5_reference,
+ ODPH_ARRAY_SIZE(hmac_md5_reference));
+}
+
+static int check_alg_hmac_sha1(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA1_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_sha1(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_sha1_reference,
+ ODPH_ARRAY_SIZE(hmac_sha1_reference));
+}
+
+static void crypto_test_check_alg_hmac_sha1(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_sha1_reference,
+ ODPH_ARRAY_SIZE(hmac_sha1_reference));
+}
+
+static int check_alg_hmac_sha224(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA224_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_sha224(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_sha224_reference,
+ ODPH_ARRAY_SIZE(hmac_sha224_reference));
+}
+
+static void crypto_test_check_alg_hmac_sha224(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_sha224_reference,
+ ODPH_ARRAY_SIZE(hmac_sha224_reference));
+}
+
+static int check_alg_hmac_sha256(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA256_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_sha256(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_sha256_reference,
+ ODPH_ARRAY_SIZE(hmac_sha256_reference));
+}
+
+static void crypto_test_check_alg_hmac_sha256(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_sha256_reference,
+ ODPH_ARRAY_SIZE(hmac_sha256_reference));
+}
+
+static int check_alg_hmac_sha384(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA384_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_sha384(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_sha384_reference,
+ ODPH_ARRAY_SIZE(hmac_sha384_reference));
+}
+
+static void crypto_test_check_alg_hmac_sha384(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_sha384_reference,
+ ODPH_ARRAY_SIZE(hmac_sha384_reference));
+}
+
+static int check_alg_hmac_sha512(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA512_HMAC);
+}
+
+static void crypto_test_gen_alg_hmac_sha512(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ hmac_sha512_reference,
+ ODPH_ARRAY_SIZE(hmac_sha512_reference));
+}
+
+static void crypto_test_check_alg_hmac_sha512(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ hmac_sha512_reference,
+ ODPH_ARRAY_SIZE(hmac_sha512_reference));
+}
+
+static int check_alg_aes_xcbc(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL,
+ ODP_AUTH_ALG_AES_XCBC_MAC);
+}
+
+static void crypto_test_gen_alg_aes_xcbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_xcbc_reference,
+ ODPH_ARRAY_SIZE(aes_xcbc_reference));
+}
+
+static void crypto_test_check_alg_aes_xcbc(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_xcbc_reference,
+ ODPH_ARRAY_SIZE(aes_xcbc_reference));
+}
+
+static int check_alg_aes_gmac(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_AES_GMAC);
+}
+
+static void crypto_test_gen_alg_aes_gmac(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_gmac_reference,
+ ODPH_ARRAY_SIZE(aes_gmac_reference));
+}
+
+static void crypto_test_check_alg_aes_gmac(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_gmac_reference,
+ ODPH_ARRAY_SIZE(aes_gmac_reference));
+}
+
+static int check_alg_aes_cmac(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_AES_CMAC);
+}
+
+static void crypto_test_gen_alg_aes_cmac(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_cmac_reference,
+ ODPH_ARRAY_SIZE(aes_cmac_reference));
+}
+
+static void crypto_test_check_alg_aes_cmac(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_cmac_reference,
+ ODPH_ARRAY_SIZE(aes_cmac_reference));
+}
+
+static int check_alg_kasumi_f9(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_KASUMI_F9);
+}
+
+static void crypto_test_gen_alg_kasumi_f9(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ kasumi_f9_reference,
+ ODPH_ARRAY_SIZE(kasumi_f9_reference));
+}
+
+static void crypto_test_check_alg_kasumi_f9(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ kasumi_f9_reference,
+ ODPH_ARRAY_SIZE(kasumi_f9_reference));
+}
+
+static int check_alg_snow3g_uia2(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SNOW3G_UIA2);
+}
+
+static void crypto_test_gen_alg_snow3g_uia2(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ snow3g_uia2_reference,
+ ODPH_ARRAY_SIZE(snow3g_uia2_reference));
+}
+
+static void crypto_test_check_alg_snow3g_uia2(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ snow3g_uia2_reference,
+ ODPH_ARRAY_SIZE(snow3g_uia2_reference));
+}
+
+static int check_alg_aes_eia2(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL,
+ ODP_AUTH_ALG_AES_EIA2);
+}
+
+static void crypto_test_gen_alg_aes_eia2(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ aes_eia2_reference,
+ ODPH_ARRAY_SIZE(aes_eia2_reference));
+}
+
+static void crypto_test_check_alg_aes_eia2(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ aes_eia2_reference,
+ ODPH_ARRAY_SIZE(aes_eia2_reference));
+}
+
+static int check_alg_zuc_eia3(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_ZUC_EIA3);
+}
+
+static void crypto_test_gen_alg_zuc_eia3(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ zuc_eia3_reference,
+ ODPH_ARRAY_SIZE(zuc_eia3_reference));
+}
+
+static void crypto_test_check_alg_zuc_eia3(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ zuc_eia3_reference,
+ ODPH_ARRAY_SIZE(zuc_eia3_reference));
+}
+
+static int check_alg_md5(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_MD5);
+}
+
+static void crypto_test_gen_alg_md5(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ md5_reference,
+ ODPH_ARRAY_SIZE(md5_reference));
+}
+
+static void crypto_test_check_alg_md5(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ md5_reference,
+ ODPH_ARRAY_SIZE(md5_reference));
+}
+
+static int check_alg_sha1(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA1);
+}
+
+static void crypto_test_gen_alg_sha1(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ sha1_reference,
+ ODPH_ARRAY_SIZE(sha1_reference));
+}
+
+static void crypto_test_check_alg_sha1(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ sha1_reference,
+ ODPH_ARRAY_SIZE(sha1_reference));
+}
+
+static int check_alg_sha224(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA224);
+}
+
+static void crypto_test_gen_alg_sha224(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ sha224_reference,
+ ODPH_ARRAY_SIZE(sha224_reference));
+}
+
+static void crypto_test_check_alg_sha224(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ sha224_reference,
+ ODPH_ARRAY_SIZE(sha224_reference));
+}
+
+static int check_alg_sha256(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA256);
+}
+
+static void crypto_test_gen_alg_sha256(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ sha256_reference,
+ ODPH_ARRAY_SIZE(sha256_reference));
+}
+
+static void crypto_test_check_alg_sha256(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ sha256_reference,
+ ODPH_ARRAY_SIZE(sha256_reference));
+}
+
+static int check_alg_sha384(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA384);
+}
+
+static void crypto_test_gen_alg_sha384(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ sha384_reference,
+ ODPH_ARRAY_SIZE(sha384_reference));
+}
+
+static void crypto_test_check_alg_sha384(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ sha384_reference,
+ ODPH_ARRAY_SIZE(sha384_reference));
+}
+
+static int check_alg_sha512(void)
+{
+ return check_alg_support(ODP_CIPHER_ALG_NULL, ODP_AUTH_ALG_SHA512);
+}
+
+static void crypto_test_gen_alg_sha512(void)
+{
+ check_alg(ODP_CRYPTO_OP_ENCODE,
+ sha512_reference,
+ ODPH_ARRAY_SIZE(sha512_reference));
+}
+
+static void crypto_test_check_alg_sha512(void)
+{
+ check_alg(ODP_CRYPTO_OP_DECODE,
+ sha512_reference,
+ ODPH_ARRAY_SIZE(sha512_reference));
+}
+
+/* Create a parallel scheduled queue for async crypto completion events. */
+static odp_queue_t sched_compl_queue_create(void)
+{
+ odp_queue_param_t qparam;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = odp_schedule_default_prio();
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+
+ return odp_queue_create("crypto-out", &qparam);
+}
+
+/* Create a plain (polled) queue for async crypto completion events. */
+static odp_queue_t plain_compl_queue_create(void)
+{
+ return odp_queue_create("crypto-out", NULL);
+}
+
+/* Dequeue a completion event via the scheduler (non-blocking). */
+static odp_event_t sched_compl_queue_deq(void)
+{
+ return odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+}
+
+/* Dequeue a completion event directly from the plain completion queue. */
+static odp_event_t plain_compl_queue_deq(void)
+{
+ return odp_queue_deq(suite_context.queue);
+}
+
+/*
+ * Decide whether this suite configuration should run only a reduced set of
+ * tests. Returns 1 for configurations considered redundant when a fuller
+ * configuration (async + scheduled queue) is available; 0 in full-test mode,
+ * when capability query fails, or when async mode is unsupported.
+ */
+static int partial_test_only(odp_crypto_op_mode_t op_mode, odp_queue_type_t q_type)
+{
+ odp_crypto_capability_t capa;
+
+ if (full_test || odp_crypto_capability(&capa))
+ return 0;
+
+ if (!capa.async_mode)
+ return 0;
+
+ if (op_mode == ODP_CRYPTO_SYNC)
+ return 1;
+ else if (q_type == ODP_QUEUE_TYPE_PLAIN && capa.queue_type_sched)
+ return 1;
+
+ return 0;
+}
+
+/* Suite init: synchronous crypto operations, no completion queue. */
+static int crypto_suite_packet_sync_init(void)
+{
+ suite_context.op_mode = ODP_CRYPTO_SYNC;
+
+ suite_context.pool = odp_pool_lookup("packet_pool");
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ suite_context.queue = ODP_QUEUE_INVALID;
+ suite_context.partial_test = partial_test_only(suite_context.op_mode,
+ ODP_QUEUE_TYPE_PLAIN);
+ return 0;
+}
+
+/* Suite init: asynchronous crypto with a plain (polled) completion queue. */
+static int crypto_suite_packet_async_plain_init(void)
+{
+ odp_queue_t out_queue;
+
+ suite_context.op_mode = ODP_CRYPTO_ASYNC;
+
+ suite_context.pool = odp_pool_lookup("packet_pool");
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ out_queue = plain_compl_queue_create();
+ if (ODP_QUEUE_INVALID == out_queue) {
+ ODPH_ERR("Crypto outq creation failed\n");
+ return -1;
+ }
+ suite_context.queue = out_queue;
+ suite_context.q_type = ODP_QUEUE_TYPE_PLAIN;
+ suite_context.compl_queue_deq = plain_compl_queue_deq;
+ suite_context.partial_test = partial_test_only(suite_context.op_mode,
+ suite_context.q_type);
+
+ return 0;
+}
+
+/* Suite init: asynchronous crypto with a scheduled completion queue. */
+static int crypto_suite_packet_async_sched_init(void)
+{
+ odp_queue_t out_queue;
+
+ suite_context.op_mode = ODP_CRYPTO_ASYNC;
+
+ suite_context.pool = odp_pool_lookup("packet_pool");
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ out_queue = sched_compl_queue_create();
+ if (ODP_QUEUE_INVALID == out_queue) {
+ ODPH_ERR("Crypto outq creation failed\n");
+ return -1;
+ }
+ suite_context.queue = out_queue;
+ suite_context.q_type = ODP_QUEUE_TYPE_SCHED;
+ suite_context.compl_queue_deq = sched_compl_queue_deq;
+ suite_context.partial_test = partial_test_only(suite_context.op_mode,
+ suite_context.q_type);
+
+ return 0;
+}
+
+/* Suite teardown: destroy the completion queue and list inactive tests. */
+static int crypto_suite_term(void)
+{
+ if (ODP_QUEUE_INVALID != suite_context.queue) {
+ if (odp_queue_destroy(suite_context.queue))
+ ODPH_ERR("Crypto outq destroy failed\n");
+ } else {
+ ODPH_ERR("Crypto outq not found\n");
+ }
+
+ return odp_cunit_print_inactive();
+}
+
+odp_testinfo_t crypto_suite[] = {
+ ODP_TEST_INFO(test_capability),
+ ODP_TEST_INFO(test_default_values),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_null,
+ check_alg_null),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_null,
+ check_alg_null),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_3des_cbc,
+ check_alg_3des_cbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_3des_cbc,
+ check_alg_3des_cbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_3des_ecb,
+ check_alg_3des_ecb),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_3des_ecb,
+ check_alg_3des_ecb),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_cbc,
+ check_alg_aes_cbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_cbc,
+ check_alg_aes_cbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_ctr,
+ check_alg_aes_ctr),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_ctr,
+ check_alg_aes_ctr),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_ecb,
+ check_alg_aes_ecb),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_ecb,
+ check_alg_aes_ecb),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_cfb128,
+ check_alg_aes_cfb128),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_cfb128,
+ check_alg_aes_cfb128),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_xts,
+ check_alg_aes_xts),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_xts,
+ check_alg_aes_xts),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_kasumi_f8,
+ check_alg_kasumi_f8),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_kasumi_f8,
+ check_alg_kasumi_f8),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_snow3g_uea2,
+ check_alg_snow3g_uea2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_snow3g_uea2,
+ check_alg_snow3g_uea2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_eea2,
+ check_alg_aes_eea2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_eea2,
+ check_alg_aes_eea2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_zuc_eea3,
+ check_alg_zuc_eea3),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_zuc_eea3,
+ check_alg_zuc_eea3),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_gcm,
+ check_alg_aes_gcm),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_gcm,
+ check_alg_aes_gcm),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_aes_ccm,
+ check_alg_aes_ccm),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_aes_ccm,
+ check_alg_aes_ccm),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_enc_alg_chacha20_poly1305,
+ check_alg_chacha20_poly1305),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_dec_alg_chacha20_poly1305,
+ check_alg_chacha20_poly1305),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_md5,
+ check_alg_hmac_md5),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_md5,
+ check_alg_hmac_md5),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_sha1,
+ check_alg_hmac_sha1),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_sha1,
+ check_alg_hmac_sha1),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_sha224,
+ check_alg_hmac_sha224),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_sha224,
+ check_alg_hmac_sha224),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_sha256,
+ check_alg_hmac_sha256),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_sha256,
+ check_alg_hmac_sha256),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_sha384,
+ check_alg_hmac_sha384),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_sha384,
+ check_alg_hmac_sha384),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_hmac_sha512,
+ check_alg_hmac_sha512),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_hmac_sha512,
+ check_alg_hmac_sha512),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_aes_xcbc,
+ check_alg_aes_xcbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_aes_xcbc,
+ check_alg_aes_xcbc),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_aes_gmac,
+ check_alg_aes_gmac),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_aes_gmac,
+ check_alg_aes_gmac),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_aes_cmac,
+ check_alg_aes_cmac),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_aes_cmac,
+ check_alg_aes_cmac),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_kasumi_f9,
+ check_alg_kasumi_f9),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_kasumi_f9,
+ check_alg_kasumi_f9),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_snow3g_uia2,
+ check_alg_snow3g_uia2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_snow3g_uia2,
+ check_alg_snow3g_uia2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_aes_eia2,
+ check_alg_aes_eia2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_aes_eia2,
+ check_alg_aes_eia2),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_zuc_eia3,
+ check_alg_zuc_eia3),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_zuc_eia3,
+ check_alg_zuc_eia3),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_md5,
+ check_alg_md5),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_md5,
+ check_alg_md5),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_sha1,
+ check_alg_sha1),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_sha1,
+ check_alg_sha1),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_sha224,
+ check_alg_sha224),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_sha224,
+ check_alg_sha224),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_sha256,
+ check_alg_sha256),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_sha256,
+ check_alg_sha256),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_sha384,
+ check_alg_sha384),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_sha384,
+ check_alg_sha384),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_gen_alg_sha512,
+ check_alg_sha512),
+ ODP_TEST_INFO_CONDITIONAL(crypto_test_check_alg_sha512,
+ check_alg_sha512),
+ ODP_TEST_INFO(test_auth_hashes_in_auth_range),
+ ODP_TEST_INFO(test_all_combinations),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t crypto_suites[] = {
+ {"odp_crypto_packet_sync_inp", crypto_suite_packet_sync_init,
+ NULL, crypto_suite},
+ {"odp_crypto_packet_async_plain_inp",
+ crypto_suite_packet_async_plain_init,
+ crypto_suite_term, crypto_suite},
+ {"odp_crypto_packet_async_sched_inp",
+ crypto_suite_packet_async_sched_init,
+ crypto_suite_term, crypto_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+static int crypto_init(odp_instance_t *inst)
+{
+ odp_pool_param_t params;
+ odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ /* Configure the scheduler. */
+ if (odp_schedule_config(NULL)) {
+ ODPH_ERR("odp_schedule_config() failed\n");
+ return -1;
+ }
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("odp_pool_capability() failed\n");
+ return -1;
+ }
+
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = PKT_POOL_LEN;
+ params.pkt.len = PKT_POOL_LEN;
+ params.pkt.num = PKT_POOL_NUM;
+ params.type = ODP_POOL_PACKET;
+
+ /*
+ * Let's have a user area so that we can check that its
+ * content gets copied along with other metadata when needed.
+ */
+ if (pool_capa.pkt.max_uarea_size >= UAREA_SIZE)
+ params.pkt.uarea_size = UAREA_SIZE;
+ else
+ printf("Warning: could not request packet user area\n");
+
+ if (pool_capa.pkt.max_seg_len &&
+ PKT_POOL_LEN > pool_capa.pkt.max_seg_len) {
+ ODPH_ERR("Warning: small packet segment length\n");
+ params.pkt.seg_len = pool_capa.pkt.max_seg_len;
+ }
+
+ if (pool_capa.pkt.max_len &&
+ PKT_POOL_LEN > pool_capa.pkt.max_len) {
+ ODPH_ERR("Pool max packet length too small\n");
+ return -1;
+ }
+
+ pool = odp_pool_create("packet_pool", &params);
+
+ if (ODP_POOL_INVALID == pool) {
+ ODPH_ERR("Packet pool creation failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int crypto_term(odp_instance_t inst)
+{
+ odp_pool_t pool;
+
+ pool = odp_pool_lookup("packet_pool");
+ if (ODP_POOL_INVALID != pool) {
+ if (odp_pool_destroy(pool))
+ ODPH_ERR("Packet pool destroy failed\n");
+ } else {
+ ODPH_ERR("Packet pool not found\n");
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ char *env = getenv("FULL_TEST");
+
+ if (env && strcmp(env, "0"))
+ full_test = 1;
+ printf("Test mode: %s\n", full_test ? "full" : "partial");
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(crypto_init);
+ odp_cunit_register_global_term(crypto_term);
+
+ ret = odp_cunit_register(crypto_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/crypto/test_vector_defs.h b/test/validation/api/crypto/test_vector_defs.h
new file mode 100644
index 000000000..46ae4e4e1
--- /dev/null
+++ b/test/validation/api/crypto/test_vector_defs.h
@@ -0,0 +1,3167 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_VECTOR_DEFS_H
+#define TEST_VECTOR_DEFS_H
+
+#include <odp_api.h>
+#include "test_vectors.h"
+
+ODP_STATIC_ASSERT(ODP_CIPHER_ALG_NULL == 0, "null cipher is not the default");
+ODP_STATIC_ASSERT(ODP_AUTH_ALG_NULL == 0, "null auth is not the default");
+
+static crypto_test_reference_t null_reference[] = {
+ {
+ .length = 8,
+ .plaintext = { 0x32, 0x6a, 0x49, 0x4c, 0xd3, 0x3f, 0xe7, 0x56 },
+ .ciphertext = { 0x32, 0x6a, 0x49, 0x4c, 0xd3, 0x3f, 0xe7, 0x56 }
+ },
+ {
+ .length = 16,
+ .plaintext = { 0x84, 0x40, 0x1f, 0x78, 0xfe, 0x6c, 0x10, 0x87,
+ 0x6d, 0x8e, 0xa2, 0x30, 0x94, 0xea, 0x53, 0x09 },
+ .ciphertext = { 0x84, 0x40, 0x1f, 0x78, 0xfe, 0x6c, 0x10, 0x87,
+ 0x6d, 0x8e, 0xa2, 0x30, 0x94, 0xea, 0x53, 0x09 }
+ }
+};
+
+/* TDES-CBC reference vectors, according to
+ * "http://csrc.nist.gov/groups/STM/cavp/documents/des/DESMMT.pdf"
+ */
+static crypto_test_reference_t tdes_cbc_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key_length = TDES_CBC_KEY_LEN,
+ .cipher_key = { 0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10,
+ 0x43, 0xcd, 0x26, 0x5d, 0x58, 0x40, 0xea, 0xf1,
+ 0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c},
+ .cipher_iv_length = TDES_CBC_IV_LEN,
+ .cipher_iv = { 0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75 },
+ .length = 8,
+ .plaintext = { 0x32, 0x6a, 0x49, 0x4c, 0xd3, 0x3f, 0xe7, 0x56 },
+ .ciphertext = { 0xb2, 0x2b, 0x8d, 0x66, 0xde, 0x97, 0x06, 0x92 }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_3DES_CBC,
+ .cipher_key_length = TDES_CBC_KEY_LEN,
+ .cipher_key = { 0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc,
+ 0x07, 0x54, 0xb9, 0x4f, 0x31, 0xcb, 0xb3, 0x85,
+ 0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae},
+ .cipher_iv_length = TDES_CBC_IV_LEN,
+ .cipher_iv = {0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65 },
+ .length = 16,
+ .plaintext = { 0x84, 0x40, 0x1f, 0x78, 0xfe, 0x6c, 0x10, 0x87,
+ 0x6d, 0x8e, 0xa2, 0x30, 0x94, 0xea, 0x53, 0x09 },
+ .ciphertext = { 0x7b, 0x1f, 0x7c, 0x7e, 0x3b, 0x1c, 0x94, 0x8e,
+ 0xbd, 0x04, 0xa7, 0x5f, 0xfb, 0xa7, 0xd2, 0xf5 }
+ }
+};
+
+/*
+ * TDES-ECB reference vectors, according to
+ * CAVS 18.0 TECBMMT
+ */
+static crypto_test_reference_t tdes_ecb_reference[] = {
+ /* CAVS 18.0 TECBMMT2.rsp #0 */
+ {
+ .cipher = ODP_CIPHER_ALG_3DES_ECB,
+ .cipher_key_length = TDES_ECB_KEY_LEN,
+ .cipher_key = { 0x15, 0x1f, 0x10, 0x38, 0x3d, 0x6d, 0x19, 0x9b,
+ 0x4a, 0x76, 0x3b, 0xd5, 0x4a, 0x46, 0xa4, 0x45,
+ 0x15, 0x1f, 0x10, 0x38, 0x3d, 0x6d, 0x19, 0x9b},
+ .length = 8,
+ .plaintext = { 0xd8, 0xda, 0x89, 0x29, 0x88, 0x78, 0xed, 0x7d },
+ .ciphertext = { 0x89, 0x32, 0x1b, 0xa7, 0x5b, 0xa5, 0x45, 0xdb }
+ },
+ /* CAVS 18.0 TECBMMT2.rsp #2 */
+ {
+ .cipher = ODP_CIPHER_ALG_3DES_ECB,
+ .cipher_key_length = TDES_ECB_KEY_LEN,
+ .cipher_key = { 0xcd, 0x3d, 0x9b, 0xf7, 0x2f, 0x8c, 0x8a, 0xb5,
+ 0xfe, 0xe6, 0x73, 0x34, 0x31, 0x1c, 0xa4, 0x62,
+ 0xcd, 0x3d, 0x9b, 0xf7, 0x2f, 0x8c, 0x8a, 0xb5},
+ .length = 24,
+ .plaintext = { 0x2f, 0x2a, 0x36, 0x1c, 0x8e, 0x14, 0x5d, 0xc0,
+ 0xa7, 0x4a, 0x1b, 0xdb, 0x7c, 0xa9, 0x29, 0xc3,
+ 0x38, 0x14, 0x4d, 0x89, 0x13, 0x5b, 0x50, 0xa7 },
+ .ciphertext = { 0x7f, 0x1f, 0xd3, 0x2b, 0x36, 0x90, 0x05, 0x4b,
+ 0xfa, 0x1b, 0x17, 0x35, 0x15, 0x79, 0x33, 0x80,
+ 0x99, 0xff, 0xa8, 0x4f, 0xea, 0x16, 0x8c, 0x6b }
+ }
+};
+
+static crypto_test_reference_t aes_cbc_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ .length = 16,
+ .plaintext = "Single block msg",
+ .ciphertext = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
+ 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ .length = 32,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ .ciphertext = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
+ 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
+ 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
+ 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x6c, 0x3e, 0xa0, 0x47, 0x76, 0x30, 0xce, 0x21,
+ 0xa2, 0xce, 0x33, 0x4a, 0xa7, 0x46, 0xc2, 0xcd},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0xc7, 0x82, 0xdc, 0x4c, 0x09, 0x8c, 0x66, 0xcb,
+ 0xd9, 0xcd, 0x27, 0xd8, 0x25, 0x68, 0x2c, 0x81 },
+ .length = 48,
+ .plaintext = "This is a 48-byte message (exactly 3 AES blocks)",
+ .ciphertext = { 0xd0, 0xa0, 0x2b, 0x38, 0x36, 0x45, 0x17, 0x53,
+ 0xd4, 0x93, 0x66, 0x5d, 0x33, 0xf0, 0xe8, 0x86,
+ 0x2d, 0xea, 0x54, 0xcd, 0xb2, 0x93, 0xab, 0xc7,
+ 0x50, 0x69, 0x39, 0x27, 0x67, 0x72, 0xf8, 0xd5,
+ 0x02, 0x1c, 0x19, 0x21, 0x6b, 0xad, 0x52, 0x5c,
+ 0x85, 0x79, 0x69, 0x5d, 0x83, 0xba, 0x26, 0x84 }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
+ 0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
+ 0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9 },
+ .length = 64,
+ .plaintext = { 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf },
+ .ciphertext = { 0xc3, 0x0e, 0x32, 0xff, 0xed, 0xc0, 0x77, 0x4e,
+ 0x6a, 0xff, 0x6a, 0xf0, 0x86, 0x9f, 0x71, 0xaa,
+ 0x0f, 0x3a, 0xf0, 0x7a, 0x9a, 0x31, 0xa9, 0xc6,
+ 0x84, 0xdb, 0x20, 0x7e, 0xb0, 0xef, 0x8e, 0x4e,
+ 0x35, 0x90, 0x7a, 0xa6, 0x32, 0xc3, 0xff, 0xdf,
+ 0x86, 0x8b, 0xb7, 0xb2, 0x9d, 0x3d, 0x46, 0xad,
+ 0x83, 0xce, 0x9f, 0x9a, 0x10, 0x2e, 0xe9, 0x9d,
+ 0x49, 0xa5, 0x3e, 0x87, 0xf4, 0xc3, 0xda, 0x55 }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES192_KEY_LEN,
+ .cipher_key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88, 0x01, 0x23, 0x45, 0x67 },
+ .length = 32,
+ .plaintext = { 0x45, 0x00, 0x00, 0x28, 0xa4, 0xad, 0x40, 0x00,
+ 0x40, 0x06, 0x78, 0x80, 0x0a, 0x01, 0x03, 0x8f,
+ 0x0a, 0x01, 0x06, 0x12, 0x80, 0x23, 0x06, 0xb8,
+ 0xcb, 0x71, 0x26, 0x02, 0xdd, 0x6b, 0xb0, 0x3e },
+ .ciphertext = { 0x0d, 0xbe, 0x02, 0xda, 0x68, 0x9c, 0x8f, 0x30,
+ 0xce, 0x7c, 0x91, 0x7d, 0x41, 0x08, 0xf6, 0xf1,
+ 0x8e, 0x0d, 0x7f, 0x02, 0xb6, 0x80, 0x9a, 0x2d,
+ 0x53, 0x1c, 0xc6, 0x98, 0x85, 0xc3, 0x00, 0xe6},
+ },
+ /* NIST Special Publication 800-38A */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES192_KEY_LEN,
+ .cipher_key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .length = 64,
+ .plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
+ 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
+ 0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
+ 0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
+ 0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
+ 0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
+ 0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
+ 0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd }
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES256_KEY_LEN,
+ .cipher_key = { 0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+ 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab,
+ 0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+ 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x11, 0x22, 0x33, 0x44, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c },
+ .length = 48,
+ .plaintext = { 0x45, 0x00, 0x00, 0x30, 0x69, 0xa6, 0x40, 0x00,
+ 0x80, 0x06, 0x26, 0x90, 0xc0, 0xa8, 0x01, 0x02,
+ 0x93, 0x89, 0x15, 0x5e, 0x0a, 0x9e, 0x00, 0x8b,
+ 0x2d, 0xc5, 0x7e, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x02, 0x40, 0x00, 0x20, 0xbf, 0x00, 0x00,
+ 0x02, 0x04, 0x05, 0xb4, 0x01, 0x01, 0x04, 0x02 },
+ .ciphertext = { 0x92, 0x1e, 0x2f, 0x37, 0x36, 0x3c, 0x45, 0xda,
+ 0xc9, 0x58, 0xb7, 0x07, 0x06, 0x56, 0x54, 0xc5,
+ 0x93, 0x46, 0x90, 0xb8, 0xcf, 0x0d, 0x4f, 0x79,
+ 0xf1, 0x32, 0xc2, 0xf7, 0x23, 0xb8, 0x83, 0x09,
+ 0xbc, 0x37, 0x1c, 0xeb, 0x95, 0x2c, 0x42, 0x7b,
+ 0x39, 0x10, 0xa8, 0x76, 0xfa, 0xbe, 0x91, 0xe9},
+ },
+ /* NIST Special Publication 800-38A */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CBC,
+ .cipher_key_length = AES256_KEY_LEN,
+ .cipher_key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4},
+ .cipher_iv_length = AES_CBC_IV_LEN,
+ .cipher_iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .length = 64,
+ .plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
+ 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
+ 0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
+ 0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
+ 0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
+ 0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
+ 0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
+ 0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b }
+ }
+};
+
+static crypto_test_reference_t aes_ctr_reference[] = {
+ /* RFC3686 https://tools.ietf.org/html/rfc3686 */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x7E, 0x24, 0x06, 0x78, 0x17, 0xFA, 0xE0, 0xD7,
+ 0x43, 0xD6, 0xCE, 0x1F, 0x32, 0x53, 0x91, 0x63},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0x00, 0x6C, 0xB6, 0xDB, 0xC0, 0x54, 0x3B, 0x59,
+ 0xDA, 0x48, 0xD9, 0x0B, 0x00, 0x00, 0x00, 0x01 },
+ .length = 32,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F },
+ .ciphertext = { 0x51, 0x04, 0xA1, 0x06, 0x16, 0x8A, 0x72, 0xD9,
+ 0x79, 0x0D, 0x41, 0xEE, 0x8E, 0xDA, 0xD3, 0x88,
+ 0xEB, 0x2E, 0x1E, 0xFC, 0x46, 0xDA, 0x57, 0xC8,
+ 0xFC, 0xE6, 0x30, 0xDF, 0x91, 0x41, 0xBE, 0x28}
+ },
+ /* NIST Special Publication 800-38A */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff },
+ .length = 64,
+ .plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee }
+ },
+ /* Generated by Crypto++ 5.6.1 (715 bytes data)*/
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES128_KEY_LEN,
+ .cipher_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff },
+ .length = 715,
+ .plaintext = { 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f,
+ 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17,
+ 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac,
+ 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e,
+ 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4,
+ 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52,
+ 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b,
+ 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37,
+ 0x10, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40,
+ 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93,
+ 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03,
+ 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf,
+ 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c,
+ 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a,
+ 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f,
+ 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c,
+ 0x37, 0x10, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e,
+ 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73,
+ 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e,
+ 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45,
+ 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3,
+ 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a,
+ 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf,
+ 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6,
+ 0x6c, 0x37, 0x10, 0x00, 0x6b, 0xc1, 0xbe, 0xe2,
+ 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11,
+ 0x73, 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57,
+ 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac,
+ 0x45, 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46,
+ 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19,
+ 0x1a, 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45,
+ 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b,
+ 0xe6, 0x6c, 0x37, 0x10, 0x00, 0x6b, 0xc1, 0xbe,
+ 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e,
+ 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a,
+ 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f,
+ 0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c,
+ 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1,
+ 0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24,
+ 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41,
+ 0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00, 0x6b, 0xc1,
+ 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d,
+ 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae, 0x2d,
+ 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7,
+ 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30, 0xc8,
+ 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb,
+ 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6, 0x9f,
+ 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b,
+ 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00, 0x6b,
+ 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9,
+ 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae,
+ 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e,
+ 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30,
+ 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5,
+ 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6,
+ 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad,
+ 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00,
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10,
+ 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f,
+ 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17,
+ 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac,
+ 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e,
+ 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4,
+ 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52,
+ 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b,
+ 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37,
+ 0x10, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40,
+ 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93,
+ 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03,
+ 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf,
+ 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c,
+ 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a,
+ 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f,
+ 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c,
+ 0x37, 0x10, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e,
+ 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73,
+ 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e,
+ 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45,
+ 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3,
+ 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a,
+ 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf,
+ 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6,
+ 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0xEC, 0xE7, 0x1E, 0xCD, 0x7A, 0x4E, 0x3C, 0x2F,
+ 0x64, 0x3B, 0x2B, 0x0B, 0xFB, 0xED, 0x32, 0xF3,
+ 0x1C, 0x85, 0x51, 0xB6, 0x30, 0x6D, 0x52, 0xCF,
+ 0x84, 0x3E, 0xC0, 0xB8, 0x50, 0x15, 0xDC, 0x20,
+ 0x3B, 0x1C, 0x0B, 0x64, 0x3E, 0x2A, 0x6B, 0xAB,
+ 0xAF, 0x51, 0x33, 0xDA, 0x0E, 0xA0, 0x66, 0x16,
+ 0x07, 0x6A, 0xA6, 0xBB, 0xB5, 0x2E, 0xD7, 0x5D,
+ 0xC3, 0xA7, 0x1A, 0x9A, 0x6E, 0x8A, 0xC7, 0xC9,
+ 0xA0, 0x0D, 0x2C, 0x39, 0xAA, 0x68, 0xBF, 0x4E,
+ 0x6F, 0xFE, 0xD9, 0xAA, 0xEE, 0x5A, 0xD6, 0x91,
+ 0x4F, 0xB3, 0xEA, 0x77, 0xC7, 0xB6, 0x1F, 0xF6,
+ 0xBF, 0x56, 0x4F, 0x2F, 0x12, 0x25, 0xAC, 0xB4,
+ 0xB5, 0x88, 0x9C, 0xB1, 0x55, 0x98, 0x88, 0xA5,
+ 0x81, 0x78, 0x49, 0xC3, 0x82, 0xE1, 0x68, 0x48,
+ 0x2F, 0x75, 0x38, 0x1F, 0x63, 0x86, 0x8C, 0x46,
+ 0x8E, 0x4D, 0x15, 0x83, 0xB1, 0xFE, 0x71, 0xDD,
+ 0x80, 0x8C, 0xB9, 0x4D, 0x81, 0x50, 0xAA, 0xB9,
+ 0xD5, 0x30, 0xA0, 0xFC, 0x17, 0xCD, 0xE7, 0x48,
+ 0xE9, 0x55, 0x45, 0xD8, 0xA0, 0x33, 0xB2, 0xF6,
+ 0x1F, 0x19, 0x54, 0xD0, 0xC0, 0x22, 0x61, 0x68,
+ 0x02, 0x2E, 0x1C, 0xD7, 0xE0, 0x31, 0xC5, 0x7D,
+ 0x04, 0x8A, 0xC5, 0x60, 0xF1, 0x52, 0x96, 0x0F,
+ 0x47, 0x70, 0x5E, 0x17, 0x4D, 0x95, 0x6D, 0x4B,
+ 0xB5, 0x3A, 0xE8, 0x0B, 0xFF, 0xCD, 0x1B, 0xD5,
+ 0x69, 0xED, 0x8E, 0xFF, 0xA2, 0x23, 0xC0, 0x05,
+ 0x58, 0xB7, 0x02, 0x40, 0x5F, 0x33, 0xE6, 0xE0,
+ 0xED, 0xB2, 0xD9, 0xB0, 0xC1, 0x48, 0xA1, 0x44,
+ 0x1C, 0xC8, 0x0D, 0x6A, 0xBB, 0xCE, 0x78, 0x5A,
+ 0xA1, 0xB9, 0xDA, 0xB7, 0xCB, 0x88, 0x32, 0xF1,
+ 0xB1, 0x2D, 0x2E, 0xE6, 0x0E, 0xE2, 0xDF, 0xCA,
+ 0x37, 0x94, 0x2C, 0xA1, 0x72, 0x4E, 0x56, 0x02,
+ 0xB7, 0xB7, 0x05, 0x25, 0xAC, 0x96, 0x62, 0x02,
+ 0x8A, 0x22, 0xDB, 0x23, 0x46, 0x76, 0x61, 0x5D,
+ 0xB4, 0x74, 0x53, 0x8C, 0xBC, 0x8D, 0x19, 0x7F,
+ 0x38, 0xC8, 0x8B, 0xCC, 0x4F, 0x9E, 0x8D, 0x20,
+ 0x75, 0x38, 0xCA, 0x18, 0xDE, 0x5F, 0x09, 0x54,
+ 0x20, 0xA2, 0xE4, 0xD5, 0x86, 0x8C, 0xEB, 0xB8,
+ 0xB3, 0x4A, 0x93, 0x77, 0xDC, 0x52, 0xD1, 0x19,
+ 0x79, 0x0B, 0x65, 0x21, 0x0F, 0x1B, 0x34, 0x6F,
+ 0x5E, 0x00, 0xD9, 0xBD, 0x00, 0xA8, 0x84, 0x70,
+ 0x48, 0x91, 0x3D, 0x80, 0x72, 0x6B, 0x9B, 0x74,
+ 0x5D, 0x56, 0x5E, 0x62, 0x84, 0xB9, 0x86, 0xDB,
+ 0xAE, 0xA9, 0x97, 0xFF, 0xC5, 0xA0, 0xDE, 0x50,
+ 0x51, 0x52, 0x7D, 0x44, 0xB2, 0xC1, 0x26, 0x6D,
+ 0xBC, 0x91, 0x30, 0xA6, 0xEB, 0x15, 0xF3, 0x7A,
+ 0x0F, 0x00, 0xB6, 0x28, 0x6D, 0x66, 0x78, 0xCA,
+ 0x65, 0x1C, 0x07, 0x74, 0x3B, 0xD3, 0x7F, 0x2E,
+ 0x8F, 0x6A, 0x94, 0xF5, 0xED, 0x8C, 0x63, 0x42,
+ 0x8A, 0xE4, 0x88, 0x3A, 0x96, 0x95, 0x18, 0x38,
+ 0x07, 0xE1, 0x04, 0xBC, 0x33, 0x5C, 0x64, 0xFE,
+ 0xAA, 0xC4, 0x0A, 0x60, 0x59, 0x13, 0xDF, 0x98,
+ 0xFF, 0x44, 0xE0, 0x80, 0x1B, 0x31, 0xA9, 0x68,
+ 0xCC, 0xE5, 0xDC, 0xAF, 0xAD, 0xE1, 0xE0, 0x17,
+ 0xFA, 0x71, 0x1E, 0x05, 0xFF, 0x5A, 0x54, 0xBF,
+ 0xA1, 0x99, 0x9C, 0x2C, 0x46, 0x3F, 0x97, 0xA3,
+ 0xA6, 0x6B, 0x30, 0x21, 0x1B, 0xD3, 0x06, 0xC8,
+ 0x91, 0x1C, 0x98, 0xF8, 0xEE, 0x5E, 0xF4, 0x7A,
+ 0x54, 0x74, 0x6A, 0x4D, 0x16, 0xB7, 0xC7, 0x42,
+ 0x4A, 0x69, 0x54, 0xB4, 0xFC, 0x3B, 0xCF, 0x1A,
+ 0x41, 0xBD, 0xE8, 0xA1, 0x9C, 0xE1, 0x02, 0x7A,
+ 0xE8, 0x6A, 0x32, 0x0D, 0x0E, 0x5E, 0x7D, 0x3C,
+ 0x7E, 0x50, 0xCF, 0xD0, 0xC4, 0x66, 0x5B, 0x81,
+ 0x1D, 0x86, 0xC3, 0x13, 0xF0, 0x9A, 0xDE, 0x5B,
+ 0x4D, 0xBE, 0x01, 0x72, 0x31, 0x85, 0x98, 0x81,
+ 0xE5, 0x87, 0x3E, 0x9E, 0xDB, 0x20, 0x11, 0xCF,
+ 0x59, 0x20, 0xD2, 0xF7, 0x27, 0x7C, 0x4D, 0xE1,
+ 0xAC, 0x43, 0x0A, 0x18, 0x49, 0xF0, 0xB8, 0x70,
+ 0xA6, 0x9A, 0xBE, 0x70, 0x1B, 0x6D, 0x0B, 0x51,
+ 0x23, 0xE5, 0xFF, 0x53, 0x39, 0x54, 0x09, 0x17,
+ 0x7C, 0xF8, 0x4B, 0xF4, 0x1E, 0xC3, 0x3C, 0x5E,
+ 0x4B, 0xCC, 0x2C, 0xF2, 0x92, 0x58, 0xDC, 0x7C,
+ 0x26, 0x04, 0x71, 0xAA, 0xBD, 0xA4, 0x9F, 0xDE,
+ 0x62, 0x91, 0x57, 0x58, 0xEE, 0x4E, 0x57, 0x8D,
+ 0x0F, 0x76, 0x98, 0xE6, 0x45, 0x6B, 0xC1, 0x44,
+ 0x57, 0x37, 0x39, 0xD5, 0xD5, 0x08, 0xCC, 0x76,
+ 0xB3, 0x89, 0x35, 0x9D, 0x2A, 0x0E, 0xCB, 0x5B,
+ 0x7E, 0xE5, 0xFC, 0xB4, 0xC3, 0x15, 0x1D, 0x5A,
+ 0xF7, 0xC7, 0x18, 0x19, 0xEA, 0x3D, 0xD5, 0xF3,
+ 0x6C, 0x7B, 0x27, 0xE5, 0x51, 0xFD, 0x23, 0x73,
+ 0xD0, 0x7F, 0xFD, 0xC7, 0x6A, 0x13, 0xFC, 0x4B,
+ 0x10, 0xA6, 0xF2, 0x9A, 0x83, 0xD6, 0xF4, 0x65,
+ 0xAC, 0xB6, 0x96, 0x06, 0x71, 0xEA, 0xCF, 0x21,
+ 0xA3, 0xE1, 0xCB, 0x44, 0x11, 0xC4, 0xDA, 0xA0,
+ 0xC2, 0xA8, 0x7D, 0xAE, 0xD2, 0x8A, 0xEE, 0x60,
+ 0xB7, 0xEC, 0x02, 0x58, 0xA9, 0xAF, 0x12, 0x5F,
+ 0x2D, 0xDC, 0x80, 0xB9, 0x87, 0x7E, 0xFE, 0x0F,
+ 0x37, 0x2D, 0x9B, 0x83, 0x2C, 0x78, 0x67, 0x70,
+ 0xA8, 0x4E, 0xA1, 0xA0, 0x7C, 0xB6, 0xE1, 0xA9,
+ 0x90, 0x7D, 0x65, 0x1B, 0xBD, 0x0E, 0xFD, 0xEF,
+ 0x2A, 0xFF, 0xC3 }
+ },
+ /* RFC3686 https://tools.ietf.org/html/rfc3686 */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES192_KEY_LEN,
+ .cipher_key = { 0x02, 0xBF, 0x39, 0x1E, 0xE8, 0xEC, 0xB1, 0x59,
+ 0xB9, 0x59, 0x61, 0x7B, 0x09, 0x65, 0x27, 0x9B,
+ 0xF5, 0x9B, 0x60, 0xA7, 0x86, 0xD3, 0xE0, 0xFE},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0x00, 0x07, 0xBD, 0xFD, 0x5C, 0xBD, 0x60, 0x27,
+ 0x8D, 0xCC, 0x09, 0x12, 0x00, 0x00, 0x00, 0x01 },
+ .length = 36,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23 },
+ .ciphertext = { 0x96, 0x89, 0x3F, 0xC5, 0x5E, 0x5C, 0x72, 0x2F,
+ 0x54, 0x0B, 0x7D, 0xD1, 0xDD, 0xF7, 0xE7, 0x58,
+ 0xD2, 0x88, 0xBC, 0x95, 0xC6, 0x91, 0x65, 0x88,
+ 0x45, 0x36, 0xC8, 0x11, 0x66, 0x2F, 0x21, 0x88,
+ 0xAB, 0xEE, 0x09, 0x35 }
+ },
+ /* NIST Special Publication 800-38A */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES192_KEY_LEN,
+ .cipher_key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff },
+ .length = 64,
+ .plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2,
+ 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b,
+ 0x09, 0x03, 0x39, 0xec, 0x0a, 0xa6, 0xfa, 0xef,
+ 0xd5, 0xcc, 0xc2, 0xc6, 0xf4, 0xce, 0x8e, 0x94,
+ 0x1e, 0x36, 0xb2, 0x6b, 0xd1, 0xeb, 0xc6, 0x70,
+ 0xd1, 0xbd, 0x1d, 0x66, 0x56, 0x20, 0xab, 0xf7,
+ 0x4f, 0x78, 0xa7, 0xf6, 0xd2, 0x98, 0x09, 0x58,
+ 0x5a, 0x97, 0xda, 0xec, 0x58, 0xc6, 0xb0, 0x50 }
+ },
+ /* RFC3686 https://tools.ietf.org/html/rfc3686 */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES256_KEY_LEN,
+ .cipher_key = { 0xFF, 0x7A, 0x61, 0x7C, 0xE6, 0x91, 0x48, 0xE4,
+ 0xF1, 0x72, 0x6E, 0x2F, 0x43, 0x58, 0x1D, 0xE2,
+ 0xAA, 0x62, 0xD9, 0xF8, 0x05, 0x53, 0x2E, 0xDF,
+ 0xF1, 0xEE, 0xD6, 0x87, 0xFB, 0x54, 0x15, 0x3D},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0x00, 0x1C, 0xC5, 0xB7, 0x51, 0xA5, 0x1D, 0x70,
+ 0xA1, 0xC1, 0x11, 0x48, 0x00, 0x00, 0x00, 0x01 },
+ .length = 36,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23 },
+ .ciphertext = { 0xEB, 0x6C, 0x52, 0x82, 0x1D, 0x0B, 0xBB, 0xF7,
+ 0xCE, 0x75, 0x94, 0x46, 0x2A, 0xCA, 0x4F, 0xAA,
+ 0xB4, 0x07, 0xDF, 0x86, 0x65, 0x69, 0xFD, 0x07,
+ 0xF4, 0x8C, 0xC0, 0xB5, 0x83, 0xD6, 0x07, 0x1F,
+ 0x1E, 0xC0, 0xE6, 0xB8 },
+ },
+ /* NIST Special Publication 800-38A */
+ {
+ .cipher = ODP_CIPHER_ALG_AES_CTR,
+ .cipher_key_length = AES256_KEY_LEN,
+ .cipher_key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4},
+ .cipher_iv_length = AES_CTR_IV_LEN,
+ .cipher_iv = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff },
+ .length = 64,
+ .plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+ .ciphertext = { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5,
+ 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28,
+ 0xf4, 0x43, 0xe3, 0xca, 0x4d, 0x62, 0xb5, 0x9a,
+ 0xca, 0x84, 0xe9, 0x90, 0xca, 0xca, 0xf5, 0xc5,
+ 0x2b, 0x09, 0x30, 0xda, 0xa2, 0x3d, 0xe9, 0x4c,
+ 0xe8, 0x70, 0x17, 0xba, 0x2d, 0x84, 0x98, 0x8d,
+ 0xdf, 0xc9, 0xc5, 0x8d, 0xb6, 0x7a, 0xad, 0xa6,
+ 0x13, 0xc2, 0xdd, 0x08, 0x45, 0x79, 0x41, 0xa6 }
+ },
+};
+
+/*
+ * AES-ECB reference vectors.
+ *
+ * ECB mode takes no IV, so cipher_iv_length and cipher_iv are left
+ * zero-initialized in every entry. Key sizes 128, 192 and 256 bits
+ * are covered; lengths are exact multiples of the 16-byte AES block.
+ */
+static crypto_test_reference_t aes_ecb_reference[] = {
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_ECB,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+				0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10},
+		.ciphertext = { 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60,
+				0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97,
+				0xf5, 0xd3, 0xd5, 0x85, 0x03, 0xb9, 0x69, 0x9d,
+				0xe7, 0x85, 0x89, 0x5a, 0x96, 0xfd, 0xba, 0xaf,
+				0x43, 0xb1, 0xcd, 0x7f, 0x59, 0x8e, 0xce, 0x23,
+				0x88, 0x1b, 0x00, 0xe3, 0xed, 0x03, 0x06, 0x88,
+				0x7b, 0x0c, 0x78, 0x5e, 0x27, 0xe8, 0xad, 0x3f,
+				0x82, 0x23, 0x20, 0x71, 0x04, 0x72, 0x5d, 0xd4 }
+	},
+	/* Generated by Crypto++ 5.6.1 (528 bytes = 33 AES blocks;
+	 * exercises long multi-block operations) */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_ECB,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+				0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+		.length = 528,
+		.plaintext = { 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f,
+			       0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17,
+			       0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac,
+			       0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e,
+			       0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4,
+			       0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52,
+			       0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b,
+			       0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37,
+			       0x10, 0x00, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e,
+			       0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73,
+			       0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e,
+			       0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45,
+			       0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3,
+			       0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a,
+			       0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf,
+			       0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6,
+			       0x6c, 0x37, 0x10, 0x00, 0x00, 0x6b, 0xc1, 0xbe,
+			       0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e,
+			       0x11, 0x73, 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a,
+			       0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f,
+			       0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c,
+			       0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1,
+			       0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24,
+			       0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41,
+			       0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00, 0x00, 0x6b,
+			       0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9,
+			       0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae,
+			       0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e,
+			       0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30,
+			       0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5,
+			       0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6,
+			       0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad,
+			       0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00,
+			       0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f,
+			       0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17,
+			       0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac,
+			       0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e,
+			       0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4,
+			       0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52,
+			       0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b,
+			       0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37,
+			       0x10, 0x00, 0x00, 0x6b, 0xc1, 0xbe, 0xe2, 0x2e,
+			       0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73,
+			       0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a, 0x57, 0x1e,
+			       0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45,
+			       0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3,
+			       0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a,
+			       0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24, 0x45, 0xdf,
+			       0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6,
+			       0x6c, 0x37, 0x10, 0x00, 0x00, 0x6b, 0xc1, 0xbe,
+			       0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e,
+			       0x11, 0x73, 0x93, 0x17, 0x2a, 0xae, 0x2d, 0x8a,
+			       0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f,
+			       0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30, 0xc8, 0x1c,
+			       0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1,
+			       0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6, 0x9f, 0x24,
+			       0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41,
+			       0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00, 0x00, 0x6b,
+			       0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9,
+			       0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae,
+			       0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e,
+			       0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, 0x30,
+			       0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5,
+			       0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, 0xf6,
+			       0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad,
+			       0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10, 0x00},
+		.ciphertext = { 0x84, 0xC6, 0xCB, 0xDC, 0x2B, 0x5A, 0x39, 0x98,
+				0x57, 0x74, 0xB2, 0x3B, 0xAB, 0x06, 0x6A, 0x6A,
+				0xF8, 0xCB, 0x66, 0xC0, 0x8E, 0x4F, 0x05, 0x8E,
+				0x5D, 0x3E, 0x7C, 0x35, 0x1E, 0xA8, 0x45, 0xCE,
+				0xC7, 0xB2, 0x09, 0x21, 0x0E, 0xE7, 0xEF, 0xD3,
+				0x82, 0x69, 0x62, 0x86, 0x87, 0xF2, 0x1C, 0xB9,
+				0xBC, 0xEA, 0x34, 0x9D, 0xC0, 0x41, 0x8A, 0xDB,
+				0xA2, 0xBF, 0x23, 0x64, 0xDF, 0x4D, 0xB1, 0xA1,
+				0x1A, 0xD8, 0x4C, 0xF6, 0xA4, 0x22, 0xCE, 0x95,
+				0xC3, 0x7B, 0x2C, 0xF8, 0x11, 0x96, 0x24, 0x5C,
+				0xD8, 0x57, 0xD0, 0xB9, 0x54, 0xB8, 0x39, 0x85,
+				0xC1, 0x88, 0x82, 0x30, 0xF3, 0xC3, 0x01, 0x84,
+				0x7A, 0xAF, 0x71, 0x42, 0x53, 0xEF, 0x76, 0x8C,
+				0x17, 0xE8, 0x9E, 0x4F, 0x55, 0x13, 0xDB, 0xD5,
+				0xBE, 0xE1, 0x26, 0x6A, 0x2B, 0x2D, 0x70, 0x63,
+				0xCE, 0x3D, 0x0B, 0xA8, 0x71, 0x62, 0x52, 0xC5,
+				0xBC, 0xBB, 0x99, 0x22, 0xCD, 0x46, 0xF3, 0x74,
+				0xB5, 0x2F, 0xDF, 0xF1, 0xFE, 0xBF, 0x15, 0x5F,
+				0xF4, 0xAF, 0xEE, 0x18, 0x78, 0x89, 0x99, 0xBC,
+				0x74, 0x23, 0x4A, 0x3F, 0xFB, 0xA7, 0xB2, 0x85,
+				0x8B, 0xB2, 0x55, 0x2F, 0x17, 0x2E, 0x56, 0xEC,
+				0x47, 0x45, 0x68, 0x78, 0x44, 0x0A, 0xBB, 0x5A,
+				0xDA, 0xE4, 0x99, 0x41, 0xC1, 0xE4, 0x36, 0x16,
+				0xAC, 0x5D, 0x6E, 0x31, 0xA0, 0x11, 0x61, 0x1B,
+				0x82, 0x9F, 0x6A, 0x77, 0xBE, 0x1F, 0x50, 0x75,
+				0x4F, 0x81, 0xF3, 0x5D, 0x24, 0xED, 0x89, 0xFD,
+				0xE8, 0x04, 0xB1, 0x73, 0x63, 0xF9, 0xA8, 0x1C,
+				0x3F, 0x12, 0xAE, 0x06, 0x7F, 0xDD, 0x41, 0xA2,
+				0x98, 0x49, 0x12, 0xCA, 0xE1, 0x92, 0x6C, 0x5F,
+				0xB3, 0xAC, 0x18, 0xE5, 0x41, 0xFA, 0x4A, 0xD1,
+				0xE1, 0x71, 0x88, 0x8E, 0x61, 0x42, 0x8F, 0x2A,
+				0x8F, 0x2E, 0x98, 0x1A, 0xE1, 0x6D, 0x0D, 0x4E,
+				0x41, 0xD3, 0x3E, 0x5E, 0x67, 0x5F, 0x44, 0x6D,
+				0xAE, 0x0F, 0x45, 0x4F, 0xC4, 0xCA, 0x05, 0x6F,
+				0x41, 0xF3, 0xCC, 0x47, 0x44, 0xA9, 0xE9, 0x48,
+				0x42, 0x8B, 0x22, 0x80, 0xF9, 0x66, 0x63, 0xB7,
+				0x23, 0x0C, 0x09, 0x69, 0x25, 0x03, 0xC9, 0x5B,
+				0x3E, 0x34, 0xF8, 0xDE, 0x8D, 0xF2, 0x31, 0x57,
+				0xF4, 0x5B, 0xDF, 0x68, 0x9B, 0x25, 0x8D, 0x99,
+				0x4D, 0x9E, 0x6C, 0xE5, 0xD4, 0xDD, 0x6B, 0xDB,
+				0x96, 0x76, 0x3C, 0xCC, 0x41, 0xDB, 0xBE, 0x57,
+				0xA4, 0x77, 0x8D, 0x5A, 0x9E, 0x90, 0x22, 0x6D,
+				0x61, 0x4C, 0x33, 0x5E, 0x44, 0xCA, 0x8A, 0xB4,
+				0x1E, 0xFE, 0xA8, 0x98, 0xBC, 0x17, 0x0C, 0x65,
+				0x41, 0x2F, 0x77, 0x19, 0x4A, 0x43, 0xA1, 0x30,
+				0x5E, 0xF2, 0x3A, 0xC7, 0x0B, 0x05, 0x9E, 0x6E,
+				0x04, 0x77, 0x96, 0xEF, 0x51, 0x8D, 0x76, 0x96,
+				0xBC, 0x3D, 0xAD, 0x5E, 0x26, 0x34, 0xF9, 0x2D,
+				0xD1, 0xC9, 0x0D, 0x20, 0x6A, 0x2B, 0x6D, 0x3A,
+				0x7C, 0xE8, 0x86, 0x68, 0xBE, 0xAD, 0x64, 0x61,
+				0x4E, 0x90, 0x00, 0xAC, 0xFB, 0xA7, 0x9E, 0xB3,
+				0x60, 0x16, 0x06, 0x21, 0x4E, 0x21, 0xE0, 0x8F,
+				0x14, 0xCE, 0x77, 0xE3, 0x6B, 0xB6, 0x6F, 0xE4,
+				0xA0, 0xFC, 0xD2, 0xA2, 0x1B, 0xCA, 0xA2, 0x39,
+				0x1A, 0x9C, 0x20, 0x16, 0xAC, 0x3B, 0xC7, 0xCD,
+				0xF1, 0x43, 0x8E, 0xB6, 0xDD, 0x26, 0x69, 0x66,
+				0x44, 0x58, 0x3E, 0x2B, 0x0A, 0x0C, 0x68, 0x62,
+				0x9D, 0x73, 0x6F, 0x67, 0x23, 0xDF, 0x66, 0x85,
+				0x9C, 0xF8, 0x0B, 0x4E, 0x5B, 0x5C, 0x5B, 0xF0,
+				0x3F, 0x33, 0x4D, 0x65, 0xC4, 0x8D, 0xB3, 0xB2,
+				0x66, 0x0E, 0x2C, 0xE3, 0x3B, 0x51, 0x0F, 0xD6,
+				0x0C, 0x91, 0x2B, 0x85, 0xD1, 0x6A, 0xEE, 0x7C,
+				0xDB, 0xFD, 0xF6, 0x28, 0x5B, 0x0A, 0x77, 0xBA,
+				0xE0, 0x7D, 0x98, 0x7F, 0x9C, 0xE1, 0x72, 0xA5,
+				0x48, 0xE6, 0xBF, 0x0A, 0x30, 0xCF, 0x09, 0x9A,
+				0xA8, 0x2B, 0xE0, 0xA2, 0x5E, 0x0E, 0x89, 0x19 }
+	},
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_ECB,
+		.cipher_key_length = AES192_KEY_LEN,
+		.cipher_key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+				0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+				0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b},
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+		.ciphertext = { 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f,
+				0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc,
+				0x97, 0x41, 0x04, 0x84, 0x6d, 0x0a, 0xd3, 0xad,
+				0x77, 0x34, 0xec, 0xb3, 0xec, 0xee, 0x4e, 0xef,
+				0xef, 0x7a, 0xfd, 0x22, 0x70, 0xe2, 0xe6, 0x0a,
+				0xdc, 0xe0, 0xba, 0x2f, 0xac, 0xe6, 0x44, 0x4e,
+				0x9a, 0x4b, 0x41, 0xba, 0x73, 0x8d, 0x6c, 0x72,
+				0xfb, 0x16, 0x69, 0x16, 0x03, 0xc1, 0x8e, 0x0e }
+	},
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_ECB,
+		.cipher_key_length = AES256_KEY_LEN,
+		.cipher_key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+				0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+				0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+				0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4},
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+		.ciphertext = { 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c,
+				0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8,
+				0x59, 0x1c, 0xcb, 0x10, 0xd4, 0x10, 0xed, 0x26,
+				0xdc, 0x5b, 0xa7, 0x4a, 0x31, 0x36, 0x28, 0x70,
+				0xb6, 0xed, 0x21, 0xb9, 0x9c, 0xa6, 0xf4, 0xf9,
+				0xf1, 0x53, 0xe7, 0xb1, 0xbe, 0xaf, 0xed, 0x1d,
+				0x23, 0x30, 0x4b, 0x7a, 0x39, 0xf9, 0xf3, 0xff,
+				0x06, 0x7d, 0x8d, 0x8f, 0x9e, 0x24, 0xec, 0xc7}
+	}
+};
+
+/*
+ * AES-CFB128 reference vectors (128-bit feedback segment).
+ * One NIST SP 800-38A vector per key size (128/192/256 bits); all
+ * three entries share the same plaintext and the same 16-byte IV.
+ */
+static crypto_test_reference_t aes_cfb128_reference[] = {
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CFB128,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+				0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+		.cipher_iv_length = AES_CFB128_IV_LEN,
+		.cipher_iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10},
+		.ciphertext = { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
+				0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a,
+				0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
+				0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b,
+				0x26, 0x75, 0x1f, 0x67, 0xa3, 0xcb, 0xb1, 0x40,
+				0xb1, 0x80, 0x8c, 0xf1, 0x87, 0xa4, 0xf4, 0xdf,
+				0xc0, 0x4b, 0x05, 0x35, 0x7c, 0x5d, 0x1c, 0x0e,
+				0xea, 0xc4, 0xc6, 0x6f, 0x9f, 0xf7, 0xf2, 0xe6 }
+	},
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CFB128,
+		.cipher_key_length = AES192_KEY_LEN,
+		.cipher_key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+				0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+				0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b},
+		.cipher_iv_length = AES_CFB128_IV_LEN,
+		.cipher_iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+		.ciphertext = { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab,
+				0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74,
+				0x67, 0xce, 0x7f, 0x7f, 0x81, 0x17, 0x36, 0x21,
+				0x96, 0x1a, 0x2b, 0x70, 0x17, 0x1d, 0x3d, 0x7a,
+				0x2e, 0x1e, 0x8a, 0x1d, 0xd5, 0x9b, 0x88, 0xb1,
+				0xc8, 0xe6, 0x0f, 0xed, 0x1e, 0xfa, 0xc4, 0xc9,
+				0xc0, 0x5f, 0x9f, 0x9c, 0xa9, 0x83, 0x4f, 0xa0,
+				0x42, 0xae, 0x8f, 0xba, 0x58, 0x4b, 0x09, 0xff }
+	},
+	/* NIST Special Publication 800-38A */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CFB128,
+		.cipher_key_length = AES256_KEY_LEN,
+		.cipher_key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+				0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+				0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+				0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4},
+		.cipher_iv_length = AES_CFB128_IV_LEN,
+		.cipher_iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.length = 64,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+			       0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+			       0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+			       0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+			       0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+			       0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+			       0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
+		.ciphertext = { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b,
+				0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60,
+				0x39, 0xff, 0xed, 0x14, 0x3b, 0x28, 0xb1, 0xc8,
+				0x32, 0x11, 0x3c, 0x63, 0x31, 0xe5, 0x40, 0x7b,
+				0xdf, 0x10, 0x13, 0x24, 0x15, 0xe5, 0x4b, 0x92,
+				0xa1, 0x3e, 0xd0, 0xa8, 0x26, 0x7a, 0xe2, 0xf9,
+				0x75, 0xa3, 0x85, 0x74, 0x1a, 0xb9, 0xce, 0xf8,
+				0x20, 0x31, 0x62, 0x3d, 0x55, 0xb1, 0xe4, 0x71 }
+	}
+};
+
+/*
+ * AES-XTS reference vectors from the NIST CAVS 11.0 XTSGen files.
+ * XTS keys are double length (two AES keys concatenated): the entries
+ * below use 32-byte (AES-128) and 64-byte (AES-256) key material.
+ * The cipher_iv field carries the 16-byte XTS tweak value.
+ * Vectors whose data length is not a whole number of bytes (130 and
+ * 140 bits) are kept commented out until bit-length support is added.
+ */
+static crypto_test_reference_t aes_xts_reference[] = {
+	/* CAVS 11.0 XTSGen information, #1 */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES128_XTS_KEY_LEN,
+		.cipher_key = { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35,
+				0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62,
+				0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18,
+				0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6,
+			       0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5},
+		.length = 16,
+		.plaintext = { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d,
+			       0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c},
+		.ciphertext = { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a,
+				0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63}
+	},
+	/* CAVS 11.0 XTSGen information, #101 */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES128_XTS_KEY_LEN,
+		.cipher_key = { 0xb7, 0xb9, 0x3f, 0x51, 0x6a, 0xef, 0x29, 0x5e,
+				0xff, 0x3a, 0x29, 0xd8, 0x37, 0xcf, 0x1f, 0x13,
+				0x53, 0x47, 0xe8, 0xa2, 0x1d, 0xae, 0x61, 0x6f,
+				0xf5, 0x06, 0x2b, 0x2e, 0x8d, 0x78, 0xce, 0x5e},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0x87, 0x3e, 0xde, 0xa6, 0x53, 0xb6, 0x43, 0xbd,
+			       0x8b, 0xcf, 0x51, 0x40, 0x31, 0x97, 0xed, 0x14},
+		.length = 32,
+		.plaintext = { 0x23, 0x6f, 0x8a, 0x5b, 0x58, 0xdd, 0x55, 0xf6,
+			       0x19, 0x4e, 0xd7, 0x0c, 0x4a, 0xc1, 0xa1, 0x7f,
+			       0x1f, 0xe6, 0x0e, 0xc9, 0xa6, 0xc4, 0x54, 0xd0,
+			       0x87, 0xcc, 0xb7, 0x7d, 0x6b, 0x63, 0x8c, 0x47},
+		.ciphertext = { 0x22, 0xe6, 0xa3, 0xc6, 0x37, 0x9d, 0xcf, 0x75,
+				0x99, 0xb0, 0x52, 0xb5, 0xa7, 0x49, 0xc7, 0xf7,
+				0x8a, 0xd8, 0xa1, 0x1b, 0x9f, 0x1a, 0xa9, 0x43,
+				0x0c, 0xf3, 0xae, 0xf4, 0x45, 0x68, 0x2e, 0x19}
+	},
+	/* CAVS 11.0 XTSGen information, #227 TODO (Length 130 bits)*/
+	/* {
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES128_XTS_KEY_LEN,
+		.cipher_key = { 0xec, 0x14, 0xc0, 0xa3, 0xb7, 0x72, 0x58, 0x5c,
+				0x15, 0xd4, 0xeb, 0x94, 0xe6, 0x9e, 0x2c, 0x55,
+				0x80, 0xcf, 0x3a, 0x63, 0xc1, 0x7c, 0xe9, 0xda,
+				0xd8, 0x2b, 0xb4, 0x54, 0xe3, 0x87, 0x90, 0x45},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0x4a, 0x02, 0x87, 0xc2, 0x6e, 0xd2, 0x41, 0x26,
+			       0x5b, 0x3a, 0x42, 0xcd, 0xd1, 0x9c, 0xea, 0xe2},
+		.length = 17,
+		.plaintext = { 0x50, 0x82, 0x64, 0x75, 0x82, 0xc6, 0xe5, 0xa7,
+			       0x88, 0x73, 0x6f, 0xc5, 0x90, 0x5e, 0xa5, 0x65,
+			       0xc0 },
+		.ciphertext = { 0x04, 0x3a, 0xb9, 0xc0, 0x3d, 0x5b, 0x44, 0x13,
+				0x1d, 0x3e, 0x6e, 0xb2, 0x57, 0x61, 0x89, 0xde,
+				0x80 },
+	}, */
+	/* CAVS 11.0 XTSGen information, #1 */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES256_XTS_KEY_LEN,
+		.cipher_key = { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e,
+				0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14,
+				0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7,
+				0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c,
+				0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d,
+				0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3,
+				0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58,
+				0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2,
+			       0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64},
+		.length = 32,
+		.plaintext = { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1,
+			       0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64,
+			       0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3,
+			       0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e},
+		.ciphertext = { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5,
+				0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13,
+				0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a,
+				0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb},
+	},
+	/* CAVS 11.0 XTSGen information, #110 */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES256_XTS_KEY_LEN,
+		.cipher_key = { 0x6b, 0x19, 0x84, 0xc2, 0x4e, 0x7e, 0xb6, 0x62,
+				0x8e, 0x3a, 0x11, 0xc9, 0xcc, 0xd2, 0x59, 0x40,
+				0x33, 0xa3, 0xa0, 0xd9, 0x01, 0x6e, 0xae, 0x65,
+				0xc2, 0xf2, 0x4e, 0x09, 0xb9, 0xa6, 0x6e, 0x9f,
+				0xe9, 0xd1, 0x63, 0xa5, 0x06, 0xdf, 0xbc, 0xcf,
+				0x2d, 0x93, 0xe8, 0x99, 0x1e, 0x2f, 0xc5, 0x60,
+				0xe1, 0x04, 0x35, 0xb8, 0x90, 0xb5, 0x88, 0x9a,
+				0x50, 0x03, 0xe4, 0xbf, 0x81, 0x7d, 0xc3, 0xe0},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0x6b, 0xb0, 0xd3, 0xae, 0x4f, 0xa8, 0x6e, 0x43,
+			       0x16, 0x19, 0xe4, 0x07, 0xd5, 0x9a, 0xd4, 0xf4},
+		.length = 48,
+		.plaintext = { 0x6a, 0x74, 0x1a, 0x94, 0x5b, 0xfb, 0xf0, 0xc6,
+			       0x7a, 0xfd, 0x43, 0xba, 0x1f, 0x84, 0x18, 0x16,
+			       0xc0, 0x99, 0x51, 0x58, 0x05, 0xd0, 0xfc, 0x1f,
+			       0x7d, 0xbf, 0x6d, 0xe9, 0x00, 0xe0, 0xaa, 0x7a,
+			       0x21, 0x9c, 0x88, 0x56, 0x32, 0x71, 0xb0, 0x09,
+			       0xd1, 0xac, 0x90, 0xeb, 0x7d, 0xc9, 0x97, 0x35},
+		.ciphertext = { 0xe4, 0x7b, 0xce, 0x29, 0x2b, 0xaa, 0x63, 0xbe,
+				0xf3, 0x16, 0xf6, 0x80, 0xa5, 0xf4, 0x80, 0xa7,
+				0xb8, 0x83, 0xdf, 0xab, 0x6e, 0xd5, 0xa5, 0x7f,
+				0x7e, 0x29, 0xec, 0xb8, 0x9e, 0x35, 0x4a, 0x31,
+				0xc9, 0xb1, 0x74, 0xc4, 0xab, 0xad, 0x6c, 0xba,
+				0xba, 0xba, 0x19, 0x14, 0x0c, 0x46, 0x20, 0xa3},
+	},
+	/* CAVS 11.0 XTSGen information, #211 TODO: length 140 bits */
+	/* {
+		.cipher = ODP_CIPHER_ALG_AES_XTS,
+		.cipher_key_length = AES256_XTS_KEY_LEN,
+		.cipher_key = { 0x62, 0xc2, 0xe4, 0xf8, 0x52, 0xa9, 0x3e, 0xea,
+				0x4a, 0x2f, 0x61, 0xe8, 0x67, 0x68, 0x14, 0xf4,
+				0xa8, 0x0d, 0xc4, 0x7e, 0xe1, 0x81, 0x32, 0xc8,
+				0x38, 0xbf, 0x89, 0xa6, 0x18, 0xfd, 0xb8, 0xe2,
+				0x91, 0x3e, 0x2e, 0x5c, 0x32, 0x1b, 0x19, 0xea,
+				0x04, 0xbb, 0xa6, 0x34, 0x7d, 0x22, 0x6f, 0x41,
+				0xdb, 0xee, 0x88, 0x0d, 0x61, 0x67, 0xb8, 0xe1,
+				0xe9, 0x17, 0xfa, 0xf0, 0x46, 0xf0, 0x87, 0x5e},
+		.cipher_iv_length = AES_XTS_IV_LEN,
+		.cipher_iv = { 0x53, 0x7e, 0xe3, 0xdc, 0x13, 0xce, 0x27, 0xa8,
+			       0xd3, 0x0e, 0x6e, 0x42, 0xb5, 0xb9, 0x96, 0xae},
+		.length = 18,
+		.plaintext = { 0x00, 0xc9, 0xeb, 0x87, 0x78, 0xe0, 0x3d, 0xdd,
+			       0x5f, 0x3d, 0xe8, 0xc1, 0x8b, 0x34, 0x8f, 0xac,
+			       0x9c, 0x30},
+		.ciphertext = { 0x9d, 0x4a, 0x08, 0xac, 0x0f, 0xb4, 0x4e, 0x90,
+				0xd0, 0x5f, 0x62, 0x86, 0x19, 0x3f, 0x3a, 0xab,
+				0xc2, 0x90},
+	} */
+};
+
+/* AES-GCM test vectors extracted from
+ * https://tools.ietf.org/html/draft-mcgrew-gcm-test-01#section-2
+ *
+ * AEAD entries: cipher and auth algorithms are both set. The AAD and
+ * the 16-byte authentication tag (digest) are carried separately from
+ * the ciphertext. Per the draft, the plaintexts are IP packets.
+ */
+static crypto_test_reference_t aes_gcm_reference[] = {
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x4c, 0x80, 0xcd, 0xef, 0xbb, 0x5d, 0x10, 0xda,
+				0x90, 0x6a, 0xc7, 0x3c, 0x36, 0x13, 0xa6, 0x34},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0x2e, 0x44, 0x3b, 0x68, 0x49, 0x56, 0xed, 0x7e,
+			       0x3b, 0x24, 0x4c, 0xfe },
+		.length = 72,
+		.plaintext = { 0x45, 0x00, 0x00, 0x48, 0x69, 0x9a, 0x00, 0x00,
+			       0x80, 0x11, 0x4d, 0xb7, 0xc0, 0xa8, 0x01, 0x02,
+			       0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x9b, 0xf1, 0x56,
+			       0x38, 0xd3, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
+			       0x00, 0x00, 0x00, 0x00, 0x04, 0x5f, 0x73, 0x69,
+			       0x70, 0x04, 0x5f, 0x75, 0x64, 0x70, 0x03, 0x73,
+			       0x69, 0x70, 0x09, 0x63, 0x79, 0x62, 0x65, 0x72,
+			       0x63, 0x69, 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00,
+			       0x00, 0x21, 0x00, 0x01, 0x01, 0x02, 0x02, 0x01 },
+		.ciphertext = { 0xfe, 0xcf, 0x53, 0x7e, 0x72, 0x9d, 0x5b, 0x07,
+				0xdc, 0x30, 0xdf, 0x52, 0x8d, 0xd2, 0x2b, 0x76,
+				0x8d, 0x1b, 0x98, 0x73, 0x66, 0x96, 0xa6, 0xfd,
+				0x34, 0x85, 0x09, 0xfa, 0x13, 0xce, 0xac, 0x34,
+				0xcf, 0xa2, 0x43, 0x6f, 0x14, 0xa3, 0xf3, 0xcf,
+				0x65, 0x92, 0x5b, 0xf1, 0xf4, 0xa1, 0x3c, 0x5d,
+				0x15, 0xb2, 0x1e, 0x18, 0x84, 0xf5, 0xff, 0x62,
+				0x47, 0xae, 0xab, 0xb7, 0x86, 0xb9, 0x3b, 0xce,
+				0x61, 0xbc, 0x17, 0xd7, 0x68, 0xfd, 0x97, 0x32},
+		.aad_length = 12,
+		.aad = { 0x00, 0x00, 0x43, 0x21, 0x87, 0x65, 0x43, 0x21,
+			 0x00, 0x00, 0x00, 0x00 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0x45, 0x90, 0x18, 0x14, 0x8f, 0x6c, 0xbe, 0x72,
+			    0x2f, 0xd0, 0x47, 0x96, 0x56, 0x2d, 0xfd, 0xb4 }
+	},
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+				0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			       0xde, 0xca, 0xf8, 0x88 },
+		.length = 64,
+		.plaintext = { 0x45, 0x00, 0x00, 0x3e, 0x69, 0x8f, 0x00, 0x00,
+			       0x80, 0x11, 0x4d, 0xcc, 0xc0, 0xa8, 0x01, 0x02,
+			       0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x98, 0x00, 0x35,
+			       0x00, 0x2a, 0x23, 0x43, 0xb2, 0xd0, 0x01, 0x00,
+			       0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			       0x03, 0x73, 0x69, 0x70, 0x09, 0x63, 0x79, 0x62,
+			       0x65, 0x72, 0x63, 0x69, 0x74, 0x79, 0x02, 0x64,
+			       0x6b, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01 },
+		.ciphertext = { 0xde, 0xb2, 0x2c, 0xd9, 0xb0, 0x7c, 0x72, 0xc1,
+				0x6e, 0x3a, 0x65, 0xbe, 0xeb, 0x8d, 0xf3, 0x04,
+				0xa5, 0xa5, 0x89, 0x7d, 0x33, 0xae, 0x53, 0x0f,
+				0x1b, 0xa7, 0x6d, 0x5d, 0x11, 0x4d, 0x2a, 0x5c,
+				0x3d, 0xe8, 0x18, 0x27, 0xc1, 0x0e, 0x9a, 0x4f,
+				0x51, 0x33, 0x0d, 0x0e, 0xec, 0x41, 0x66, 0x42,
+				0xcf, 0xbb, 0x85, 0xa5, 0xb4, 0x7e, 0x48, 0xa4,
+				0xec, 0x3b, 0x9b, 0xa9, 0x5d, 0x91, 0x8b, 0xd1},
+		.aad_length = 8,
+		.aad = { 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0x83, 0xb7, 0x0d, 0x3a, 0xa8, 0xbc, 0x6e, 0xe4,
+			    0xc3, 0x09, 0xe9, 0xd8, 0x5a, 0x41, 0xad, 0x4a }
+	},
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			       0x00, 0x00, 0x00, 0x00 },
+		.length = 64,
+		.plaintext = { 0x45, 0x00, 0x00, 0x3c, 0x99, 0xc5, 0x00, 0x00,
+			       0x80, 0x01, 0xcb, 0x7a, 0x40, 0x67, 0x93, 0x18,
+			       0x01, 0x01, 0x01, 0x01, 0x08, 0x00, 0x07, 0x5c,
+			       0x02, 0x00, 0x44, 0x00, 0x61, 0x62, 0x63, 0x64,
+			       0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+			       0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+			       0x75, 0x76, 0x77, 0x61, 0x62, 0x63, 0x64, 0x65,
+			       0x66, 0x67, 0x68, 0x69, 0x01, 0x02, 0x02, 0x01 },
+		.ciphertext = { 0x46, 0x88, 0xda, 0xf2, 0xf9, 0x73, 0xa3, 0x92,
+				0x73, 0x29, 0x09, 0xc3, 0x31, 0xd5, 0x6d, 0x60,
+				0xf6, 0x94, 0xab, 0xaa, 0x41, 0x4b, 0x5e, 0x7f,
+				0xf5, 0xfd, 0xcd, 0xff, 0xf5, 0xe9, 0xa2, 0x84,
+				0x45, 0x64, 0x76, 0x49, 0x27, 0x19, 0xff, 0xb6,
+				0x4d, 0xe7, 0xd9, 0xdc, 0xa1, 0xe1, 0xd8, 0x94,
+				0xbc, 0x3b, 0xd5, 0x78, 0x73, 0xed, 0x4d, 0x18,
+				0x1d, 0x19, 0xd4, 0xd5, 0xc8, 0xc1, 0x8a, 0xf3},
+		.aad_length = 8,
+		.aad = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0xf8, 0x21, 0xd4, 0x96, 0xee, 0xb0, 0x96, 0xe9,
+			    0x8a, 0xd2, 0xb6, 0x9e, 0x47, 0x99, 0xc7, 0x1d }
+	},
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0x3d, 0xe0, 0x98, 0x74, 0xb3, 0x88, 0xe6, 0x49,
+				0x19, 0x88, 0xd0, 0xc3, 0x60, 0x7e, 0xae, 0x1f},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0x57, 0x69, 0x0e, 0x43, 0x4e, 0x28, 0x00, 0x00,
+			       0xa2, 0xfc, 0xa1, 0xa3 },
+		.length = 28,
+		.plaintext = { 0x45, 0x00, 0x00, 0x1c, 0x42, 0xa2, 0x00, 0x00,
+			       0x80, 0x01, 0x44, 0x1f, 0x40, 0x67, 0x93, 0xb6,
+			       0xe0, 0x00, 0x00, 0x02, 0x0a, 0x00, 0xf5, 0xff,
+			       0x01, 0x02, 0x02, 0x01 },
+		.ciphertext = { 0xfb, 0xa2, 0xca, 0x84, 0x5e, 0x5d, 0xf9, 0xf0,
+				0xf2, 0x2c, 0x3e, 0x6e, 0x86, 0xdd, 0x83, 0x1e,
+				0x1f, 0xc6, 0x57, 0x92, 0xcd, 0x1a, 0xf9, 0x13,
+				0x0e, 0x13, 0x79, 0xed },
+		.aad_length = 12,
+		.aad = { 0x42, 0xf6, 0x7e, 0x3f, 0x10, 0x10, 0x10, 0x10,
+			 0x10, 0x10, 0x10, 0x10 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0x36, 0x9f, 0x07, 0x1f, 0x35, 0xe0, 0x34, 0xbe,
+			    0x95, 0xf1, 0x12, 0xe4, 0xe7, 0xd0, 0x5d, 0x35 }
+	},
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES192_KEY_LEN,
+		.cipher_key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+				0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+				0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			       0xde, 0xca, 0xf8, 0x88 },
+		.length = 40,
+		.plaintext = { 0x45, 0x00, 0x00, 0x28, 0xa4, 0xad, 0x40, 0x00,
+			       0x40, 0x06, 0x78, 0x80, 0x0a, 0x01, 0x03, 0x8f,
+			       0x0a, 0x01, 0x06, 0x12, 0x80, 0x23, 0x06, 0xb8,
+			       0xcb, 0x71, 0x26, 0x02, 0xdd, 0x6b, 0xb0, 0x3e,
+			       0x50, 0x10, 0x16, 0xd0, 0x75, 0x68, 0x00, 0x01 },
+		.ciphertext = { 0xa5, 0xb1, 0xf8, 0x06, 0x60, 0x29, 0xae, 0xa4,
+				0x0e, 0x59, 0x8b, 0x81, 0x22, 0xde, 0x02, 0x42,
+				0x09, 0x38, 0xb3, 0xab, 0x33, 0xf8, 0x28, 0xe6,
+				0x87, 0xb8, 0x85, 0x8b, 0x5b, 0xfb, 0xdb, 0xd0,
+				0x31, 0x5b, 0x27, 0x45, 0x21, 0x44, 0xcc, 0x77},
+		.aad_length = 8,
+		.aad = { 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0x95, 0x45, 0x7b, 0x96, 0x52, 0x03, 0x7f, 0x53,
+			    0x18, 0x02, 0x7b, 0x5b, 0x4c, 0xd7, 0xa6, 0x36 }
+	},
+	{
+		.cipher = ODP_CIPHER_ALG_AES_GCM,
+		.auth = ODP_AUTH_ALG_AES_GCM,
+		.cipher_key_length = AES256_KEY_LEN,
+		.cipher_key = { 0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+				0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab,
+				0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+				0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab},
+		.cipher_iv_length = AES_GCM_IV_LEN,
+		.cipher_iv = { 0x11, 0x22, 0x33, 0x44, 0x01, 0x02, 0x03, 0x04,
+			       0x05, 0x06, 0x07, 0x08 },
+		.length = 52,
+		.plaintext = { 0x45, 0x00, 0x00, 0x30, 0x69, 0xa6, 0x40, 0x00,
+			       0x80, 0x06, 0x26, 0x90, 0xc0, 0xa8, 0x01, 0x02,
+			       0x93, 0x89, 0x15, 0x5e, 0x0a, 0x9e, 0x00, 0x8b,
+			       0x2d, 0xc5, 0x7e, 0xe0, 0x00, 0x00, 0x00, 0x00,
+			       0x70, 0x02, 0x40, 0x00, 0x20, 0xbf, 0x00, 0x00,
+			       0x02, 0x04, 0x05, 0xb4, 0x01, 0x01, 0x04, 0x02,
+			       0x01, 0x02, 0x02, 0x01 },
+		.ciphertext = { 0xff, 0x42, 0x5c, 0x9b, 0x72, 0x45, 0x99, 0xdf,
+				0x7a, 0x3b, 0xcd, 0x51, 0x01, 0x94, 0xe0, 0x0d,
+				0x6a, 0x78, 0x10, 0x7f, 0x1b, 0x0b, 0x1c, 0xbf,
+				0x06, 0xef, 0xae, 0x9d, 0x65, 0xa5, 0xd7, 0x63,
+				0x74, 0x8a, 0x63, 0x79, 0x85, 0x77, 0x1d, 0x34,
+				0x7f, 0x05, 0x45, 0x65, 0x9f, 0x14, 0xe9, 0x9d,
+				0xef, 0x84, 0x2d, 0x8e },
+		.aad_length = 8,
+		.aad = { 0x4a, 0x2c, 0xbf, 0xe3, 0x00, 0x00, 0x00, 0x02 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0xb3, 0x35, 0xf4, 0xee, 0xcf, 0xdb, 0xf8, 0x31,
+			    0x82, 0x4b, 0x4c, 0x49, 0x15, 0x95, 0x6c, 0x96 }
+	}
+};
+
+static crypto_test_reference_t aes_ccm_reference[] = {
+	/*
+	 * AES-CCM reference from RFC 3610
+	 */
+	/* AES128 key, 13-byte nonce, 8-byte ICV */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf},
+		.cipher_iv_length = 13,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3, 0xa4, 0xa5 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2,
+				0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80,
+				0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84 },
+		.digest_length = 8,
+		.digest = { 0x17, 0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 }
+	},
+	/* The rest of test vectors are generated manually, no "interesting"
+	 * vectors for use cases in RFC 3610 or SP 800-38C. */
+	/* AES192 key, 13-byte nonce; same nonce/AAD/plaintext as above */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES192_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+				0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7},
+		.cipher_iv_length = 13,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3, 0xa4, 0xa5 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0x57, 0x9f, 0xb8, 0x6e, 0xdd, 0xb4, 0xa6, 0x4a,
+				0xae, 0x5f, 0xe9, 0x6d, 0xbd, 0x75, 0x44, 0x05,
+				0x33, 0xa9, 0xfc, 0x3a, 0x84, 0x57, 0x36 },
+		.digest_length = 8,
+		.digest = { 0x67, 0xae, 0xc8, 0x0a, 0xc5, 0x88, 0xab, 0x16 }
+	},
+	/* AES256 key, 13-byte nonce */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES256_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+				0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+				0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf},
+		.cipher_iv_length = 13,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3, 0xa4, 0xa5 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0x59, 0x61, 0x55, 0x10, 0xa7, 0xc4, 0x3b, 0xfb,
+				0x12, 0x3d, 0x63, 0x6b, 0x46, 0x13, 0xc0, 0x3c,
+				0x6c, 0xe2, 0x69, 0x07, 0x10, 0x2a, 0x3f },
+		.digest_length = 8,
+		.digest = { 0xb5, 0x57, 0x2a, 0x17, 0x2d, 0x49, 0x16, 0xd5 }
+	},
+	/* AES128 key, shorter 11-byte nonce */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES128_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf},
+		.cipher_iv_length = 11,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0xaa, 0x2d, 0x3e, 0xcb, 0xa6, 0x68, 0x63, 0x75,
+				0x8f, 0x03, 0x01, 0x51, 0x16, 0xde, 0x30, 0xed,
+				0x8a, 0xb5, 0x42, 0xdc, 0xfa, 0x72, 0xd0 },
+		.digest_length = 8,
+		.digest = { 0x63, 0xe7, 0x01, 0x5c, 0x69, 0xaf, 0xb4, 0x0c }
+	},
+	/* AES192 key, 11-byte nonce */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES192_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+				0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7},
+		.cipher_iv_length = 11,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0xee, 0x99, 0x99, 0x1e, 0xc5, 0x8f, 0xd7, 0x7e,
+				0x56, 0x71, 0x16, 0x39, 0x8e, 0xc4, 0x4f, 0xcc,
+				0x14, 0x45, 0x57, 0x3e, 0x38, 0x76, 0x51 },
+		.digest_length = 8,
+		.digest = { 0x31, 0x29, 0x47, 0xa4, 0x6d, 0x76, 0x34, 0xb4 }
+	},
+	/* AES256 key, 11-byte nonce */
+	{
+		.cipher = ODP_CIPHER_ALG_AES_CCM,
+		.auth = ODP_AUTH_ALG_AES_CCM,
+		.cipher_key_length = AES256_KEY_LEN,
+		.cipher_key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+				0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+				0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+				0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf},
+		.cipher_iv_length = 11,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xa0,
+			       0xa1, 0xa2, 0xa3 },
+		.aad_length = 8,
+		.aad = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 },
+		.length = 23,
+		.plaintext = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e },
+		.ciphertext = { 0xfa, 0x07, 0x47, 0x5c, 0xe8, 0xc9, 0x37, 0x88,
+				0x54, 0x64, 0xb8, 0xc3, 0x85, 0xbb, 0x76, 0x0b,
+				0xf2, 0xc2, 0x4c, 0x4e, 0x31, 0x16, 0x77 },
+		.digest_length = 8,
+		.digest = { 0x88, 0x56, 0x7e, 0x19, 0x84, 0x13, 0x29, 0xc4 }
+	},
+};
+
+/*
+ * AES-GMAC test vectors. GMAC authenticates only: the ciphertext field
+ * equals the plaintext in every vector and only the digest (tag) differs.
+ */
+static crypto_test_reference_t aes_gmac_reference[] = {
+	/* AES128-GMAC. NOTE(review): looks like the RFC 4543 example
+	 * (ICMP packet with ESP-style header) — confirm source. */
+	{
+		.auth = ODP_AUTH_ALG_AES_GMAC,
+		.auth_key_length = AES128_KEY_LEN,
+		.auth_key = { 0x4c, 0x80, 0xcd, 0xef, 0xbb, 0x5d, 0x10, 0xda,
+			      0x90, 0x6a, 0xc7, 0x3c, 0x36, 0x13, 0xa6, 0x34},
+		.auth_iv_length = AES_GCM_IV_LEN,
+		.auth_iv = { 0x22, 0x43, 0x3c, 0x64, 0x00, 0x00, 0x00, 0x00,
+			     0x00, 0x00, 0x00, 0x00 },
+		.length = 68,
+		.plaintext = { 0x00, 0x00, 0x43, 0x21, 0x00, 0x00, 0x00, 0x07,
+			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			       0x45, 0x00, 0x00, 0x30, 0xda, 0x3a, 0x00, 0x00,
+			       0x80, 0x01, 0xdf, 0x3b, 0xc0, 0xa8, 0x00, 0x05,
+			       0xc0, 0xa8, 0x00, 0x01, 0x08, 0x00, 0xc6, 0xcd,
+			       0x02, 0x00, 0x07, 0x00, 0x61, 0x62, 0x63, 0x64,
+			       0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+			       0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+			       0x01, 0x02, 0x02, 0x01 },
+		.ciphertext = { 0x00, 0x00, 0x43, 0x21, 0x00, 0x00, 0x00, 0x07,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x45, 0x00, 0x00, 0x30, 0xda, 0x3a, 0x00, 0x00,
+				0x80, 0x01, 0xdf, 0x3b, 0xc0, 0xa8, 0x00, 0x05,
+				0xc0, 0xa8, 0x00, 0x01, 0x08, 0x00, 0xc6, 0xcd,
+				0x02, 0x00, 0x07, 0x00, 0x61, 0x62, 0x63, 0x64,
+				0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+				0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+				0x01, 0x02, 0x02, 0x01 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0xf2, 0xa9, 0xa8, 0x36, 0xe1, 0x55, 0x10, 0x6a,
+			    0xa8, 0xdc, 0xd6, 0x18, 0xe4, 0x09, 0x9a, 0xaa }
+	},
+	/* AES192-GMAC from DPDK 17.02 */
+	{
+		.auth = ODP_AUTH_ALG_AES_GMAC,
+		.auth_key_length = AES192_KEY_LEN,
+		.auth_key = { 0xaa, 0x74, 0x0a, 0xbf, 0xad, 0xcd, 0xa7, 0x79,
+			      0x22, 0x0d, 0x3b, 0x40, 0x6c, 0x5d, 0x7e, 0xc0,
+			      0x9a, 0x77, 0xfe, 0x9d, 0x94, 0x10, 0x45, 0x39},
+		.auth_iv_length = AES_GCM_IV_LEN,
+		.auth_iv = { 0xab, 0x22, 0x65, 0xb4, 0xc1, 0x68, 0x95, 0x55,
+			     0x61, 0xf0, 0x43, 0x15 },
+		.length = 80,
+		.plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10},
+		.ciphertext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10},
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0xCF, 0x82, 0x80, 0x64, 0x02, 0x46, 0xF4, 0xFB,
+			    0x33, 0xAE, 0x1D, 0x90, 0xEA, 0x48, 0x83, 0xDB },
+	},
+	/* AES256-GMAC from DPDK 17.02 */
+	{
+		.auth = ODP_AUTH_ALG_AES_GMAC,
+		.auth_key_length = AES256_KEY_LEN,
+		.auth_key = { 0xb5, 0x48, 0xe4, 0x93, 0x4f, 0x5c, 0x64, 0xd3,
+			      0xc0, 0xf0, 0xb7, 0x8f, 0x7b, 0x4d, 0x88, 0x24,
+			      0xaa, 0xc4, 0x6b, 0x3c, 0x8d, 0x2c, 0xc3, 0x5e,
+			      0xe4, 0xbf, 0xb2, 0x54, 0xe4, 0xfc, 0xba, 0xf7},
+		.auth_iv_length = AES_GCM_IV_LEN,
+		.auth_iv = { 0x2e, 0xed, 0xe1, 0xdc, 0x64, 0x47, 0xc7, 0xaf,
+			     0xc4, 0x41, 0x53, 0x58 },
+		.length = 65,
+		.plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+			       0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+			       0x01, 0x02 },
+		.ciphertext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+				0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+				0x01, 0x02 },
+		.digest_length = AES_GCM_DIGEST_LEN,
+		.digest = { 0x77, 0x46, 0x0D, 0x6F, 0xB1, 0x87, 0xDB, 0xA9,
+			    0x46, 0xAD, 0xCD, 0xFB, 0xB7, 0xF9, 0x13, 0xA1 },
+	}
+};
+
+/*
+ * Test vectors from SP800-38B / CSRC examples
+ * 12-byte vectors are just truncated 16-byte vectors
+ */
+static crypto_test_reference_t aes_cmac_reference[] = {
+	/* AES128-CMAC, full 16-byte MAC */
+	{
+		.auth = ODP_AUTH_ALG_AES_CMAC,
+		.auth_key_length = AES128_KEY_LEN,
+		.auth_key = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+			      0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c },
+		.length = 16,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a},
+		.ciphertext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+				0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a
+		},
+		.digest_length = 16,
+		.digest = { 0x07, 0x0a, 0x16, 0xb4, 0x6b, 0x4d, 0x41, 0x44,
+			    0xf7, 0x9b, 0xdd, 0x9d, 0xd0, 0x4a, 0x28, 0x7c },
+	},
+	/* Same vector as above with the MAC truncated to 12 bytes */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = 12,
+	},
+	/* AES192-CMAC */
+	{
+		.auth = ODP_AUTH_ALG_AES_CMAC,
+		.auth_key_length = AES192_KEY_LEN,
+		.auth_key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+			      0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+			      0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b },
+		.length = 16,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a},
+		.ciphertext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+				0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a
+		},
+		.digest_length = 16,
+		.digest = { 0x9e, 0x99, 0xa7, 0xbf, 0x31, 0xe7, 0x10, 0x90,
+			    0x06, 0x62, 0xf6, 0x5e, 0x61, 0x7c, 0x51, 0x84 },
+	},
+	/* Same vector as above with the MAC truncated to 12 bytes */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = 12,
+	},
+	/* AES256-CMAC */
+	{
+		.auth = ODP_AUTH_ALG_AES_CMAC,
+		.auth_key_length = AES256_KEY_LEN,
+		.auth_key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+			      0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+			      0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+			      0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 },
+		.length = 16,
+		.plaintext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+			       0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a},
+		.ciphertext = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+				0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a
+		},
+		.digest_length = 16,
+		.digest = { 0x28, 0xa7, 0x02, 0x3f, 0x45, 0x2e, 0x8f, 0x82,
+			    0xbd, 0x4b, 0xf2, 0x8d, 0x8c, 0x37, 0xc3, 0x5c },
+	},
+	/* Same vector as above with the MAC truncated to 12 bytes */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = 12,
+	},
+};
+
+/*
+ * Test vector from RFC 7539, sections 2.8.2, A.5
+ */
+static crypto_test_reference_t chacha20_poly1305_reference[] = {
+	/* RFC 7539 section 2.8.2 example */
+	{
+		.cipher = ODP_CIPHER_ALG_CHACHA20_POLY1305,
+		.auth = ODP_AUTH_ALG_CHACHA20_POLY1305,
+		.cipher_key_length = CHACHA20_POLY1305_KEY_LEN,
+		.cipher_key = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+				0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+				0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+				0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f},
+		.cipher_iv_length = CHACHA20_POLY1305_IV_LEN,
+		.cipher_iv = { 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43,
+			       0x44, 0x45, 0x46, 0x47 },
+		.length = 114,
+		/* "Ladies and Gentlemen of the class of '99: ..." */
+		.plaintext = { 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61,
+			       0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c,
+			       0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20,
+			       0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73,
+			       0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39,
+			       0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63,
+			       0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66,
+			       0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f,
+			       0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20,
+			       0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20,
+			       0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75,
+			       0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73,
+			       0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f,
+			       0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69,
+			       0x74, 0x2e },
+		.ciphertext = { 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb,
+				0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2,
+				0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe,
+				0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6,
+				0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12,
+				0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b,
+				0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29,
+				0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36,
+				0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c,
+				0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58,
+				0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94,
+				0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc,
+				0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d,
+				0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b,
+				0x61, 0x16 },
+		.aad_length = 12,
+		.aad = { 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4,
+			 0xc5, 0xc6, 0xc7 },
+		.digest_length = 16,
+		.digest = { 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09, 0xe2, 0x6a,
+			    0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60, 0x06, 0x91 }
+	},
+	/* RFC 7539 appendix A.5 example */
+	{
+		.cipher = ODP_CIPHER_ALG_CHACHA20_POLY1305,
+		.auth = ODP_AUTH_ALG_CHACHA20_POLY1305,
+		.cipher_key_length = CHACHA20_POLY1305_KEY_LEN,
+		.cipher_key = { 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+				0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+				0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+				0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0},
+		.cipher_iv_length = CHACHA20_POLY1305_IV_LEN,
+		.cipher_iv = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04,
+			       0x05, 0x06, 0x07, 0x08 },
+		.length = 265,
+		/* "Internet-Drafts are draft documents valid for ..." */
+		.plaintext = { 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+			       0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+			       0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+			       0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+			       0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+			       0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+			       0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+			       0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+			       0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+			       0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+			       0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+			       0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+			       0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+			       0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+			       0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+			       0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+			       0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+			       0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+			       0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+			       0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+			       0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+			       0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+			       0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+			       0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+			       0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+			       0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+			       0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+			       0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+			       0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+			       0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+			       0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+			       0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+			       0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+			       0x9d },
+		.ciphertext = { 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+				0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+				0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+				0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+				0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+				0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+				0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+				0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+				0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+				0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+				0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+				0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+				0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+				0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+				0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+				0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+				0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+				0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+				0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+				0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+				0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+				0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+				0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+				0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+				0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+				0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+				0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+				0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+				0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+				0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+				0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+				0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+				0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+				0x9b },
+		.aad_length = 12,
+		.aad = { 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00,
+			 0x00, 0x4e, 0x91 },
+		.digest_length = CHACHA20_POLY1305_CHECK_LEN,
+		.digest = { 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, 0x22,
+			    0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, 0x38 }
+	},
+};
+
+/*
+ * HMAC-MD5 test vectors. NOTE(review): these match RFC 2202 test cases
+ * 1-3 — confirm. Every second entry re-runs the preceding vector with
+ * the digest truncated to 96 bits (HMAC-MD5-96).
+ */
+static crypto_test_reference_t hmac_md5_reference[] = {
+	{
+		.auth = ODP_AUTH_ALG_MD5_HMAC,
+		.auth_key_length = HMAC_MD5_KEY_LEN,
+		.auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b },
+		.length = 8,
+		/* "Hi There" */
+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.digest_length = HMAC_MD5_CHECK_LEN,
+		.digest = { 0x92, 0x94, 0x72, 0x7a, 0x36, 0x38, 0xbb, 0x1c,
+			    0x13, 0xf4, 0x8e, 0xf8, 0x15, 0x8b, 0xfc, 0x9d },
+
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_MD5_96_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_MD5_HMAC,
+		.auth_key_length = HMAC_MD5_KEY_LEN,
+		/* "Jefe" — only 4 key bytes set, rest are zero.
+		 * NOTE(review): HMAC zero-pads short keys (RFC 2104), so the
+		 * MAC equals the 4-byte-key reference value — confirm this
+		 * is the intent of using the full HMAC_MD5_KEY_LEN here. */
+		.auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+		.length = 28,
+		/* what do ya want for nothing?*/
+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+			       0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+			       0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+			       0x69, 0x6e, 0x67, 0x3f },
+		.ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+				0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+				0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+				0x69, 0x6e, 0x67, 0x3f },
+		.digest_length = HMAC_MD5_CHECK_LEN,
+		.digest = { 0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03,
+			    0xea, 0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38 },
+
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_MD5_96_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_MD5_HMAC,
+		.auth_key_length = HMAC_MD5_KEY_LEN,
+		.auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa },
+		.length = 50,
+		.plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd },
+		.ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd },
+		.digest_length = HMAC_MD5_CHECK_LEN,
+		.digest = { 0x56, 0xbe, 0x34, 0x52, 0x1d, 0x14, 0x4c, 0x88,
+			    0xdb, 0xb8, 0xc7, 0x33, 0xf0, 0xe8, 0xb3, 0xf6 }
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_MD5_96_CHECK_LEN,
+	},
+};
+
+/*
+ * HMAC-SHA-1 test vectors. NOTE(review): these match RFC 2202 test cases
+ * 1-3 — confirm. Every second entry re-runs the preceding vector with
+ * the digest truncated to 96 bits (HMAC-SHA-1-96).
+ */
+static crypto_test_reference_t hmac_sha1_reference[] = {
+	{
+		.auth = ODP_AUTH_ALG_SHA1_HMAC,
+		.auth_key_length = HMAC_SHA1_KEY_LEN,
+		.auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b },
+		.length = 8,
+		/* "Hi There" */
+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.digest_length = HMAC_SHA1_CHECK_LEN,
+		.digest = { 0xb6, 0x17, 0x31, 0x86, 0x55, 0x05, 0x72, 0x64,
+			    0xe2, 0x8b, 0xc0, 0xb6, 0xfb, 0x37, 0x8c, 0x8e,
+			    0xf1, 0x46, 0xbe, 0x00 }
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA1_96_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA1_HMAC,
+		.auth_key_length = HMAC_SHA1_KEY_LEN,
+		/* "Jefe" — only 4 key bytes set, rest are zero; HMAC
+		 * zero-pads short keys (RFC 2104), so the MAC is unchanged */
+		.auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+		.length = 28,
+		/* what do ya want for nothing?*/
+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+			       0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+			       0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+			       0x69, 0x6e, 0x67, 0x3f },
+		.ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+				0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+				0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+				0x69, 0x6e, 0x67, 0x3f },
+		.digest_length = HMAC_SHA1_CHECK_LEN,
+		.digest = { 0xef, 0xfc, 0xdf, 0x6a, 0xe5, 0xeb, 0x2f, 0xa2,
+			    0xd2, 0x74, 0x16, 0xd5, 0xf1, 0x84, 0xdf, 0x9c,
+			    0x25, 0x9a, 0x7c, 0x79 }
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA1_96_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA1_HMAC,
+		.auth_key_length = HMAC_SHA1_KEY_LEN,
+		.auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa },
+		.length = 50,
+		.plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd },
+		.ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd },
+		.digest_length = HMAC_SHA1_CHECK_LEN,
+		.digest = { 0x12, 0x5d, 0x73, 0x42, 0xb9, 0xac, 0x11, 0xcd,
+			    0x91, 0xa3, 0x9a, 0xf4, 0x8a, 0xa1, 0x7b, 0x4f,
+			    0x63, 0xf1, 0x75, 0xd3 }
+	},
+	/* Same vector with truncated (96-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA1_96_CHECK_LEN,
+	},
+};
+
+/*
+ * HMAC-SHA-224 test vectors. NOTE(review): these match RFC 4231 test
+ * cases 1-3 — confirm. Full-length digests only; no truncated variants.
+ */
+static crypto_test_reference_t hmac_sha224_reference[] = {
+	{
+		.auth = ODP_AUTH_ALG_SHA224_HMAC,
+		.auth_key_length = HMAC_SHA224_KEY_LEN,
+		.auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b },
+		.length = 8,
+		/* "Hi There" */
+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.digest_length = HMAC_SHA224_CHECK_LEN,
+		.digest = { 0x89, 0x6f, 0xb1, 0x12, 0x8a, 0xbb, 0xdf, 0x19,
+			    0x68, 0x32, 0x10, 0x7c, 0xd4, 0x9d, 0xf3, 0x3f,
+			    0x47, 0xb4, 0xb1, 0x16, 0x99, 0x12, 0xba, 0x4f,
+			    0x53, 0x68, 0x4b, 0x22 }
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA224_HMAC,
+		.auth_key_length = HMAC_SHA224_KEY_LEN,
+		/* "Jefe" — only 4 key bytes set, rest are zero; HMAC
+		 * zero-pads short keys (RFC 2104), so the MAC is unchanged */
+		.auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+		.length = 28,
+		/* what do ya want for nothing?*/
+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+			       0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+			       0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+			       0x69, 0x6e, 0x67, 0x3f },
+		.ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+				0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+				0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+				0x69, 0x6e, 0x67, 0x3f },
+		.digest_length = HMAC_SHA224_CHECK_LEN,
+		.digest = { 0xa3, 0x0e, 0x01, 0x09, 0x8b, 0xc6, 0xdb, 0xbf,
+			    0x45, 0x69, 0x0f, 0x3a, 0x7e, 0x9e, 0x6d, 0x0f,
+			    0x8b, 0xbe, 0xa2, 0xa3, 0x9e, 0x61, 0x48, 0x00,
+			    0x8f, 0xd0, 0x5e, 0x44 }
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA224_HMAC,
+		.auth_key_length = HMAC_SHA224_KEY_LEN,
+		.auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa },
+		.length = 50,
+		.plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd },
+		.ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd },
+		.digest_length = HMAC_SHA224_CHECK_LEN,
+		.digest = { 0x7f, 0xb3, 0xcb, 0x35, 0x88, 0xc6, 0xc1, 0xf6,
+			    0xff, 0xa9, 0x69, 0x4d, 0x7d, 0x6a, 0xd2, 0x64,
+			    0x93, 0x65, 0xb0, 0xc1, 0xf6, 0x5d, 0x69, 0xd1,
+			    0xec, 0x83, 0x33, 0xea }
+	}
+};
+
+/*
+ * HMAC-SHA-256 test vectors. NOTE(review): these match RFC 4231 test
+ * cases 1-3 — confirm. Every second entry re-runs the preceding vector
+ * with the digest truncated to 128 bits (HMAC-SHA-256-128).
+ */
+static crypto_test_reference_t hmac_sha256_reference[] = {
+	{
+		.auth = ODP_AUTH_ALG_SHA256_HMAC,
+		.auth_key_length = HMAC_SHA256_KEY_LEN,
+		.auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b },
+		.length = 8,
+		/* "Hi There" */
+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.digest_length = HMAC_SHA256_CHECK_LEN,
+		.digest = { 0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53,
+			    0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b,
+			    0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7,
+			    0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7 }
+	},
+	/* Same vector with truncated (128-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA256_128_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA256_HMAC,
+		.auth_key_length = HMAC_SHA256_KEY_LEN,
+		/* "Jefe" — only 4 key bytes set, rest are zero; HMAC
+		 * zero-pads short keys (RFC 2104), so the MAC is unchanged */
+		.auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+		.length = 28,
+		/* what do ya want for nothing?*/
+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+			       0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+			       0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+			       0x69, 0x6e, 0x67, 0x3f },
+		.ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+				0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+				0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+				0x69, 0x6e, 0x67, 0x3f },
+		.digest_length = HMAC_SHA256_CHECK_LEN,
+		.digest = { 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e,
+			    0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7,
+			    0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83,
+			    0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43 }
+	},
+	/* Same vector with truncated (128-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA256_128_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA256_HMAC,
+		.auth_key_length = HMAC_SHA256_KEY_LEN,
+		.auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa },
+		.length = 50,
+		.plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd },
+		.ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd },
+		.digest_length = HMAC_SHA256_CHECK_LEN,
+		.digest = { 0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46,
+			    0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7,
+			    0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22,
+			    0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe }
+	},
+	/* Same vector with truncated (128-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA256_128_CHECK_LEN,
+	},
+};
+
+/*
+ * HMAC-SHA-384 test vectors. NOTE(review): these match RFC 4231 test
+ * cases 1-3 — confirm. Every second entry re-runs the preceding vector
+ * with the digest truncated to 192 bits (HMAC-SHA-384-192).
+ */
+static crypto_test_reference_t hmac_sha384_reference[] = {
+	{
+		.auth = ODP_AUTH_ALG_SHA384_HMAC,
+		.auth_key_length = HMAC_SHA384_KEY_LEN,
+		.auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+			      0x0b, 0x0b, 0x0b, 0x0b },
+		.length = 8,
+		/* "Hi There" */
+		.plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+		.digest_length = HMAC_SHA384_CHECK_LEN,
+		.digest = { 0xaf, 0xd0, 0x39, 0x44, 0xd8, 0x48, 0x95, 0x62,
+			    0x6b, 0x08, 0x25, 0xf4, 0xab, 0x46, 0x90, 0x7f,
+			    0x15, 0xf9, 0xda, 0xdb, 0xe4, 0x10, 0x1e, 0xc6,
+			    0x82, 0xaa, 0x03, 0x4c, 0x7c, 0xeb, 0xc5, 0x9c,
+			    0xfa, 0xea, 0x9e, 0xa9, 0x07, 0x6e, 0xde, 0x7f,
+			    0x4a, 0xf1, 0x52, 0xe8, 0xb2, 0xfa, 0x9c, 0xb6 }
+	},
+	/* Same vector with truncated (192-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA384_192_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA384_HMAC,
+		.auth_key_length = HMAC_SHA384_KEY_LEN,
+		/* "Jefe" — only 4 key bytes set, rest are zero; HMAC
+		 * zero-pads short keys (RFC 2104), so the MAC is unchanged */
+		.auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+		.length = 28,
+		/* what do ya want for nothing?*/
+		.plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+			       0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+			       0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+			       0x69, 0x6e, 0x67, 0x3f },
+		.ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+				0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+				0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+				0x69, 0x6e, 0x67, 0x3f },
+		.digest_length = HMAC_SHA384_CHECK_LEN,
+		.digest = { 0xaf, 0x45, 0xd2, 0xe3, 0x76, 0x48, 0x40, 0x31,
+			    0x61, 0x7f, 0x78, 0xd2, 0xb5, 0x8a, 0x6b, 0x1b,
+			    0x9c, 0x7e, 0xf4, 0x64, 0xf5, 0xa0, 0x1b, 0x47,
+			    0xe4, 0x2e, 0xc3, 0x73, 0x63, 0x22, 0x44, 0x5e,
+			    0x8e, 0x22, 0x40, 0xca, 0x5e, 0x69, 0xe2, 0xc7,
+			    0x8b, 0x32, 0x39, 0xec, 0xfa, 0xb2, 0x16, 0x49 }
+	},
+	/* Same vector with truncated (192-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA384_192_CHECK_LEN,
+	},
+	{
+		.auth = ODP_AUTH_ALG_SHA384_HMAC,
+		.auth_key_length = HMAC_SHA384_KEY_LEN,
+		.auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+			      0xaa, 0xaa, 0xaa, 0xaa },
+		.length = 50,
+		.plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+			       0xdd, 0xdd },
+		.ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+				0xdd, 0xdd },
+		.digest_length = HMAC_SHA384_CHECK_LEN,
+		.digest = {0x88, 0x06, 0x26, 0x08, 0xd3, 0xe6, 0xad, 0x8a,
+			   0x0a, 0xa2, 0xac, 0xe0, 0x14, 0xc8, 0xa8, 0x6f,
+			   0x0a, 0xa6, 0x35, 0xd9, 0x47, 0xac, 0x9f, 0xeb,
+			   0xe8, 0x3e, 0xf4, 0xe5, 0x59, 0x66, 0x14, 0x4b,
+			   0x2a, 0x5a, 0xb3, 0x9d, 0xc1, 0x38, 0x14, 0xb9,
+			   0x4e, 0x3a, 0xb6, 0xe1, 0x01, 0xa3, 0x4f, 0x27 }
+	},
+	/* Same vector with truncated (192-bit) digest */
+	{
+		.copy_previous_vector = 1,
+		.digest_length = HMAC_SHA384_192_CHECK_LEN,
+	},
+};
+
+/*
+ * HMAC-SHA-512 test vectors from RFC 4231 (test cases 1-3).
+ *
+ * Each full-length vector is followed by an entry with
+ * copy_previous_vector set, which repeats the previous vector's data
+ * and overrides only digest_length, exercising truncated-digest
+ * (HMAC-SHA-512-256) operation. Plaintext and ciphertext are identical
+ * because these are authentication-only vectors.
+ */
+static crypto_test_reference_t hmac_sha512_reference[] = {
+ /* RFC 4231 test case 1 */
+ {
+ .auth = ODP_AUTH_ALG_SHA512_HMAC,
+ .auth_key_length = HMAC_SHA512_KEY_LEN,
+ .auth_key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b },
+ .length = 8,
+ /* "Hi There" */
+ .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+ .ciphertext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+ .digest_length = HMAC_SHA512_CHECK_LEN,
+ .digest = { 0x87, 0xaa, 0x7c, 0xde, 0xa5, 0xef, 0x61, 0x9d,
+ 0x4f, 0xf0, 0xb4, 0x24, 0x1a, 0x1d, 0x6c, 0xb0,
+ 0x23, 0x79, 0xf4, 0xe2, 0xce, 0x4e, 0xc2, 0x78,
+ 0x7a, 0xd0, 0xb3, 0x05, 0x45, 0xe1, 0x7c, 0xde,
+ 0xda, 0xa8, 0x33, 0xb7, 0xd6, 0xb8, 0xa7, 0x02,
+ 0x03, 0x8b, 0x27, 0x4e, 0xae, 0xa3, 0xf4, 0xe4,
+ 0xbe, 0x9d, 0x91, 0x4e, 0xeb, 0x61, 0xf1, 0x70,
+ 0x2e, 0x69, 0x6c, 0x20, 0x3a, 0x12, 0x68, 0x54 }
+ },
+ /* Same data as above, digest truncated to 256 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = HMAC_SHA512_256_CHECK_LEN,
+ },
+ /* RFC 4231 test case 2 */
+ {
+ .auth = ODP_AUTH_ALG_SHA512_HMAC,
+ .auth_key_length = HMAC_SHA512_KEY_LEN,
+ /* "Jefe" */
+ .auth_key = { 0x4a, 0x65, 0x66, 0x65 },
+ .length = 28,
+ /* what do ya want for nothing?*/
+ .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f },
+ .ciphertext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f },
+ .digest_length = HMAC_SHA512_CHECK_LEN,
+ .digest = { 0x16, 0x4b, 0x7a, 0x7b, 0xfc, 0xf8, 0x19, 0xe2,
+ 0xe3, 0x95, 0xfb, 0xe7, 0x3b, 0x56, 0xe0, 0xa3,
+ 0x87, 0xbd, 0x64, 0x22, 0x2e, 0x83, 0x1f, 0xd6,
+ 0x10, 0x27, 0x0c, 0xd7, 0xea, 0x25, 0x05, 0x54,
+ 0x97, 0x58, 0xbf, 0x75, 0xc0, 0x5a, 0x99, 0x4a,
+ 0x6d, 0x03, 0x4f, 0x65, 0xf8, 0xf0, 0xe6, 0xfd,
+ 0xca, 0xea, 0xb1, 0xa3, 0x4d, 0x4a, 0x6b, 0x4b,
+ 0x63, 0x6e, 0x07, 0x0a, 0x38, 0xbc, 0xe7, 0x37 }
+ },
+ /* Same data as above, digest truncated to 256 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = HMAC_SHA512_256_CHECK_LEN,
+ },
+ /* RFC 4231 test case 3 */
+ {
+ .auth = ODP_AUTH_ALG_SHA512_HMAC,
+ .auth_key_length = HMAC_SHA512_KEY_LEN,
+ .auth_key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa },
+ .length = 50,
+ .plaintext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd },
+ .ciphertext = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd },
+ .digest_length = HMAC_SHA512_CHECK_LEN,
+ .digest = { 0xfa, 0x73, 0xb0, 0x08, 0x9d, 0x56, 0xa2, 0x84,
+ 0xef, 0xb0, 0xf0, 0x75, 0x6c, 0x89, 0x0b, 0xe9,
+ 0xb1, 0xb5, 0xdb, 0xdd, 0x8e, 0xe8, 0x1a, 0x36,
+ 0x55, 0xf8, 0x3e, 0x33, 0xb2, 0x27, 0x9d, 0x39,
+ 0xbf, 0x3e, 0x84, 0x82, 0x79, 0xa7, 0x22, 0xc8,
+ 0x06, 0xb4, 0x85, 0xa4, 0x7e, 0x67, 0xc8, 0x07,
+ 0xb9, 0x46, 0xa3, 0x37, 0xbe, 0xe8, 0x94, 0x26,
+ 0x74, 0x27, 0x88, 0x59, 0xe1, 0x32, 0x92, 0xfb }
+ },
+ /* Same data as above, digest truncated to 256 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = HMAC_SHA512_256_CHECK_LEN,
+ },
+};
+
+/*
+ * AES-XCBC-MAC test vectors from RFC 3566.
+ *
+ * Each full-length vector is followed by an entry with
+ * copy_previous_vector set, which repeats the previous vector's data
+ * and overrides only digest_length, exercising the truncated
+ * AES-XCBC-MAC-96 variant.
+ */
+static crypto_test_reference_t aes_xcbc_reference[] = {
+ /* Test Case #1 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ .length = 0,
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = {0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
+ 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29}
+ },
+ /* Test Case #1 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #2 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .length = 3,
+ .plaintext = { 0x00, 0x01, 0x02 },
+ .ciphertext = { 0x00, 0x01, 0x02 },
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = { 0x5b, 0x37, 0x65, 0x80, 0xae, 0x2f, 0x19, 0xaf,
+ 0xe7, 0x21, 0x9c, 0xee, 0xf1, 0x72, 0x75, 0x6f }
+ },
+ /* Test Case #2 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #3 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .length = 16,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .ciphertext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = { 0xd2, 0xa2, 0x46, 0xfa, 0x34, 0x9b, 0x68, 0xa7,
+ 0x99, 0x98, 0xa4, 0x39, 0x4f, 0xf7, 0xa2, 0x63 }
+ },
+ /* Test Case #3 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #4 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ .length = 20,
+ .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13 },
+ .ciphertext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13 },
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = { 0x47, 0xf5, 0x1b, 0x45, 0x64, 0x96, 0x62, 0x15,
+ 0xb8, 0x98, 0x5c, 0x63, 0x05, 0x5e, 0xd3, 0x08 }
+ },
+ /* Test Case #4 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #5 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ .length = 32,
+ .plaintext = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f},
+ .ciphertext = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f},
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = {0xf5, 0x4f, 0x0e, 0xc8, 0xd2, 0xb9, 0xf3, 0xd3,
+ 0x68, 0x07, 0x73, 0x4b, 0xd5, 0x28, 0x3f, 0xd4}
+ },
+ /* Test Case #5 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #6 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ .length = 34,
+ .plaintext = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21},
+ .ciphertext = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21},
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = {0xbe, 0xcb, 0xb3, 0xbc, 0xcd, 0xb5, 0x18, 0xa3,
+ 0x06, 0x77, 0xd5, 0x48, 0x1f, 0xb6, 0xb4, 0xd8}
+ },
+ /* Test Case #6 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+ /* Test Case #7 */
+ {
+ .auth = ODP_AUTH_ALG_AES_XCBC_MAC,
+ .auth_key_length = AES_XCBC_MAC_KEY_LEN,
+ .auth_key = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ .length = 1000,
+ /* Plaintext is 1000 zero bytes. No explicit init needed. */
+ .digest_length = AES_XCBC_MAC_CHECK_LEN,
+ .digest = {0xf0, 0xda, 0xfe, 0xe8, 0x95, 0xdb, 0x30, 0x25,
+ 0x37, 0x61, 0x10, 0x3b, 0x5d, 0x84, 0x52, 0x8f}
+ },
+ /* Test Case #7 with digest truncated to 96 bits */
+ {
+ .copy_previous_vector = 1,
+ .digest_length = AES_XCBC_MAC_96_CHECK_LEN,
+ },
+};
+
+/*
+ * Kasumi F8 and F9 test vectors are taken from
+ * 3GPP TS 35.203 V9.0.0 (2009-12)
+ * 3rd Generation Partnership Project;
+ * Technical Specification Group Services and System Aspects;
+ * 3G Security;
+ * Specification of the 3GPP Confidentiality
+ * and Integrity Algorithms;
+ * Document 3: Implementors' Test Data
+ * (Release 9)
+ */
+/* Kasumi F8 (confidentiality) vector; .length is in bits. */
+static crypto_test_reference_t kasumi_f8_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_KASUMI_F8,
+ .cipher_key_length = KASUMI_F8_KEY_LEN,
+ .cipher_key = { 0x5a, 0xcb, 0x1d, 0x64, 0x4c, 0x0d, 0x51, 0x20,
+ 0x4e, 0xa5, 0xf1, 0x45, 0x10, 0x10, 0xd8, 0x52},
+ .cipher_iv_length = KASUMI_F8_IV_LEN,
+ .cipher_iv = { 0xfa, 0x55, 0x6b, 0x26, 0x1c, 0x00, 0x00, 0x00},
+ .length = 120, /* 15 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4,
+ 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8 },
+ .ciphertext = { 0x9b, 0xc9, 0x2c, 0xa8, 0x03, 0xc6, 0x7b, 0x28,
+ 0xa1, 0x1a, 0x4b, 0xee, 0x5a, 0x0c, 0x25 }
+ }
+};
+
+/*
+ * Kasumi F9 (integrity) vector from 3GPP TS 35.203; .length is in
+ * bits. Ciphertext equals plaintext since F9 is authentication-only.
+ */
+static crypto_test_reference_t kasumi_f9_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_KASUMI_F9,
+ .auth_key_length = KASUMI_F9_KEY_LEN,
+ .auth_key = { 0xc7, 0x36, 0xc6, 0xaa, 0xb2, 0x2b, 0xff, 0xf9,
+ 0x1e, 0x26, 0x98, 0xd2, 0xe2, 0x2a, 0xd5, 0x7e },
+ .auth_iv_length = KASUMI_F9_IV_LEN,
+ .auth_iv = { 0x14, 0x79, 0x3e, 0x41, 0x03, 0x97, 0xe8, 0xfd,
+ 0x01, },
+ .length = 384, /* 48 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0xd0, 0xa7, 0xd4, 0x63, 0xdf, 0x9f, 0xb2, 0xb2,
+ 0x78, 0x83, 0x3f, 0xa0, 0x2e, 0x23, 0x5a, 0xa1,
+ 0x72, 0xbd, 0x97, 0x0c, 0x14, 0x73, 0xe1, 0x29,
+ 0x07, 0xfb, 0x64, 0x8b, 0x65, 0x99, 0xaa, 0xa0,
+ 0xb2, 0x4a, 0x03, 0x86, 0x65, 0x42, 0x2b, 0x20,
+ 0xa4, 0x99, 0x27, 0x6a, 0x50, 0x42, 0x70, 0x09},
+ .ciphertext = { 0xd0, 0xa7, 0xd4, 0x63, 0xdf, 0x9f, 0xb2, 0xb2,
+ 0x78, 0x83, 0x3f, 0xa0, 0x2e, 0x23, 0x5a, 0xa1,
+ 0x72, 0xbd, 0x97, 0x0c, 0x14, 0x73, 0xe1, 0x29,
+ 0x07, 0xfb, 0x64, 0x8b, 0x65, 0x99, 0xaa, 0xa0,
+ 0xb2, 0x4a, 0x03, 0x86, 0x65, 0x42, 0x2b, 0x20,
+ 0xa4, 0x99, 0x27, 0x6a, 0x50, 0x42, 0x70, 0x09},
+ .digest_length = KASUMI_F9_DIGEST_LEN,
+ .digest = { 0xdd, 0x7d, 0xfa, 0xdd },
+ }
+};
+
+/*
+ * Snow3G UEA2 & UIA2 test vectors are taken from
+ * Specification of the 3GPP Confidentiality and
+ * Integrity Algorithms UEA2 & UIA2
+ * Document 3: Implementors' Test Data
+ * Version: 1.1
+ * Date: 25th October 2012
+ */
+/* SNOW 3G UEA2 (confidentiality) vector; .length is in bits. */
+static crypto_test_reference_t snow3g_uea2_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_SNOW3G_UEA2,
+ .cipher_key_length = SNOW3G_UEA2_KEY_LEN,
+ .cipher_key = { 0x5a, 0xcb, 0x1d, 0x64, 0x4c, 0x0d, 0x51, 0x20,
+ 0x4e, 0xa5, 0xf1, 0x45, 0x10, 0x10, 0xd8, 0x52},
+ .cipher_iv_length = SNOW3G_UEA2_IV_LEN,
+ .cipher_iv = { 0xfa, 0x55, 0x6b, 0x26, 0x1c, 0x00, 0x00, 0x00,
+ 0xfa, 0x55, 0x6b, 0x26, 0x1c, 0x00, 0x00, 0x00},
+ .length = 120, /* 15 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4,
+ 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8 },
+ .ciphertext = { 0xba, 0x0f, 0x31, 0x30, 0x03, 0x34, 0xc5, 0x6b,
+ 0x52, 0xa7, 0x49, 0x7c, 0xba, 0xc0, 0x46 }
+ }
+};
+
+/*
+ * SNOW 3G UIA2 (integrity) vector; .length is in bits. Ciphertext
+ * equals plaintext since UIA2 is authentication-only.
+ */
+static crypto_test_reference_t snow3g_uia2_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SNOW3G_UIA2,
+ .auth_key_length = SNOW3G_UIA2_KEY_LEN,
+ .auth_key = { 0xc7, 0x36, 0xc6, 0xaa, 0xb2, 0x2b, 0xff, 0xf9,
+ 0x1e, 0x26, 0x98, 0xd2, 0xe2, 0x2a, 0xd5, 0x7e},
+ .auth_iv_length = SNOW3G_UIA2_IV_LEN,
+ .auth_iv = { 0x14, 0x79, 0x3e, 0x41, 0x03, 0x97, 0xe8, 0xfd,
+ 0x94, 0x79, 0x3e, 0x41, 0x03, 0x97, 0x68, 0xfd },
+ .length = 384, /* 48 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0xd0, 0xa7, 0xd4, 0x63, 0xdf, 0x9f, 0xb2, 0xb2,
+ 0x78, 0x83, 0x3f, 0xa0, 0x2e, 0x23, 0x5a, 0xa1,
+ 0x72, 0xbd, 0x97, 0x0c, 0x14, 0x73, 0xe1, 0x29,
+ 0x07, 0xfb, 0x64, 0x8b, 0x65, 0x99, 0xaa, 0xa0,
+ 0xb2, 0x4a, 0x03, 0x86, 0x65, 0x42, 0x2b, 0x20,
+ 0xa4, 0x99, 0x27, 0x6a, 0x50, 0x42, 0x70, 0x09},
+ .ciphertext = { 0xd0, 0xa7, 0xd4, 0x63, 0xdf, 0x9f, 0xb2, 0xb2,
+ 0x78, 0x83, 0x3f, 0xa0, 0x2e, 0x23, 0x5a, 0xa1,
+ 0x72, 0xbd, 0x97, 0x0c, 0x14, 0x73, 0xe1, 0x29,
+ 0x07, 0xfb, 0x64, 0x8b, 0x65, 0x99, 0xaa, 0xa0,
+ 0xb2, 0x4a, 0x03, 0x86, 0x65, 0x42, 0x2b, 0x20,
+ 0xa4, 0x99, 0x27, 0x6a, 0x50, 0x42, 0x70, 0x09},
+ .digest_length = SNOW3G_UIA2_DIGEST_LEN,
+ .digest = { 0x38, 0xb5, 0x54, 0xc0 }
+ }
+};
+
+/*
+ * AES EEA2 and AES EIA2 test vectors from
+ * Specification of the 3GPP Confidentiality and Integrity
+ * Algorithms 128-EEA2 & 128-EIA2
+ */
+/*
+ * 128-EEA2 (AES-CTR based confidentiality) vectors; .length is in
+ * bits, so the last partial byte of ciphertext is significant only
+ * up to the bit length.
+ */
+static crypto_test_reference_t aes_eea2_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_AES_EEA2,
+ .cipher_key_length = AES_EEA2_KEY_LEN,
+ .cipher_key = { 0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C,
+ 0x40, 0x35, 0xC6, 0x68, 0x0A, 0xF8, 0xC6, 0xD1},
+ .cipher_iv_length = AES_EEA2_IV_LEN,
+ .cipher_iv = { 0x39, 0x8a, 0x59, 0xb4, 0xac, },
+ .length = 253,
+ .is_length_in_bits = true,
+ .plaintext = { 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A,
+ 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D, 0x80,
+ 0x8C, 0xE3, 0x3E, 0x2C, 0xC3, 0xC0, 0xB5, 0xFC,
+ 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x66, 0xB1, 0xF0 },
+ .ciphertext = { 0xE9, 0xFE, 0xD8, 0xA6, 0x3D, 0x15, 0x53, 0x04,
+ 0xD7, 0x1D, 0xF2, 0x0B, 0xF3, 0xE8, 0x22, 0x14,
+ 0xB2, 0x0E, 0xD7, 0xDA, 0xD2, 0xF2, 0x33, 0xDC,
+ 0x3C, 0x22, 0xD7, 0xBD, 0xEE, 0xED, 0x8E, 0x78}
+ },
+ {
+ .cipher = ODP_CIPHER_ALG_AES_EEA2,
+ .cipher_key_length = AES_EEA2_KEY_LEN,
+ .cipher_key = { 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC4, 0x40, 0xE0,
+ 0x95, 0x2C, 0x49, 0x10, 0x48, 0x05, 0xFF, 0x48},
+ .cipher_iv_length = AES_EEA2_IV_LEN,
+ .cipher_iv = { 0xc6, 0x75, 0xa6, 0x4b, 0x64, },
+ .length = 798,
+ .is_length_in_bits = true,
+ .plaintext = { 0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61,
+ 0x47, 0x26, 0x44, 0x6A, 0x6C, 0x38, 0xCE, 0xD1,
+ 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92,
+ 0x92, 0x2B, 0x03, 0x45, 0x0D, 0x3A, 0x99, 0x75,
+ 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20,
+ 0xE9, 0xA1, 0xB2, 0x85, 0xE7, 0x62, 0x79, 0x53,
+ 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE,
+ 0xE6, 0x38, 0xBF, 0x5F, 0xD5, 0xA6, 0x06, 0x19,
+ 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80 },
+ .ciphertext = { 0x59, 0x61, 0x60, 0x53, 0x53, 0xC6, 0x4B, 0xDC,
+ 0xA1, 0x5B, 0x19, 0x5E, 0x28, 0x85, 0x53, 0xA9,
+ 0x10, 0x63, 0x25, 0x06, 0xD6, 0x20, 0x0A, 0xA7,
+ 0x90, 0xC4, 0xC8, 0x06, 0xC9, 0x99, 0x04, 0xCF,
+ 0x24, 0x45, 0xCC, 0x50, 0xBB, 0x1C, 0xF1, 0x68,
+ 0xA4, 0x96, 0x73, 0x73, 0x4E, 0x08, 0x1B, 0x57,
+ 0xE3, 0x24, 0xCE, 0x52, 0x59, 0xC0, 0xE7, 0x8D,
+ 0x4C, 0xD9, 0x7B, 0x87, 0x09, 0x76, 0x50, 0x3C,
+ 0x09, 0x43, 0xF2, 0xCB, 0x5A, 0xE8, 0xF0, 0x52,
+ 0xC7, 0xB7, 0xD3, 0x92, 0x23, 0x95, 0x87, 0xB8,
+ 0x95, 0x60, 0x86, 0xBC, 0xAB, 0x18, 0x83, 0x60,
+ 0x42, 0xE2, 0xE6, 0xCE, 0x42, 0x43, 0x2A, 0x17,
+ 0x10, 0x5C, 0x53, 0xD0 }
+ },
+};
+
+/*
+ * 128-EIA2 (AES-CMAC based integrity) vectors from 3GPP TS 33.401
+ * Annex C.2; .length is in bits. Ciphertext equals plaintext since
+ * EIA2 is authentication-only.
+ */
+static crypto_test_reference_t aes_eia2_reference[] = {
+ /* 3GPP TS 33.401, C.2.1 */
+ {
+ .auth = ODP_AUTH_ALG_AES_EIA2,
+ .auth_key_length = AES_EIA2_KEY_LEN,
+ .auth_key = { 0x2b, 0xd6, 0x45, 0x9f, 0x82, 0xc5, 0xb3, 0x00,
+ 0x95, 0x2c, 0x49, 0x10, 0x48, 0x81, 0xff, 0x48 },
+ .auth_iv_length = AES_EIA2_IV_LEN,
+ .auth_iv = { 0x38, 0xa6, 0xf0, 0x56, 0xc0 },
+ .length = 58,
+ .is_length_in_bits = true,
+ .plaintext = { 0x33, 0x32, 0x34, 0x62, 0x63, 0x39, 0x38, 0x40 },
+ .ciphertext = { 0x33, 0x32, 0x34, 0x62, 0x63, 0x39, 0x38, 0x40 },
+ .digest_length = AES_EIA2_DIGEST_LEN,
+ .digest = { 0x11, 0x8c, 0x6e, 0xb8 }
+ },
+ /* 3GPP TS 33.401, C.2.2. */
+ {
+ .auth = ODP_AUTH_ALG_AES_EIA2,
+ .auth_key_length = AES_EIA2_KEY_LEN,
+ .auth_key = { 0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C,
+ 0x40, 0x35, 0xC6, 0x68, 0x0A, 0xF8, 0xC6, 0xD1 },
+ .auth_iv_length = AES_EIA2_IV_LEN,
+ .auth_iv = { 0x39, 0x8a, 0x59, 0xb4, 0xd4, },
+ .length = 64, /* 8 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae },
+ .ciphertext = { 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae},
+ .digest_length = AES_EIA2_DIGEST_LEN,
+ .digest = { 0xb9, 0x37, 0x87, 0xe6 }
+ },
+ /* 3GPP TS 33.401, C.2.5 */
+ {
+ .auth = ODP_AUTH_ALG_AES_EIA2,
+ .auth_key_length = AES_EIA2_KEY_LEN,
+ .auth_key = { 0x83, 0xfd, 0x23, 0xa2, 0x44, 0xa7, 0x4c, 0xf3,
+ 0x58, 0xda, 0x30, 0x19, 0xf1, 0x72, 0x26, 0x35 },
+ .auth_iv_length = AES_EIA2_IV_LEN,
+ .auth_iv = { 0x36, 0xaf, 0x61, 0x44, 0x7c },
+ .length = 768, /* 96 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0x35, 0xc6, 0x87, 0x16, 0x63, 0x3c, 0x66, 0xfb,
+ 0x75, 0x0c, 0x26, 0x68, 0x65, 0xd5, 0x3c, 0x11,
+ 0xea, 0x05, 0xb1, 0xe9, 0xfa, 0x49, 0xc8, 0x39,
+ 0x8d, 0x48, 0xe1, 0xef, 0xa5, 0x90, 0x9d, 0x39,
+ 0x47, 0x90, 0x28, 0x37, 0xf5, 0xae, 0x96, 0xd5,
+ 0xa0, 0x5b, 0xc8, 0xd6, 0x1c, 0xa8, 0xdb, 0xef,
+ 0x1b, 0x13, 0xa4, 0xb4, 0xab, 0xfe, 0x4f, 0xb1,
+ 0x00, 0x60, 0x45, 0xb6, 0x74, 0xbb, 0x54, 0x72,
+ 0x93, 0x04, 0xc3, 0x82, 0xbe, 0x53, 0xa5, 0xaf,
+ 0x05, 0x55, 0x61, 0x76, 0xf6, 0xea, 0xa2, 0xef,
+ 0x1d, 0x05, 0xe4, 0xb0, 0x83, 0x18, 0x1e, 0xe6,
+ 0x74, 0xcd, 0xa5, 0xa4, 0x85, 0xf7, 0x4d, 0x7a },
+ .ciphertext = { 0x35, 0xc6, 0x87, 0x16, 0x63, 0x3c, 0x66, 0xfb,
+ 0x75, 0x0c, 0x26, 0x68, 0x65, 0xd5, 0x3c, 0x11,
+ 0xea, 0x05, 0xb1, 0xe9, 0xfa, 0x49, 0xc8, 0x39,
+ 0x8d, 0x48, 0xe1, 0xef, 0xa5, 0x90, 0x9d, 0x39,
+ 0x47, 0x90, 0x28, 0x37, 0xf5, 0xae, 0x96, 0xd5,
+ 0xa0, 0x5b, 0xc8, 0xd6, 0x1c, 0xa8, 0xdb, 0xef,
+ 0x1b, 0x13, 0xa4, 0xb4, 0xab, 0xfe, 0x4f, 0xb1,
+ 0x00, 0x60, 0x45, 0xb6, 0x74, 0xbb, 0x54, 0x72,
+ 0x93, 0x04, 0xc3, 0x82, 0xbe, 0x53, 0xa5, 0xaf,
+ 0x05, 0x55, 0x61, 0x76, 0xf6, 0xea, 0xa2, 0xef,
+ 0x1d, 0x05, 0xe4, 0xb0, 0x83, 0x18, 0x1e, 0xe6,
+ 0x74, 0xcd, 0xa5, 0xa4, 0x85, 0xf7, 0x4d, 0x7a },
+ .digest_length = AES_EIA2_DIGEST_LEN,
+ .digest = { 0xe6, 0x57, 0xe1, 0x82 }
+ },
+};
+
+/*
+ * ZUC EEA3 and EIA3 test vectors from
+ * Specification of the 3GPP Confidentiality and Integrity
+ * Algorithms 128-EEA3 & 128-EIA3
+ * Document 3: Implementor's Test Data
+ * Version: 1.1
+ * Date: 4th Jan. 2011
+ */
+/*
+ * 128-EEA3 (ZUC confidentiality) vectors; .length is in bits. The
+ * first entry uses the standard 128-bit key/IV; the remaining two use
+ * 256-bit ZUC key and extended IV sizes (privately generated data).
+ */
+static crypto_test_reference_t zuc_eea3_reference[] = {
+ {
+ .cipher = ODP_CIPHER_ALG_ZUC_EEA3,
+ .cipher_key_length = ZUC_EEA3_KEY_LEN,
+ .cipher_key = { 0xe5, 0xbd, 0x3e, 0xa0, 0xeb, 0x55, 0xad, 0xe8,
+ 0x66, 0xc6, 0xac, 0x58, 0xbd, 0x54, 0x30, 0x2a},
+ .cipher_iv_length = ZUC_EEA3_IV_LEN,
+ .cipher_iv = { 0x00, 0x05, 0x68, 0x23, 0xc4, 0x00, 0x00, 0x00,
+ 0x00, 0x05, 0x68, 0x23, 0xc4, 0x00, 0x00, 0x00 },
+ .length = 800, /* 100 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0x14, 0xa8, 0xef, 0x69, 0x3d, 0x67, 0x85, 0x07,
+ 0xbb, 0xe7, 0x27, 0x0a, 0x7f, 0x67, 0xff, 0x50,
+ 0x06, 0xc3, 0x52, 0x5b, 0x98, 0x07, 0xe4, 0x67,
+ 0xc4, 0xe5, 0x60, 0x00, 0xba, 0x33, 0x8f, 0x5d,
+ 0x42, 0x95, 0x59, 0x03, 0x67, 0x51, 0x82, 0x22,
+ 0x46, 0xc8, 0x0d, 0x3b, 0x38, 0xf0, 0x7f, 0x4b,
+ 0xe2, 0xd8, 0xff, 0x58, 0x05, 0xf5, 0x13, 0x22,
+ 0x29, 0xbd, 0xe9, 0x3b, 0xbb, 0xdc, 0xaf, 0x38,
+ 0x2b, 0xf1, 0xee, 0x97, 0x2f, 0xbf, 0x99, 0x77,
+ 0xba, 0xda, 0x89, 0x45, 0x84, 0x7a, 0x2a, 0x6c,
+ 0x9a, 0xd3, 0x4a, 0x66, 0x75, 0x54, 0xe0, 0x4d,
+ 0x1f, 0x7f, 0xa2, 0xc3, 0x32, 0x41, 0xbd, 0x8f,
+ 0x01, 0xba, 0x22, 0x0d },
+ .ciphertext = { 0x13, 0x1d, 0x43, 0xe0, 0xde, 0xa1, 0xbe, 0x5c,
+ 0x5a, 0x1b, 0xfd, 0x97, 0x1d, 0x85, 0x2c, 0xbf,
+ 0x71, 0x2d, 0x7b, 0x4f, 0x57, 0x96, 0x1f, 0xea,
+ 0x32, 0x08, 0xaf, 0xa8, 0xbc, 0xa4, 0x33, 0xf4,
+ 0x56, 0xad, 0x09, 0xc7, 0x41, 0x7e, 0x58, 0xbc,
+ 0x69, 0xcf, 0x88, 0x66, 0xd1, 0x35, 0x3f, 0x74,
+ 0x86, 0x5e, 0x80, 0x78, 0x1d, 0x20, 0x2d, 0xfb,
+ 0x3e, 0xcf, 0xf7, 0xfc, 0xbc, 0x3b, 0x19, 0x0f,
+ 0xe8, 0x2a, 0x20, 0x4e, 0xd0, 0xe3, 0x50, 0xfc,
+ 0x0f, 0x6f, 0x26, 0x13, 0xb2, 0xf2, 0xbc, 0xa6,
+ 0xdf, 0x5a, 0x47, 0x3a, 0x57, 0xa4, 0xa0, 0x0d,
+ 0x98, 0x5e, 0xba, 0xd8, 0x80, 0xd6, 0xf2, 0x38,
+ 0x64, 0xa0, 0x7b, 0x01 }
+ },
+
+ /* Privately generated test data */
+ {
+ .cipher = ODP_CIPHER_ALG_ZUC_EEA3,
+ .cipher_key_length = ZUC_EEA3_256_KEY_LEN,
+ .cipher_key = { 0xf7, 0xb4, 0x04, 0x5a, 0x81, 0x5c, 0x1b, 0x01,
+ 0x82, 0xf9, 0xf4, 0x26, 0x80, 0xd4, 0x56, 0x26,
+ 0xd5, 0xf7, 0x4b, 0x68, 0x48, 0x6b, 0x92, 0x6a,
+ 0x34, 0x1f, 0x86, 0x66, 0x60, 0x0a, 0xfc, 0x57},
+ .cipher_iv_length = ZUC_EEA3_256_IV_LEN,
+ .cipher_iv = { 0x8e, 0x5d, 0xbc, 0x3f, 0xb9, 0xae, 0x66, 0xa3,
+ 0xb9, 0x5c, 0x12, 0x14, 0xdb, 0xc5, 0xbc, 0x18,
+ 0x48, 0x12, 0x09, 0x06, 0x25, 0x33, 0x2e, 0x12,
+ 0x12 },
+ .length = 1024,
+ .is_length_in_bits = true,
+ .plaintext = { 0x36, 0xdb, 0x63, 0x68, 0xb5, 0x1f, 0x4e, 0x92,
+ 0x46, 0x1f, 0xde, 0xdb, 0xc2, 0xec, 0xfa, 0x7e,
+ 0x49, 0x85, 0x77, 0xaa, 0x46, 0x98, 0x30, 0x2d,
+ 0x3b, 0xc4, 0x11, 0x24, 0x98, 0x20, 0xa9, 0xce,
+ 0xfb, 0x0d, 0x36, 0xb0, 0x2c, 0x85, 0x42, 0x72,
+ 0xa4, 0x21, 0x4e, 0x66, 0x0d, 0x48, 0xe4, 0x57,
+ 0xce, 0x5b, 0x01, 0x14, 0xf3, 0x31, 0x42, 0x2e,
+ 0xf5, 0x53, 0x52, 0x8d, 0x73, 0xfc, 0x5c, 0x6e,
+ 0x09, 0x92, 0x1e, 0x35, 0x17, 0x60, 0xa8, 0xbb,
+ 0x81, 0xf6, 0x21, 0x8f, 0x3e, 0x05, 0xe6, 0x0c,
+ 0x60, 0xe7, 0x21, 0x53, 0x18, 0x63, 0x81, 0x0d,
+ 0xb6, 0xd4, 0x9a, 0x29, 0xd0, 0xf6, 0x97, 0xd9,
+ 0x89, 0xb5, 0x0e, 0xa0, 0x15, 0xb6, 0x5c, 0x97,
+ 0xac, 0x7d, 0x26, 0xeb, 0x83, 0x0c, 0xf7, 0xe3,
+ 0xf3, 0x18, 0x37, 0x0b, 0x7b, 0xb8, 0x18, 0x31,
+ 0x8c, 0xb2, 0x5a, 0x5c, 0xa9, 0xf1, 0x35, 0x32 },
+ .ciphertext = { 0xa6, 0xe5, 0x71, 0x58, 0x5c, 0xcf, 0x5d, 0x0d,
+ 0x59, 0xb5, 0x51, 0xab, 0xf5, 0xfa, 0x31, 0xf9,
+ 0x8d, 0x4f, 0xf0, 0x3c, 0x7d, 0x61, 0x8d, 0x7a,
+ 0x6b, 0xcb, 0x2c, 0x79, 0xca, 0x99, 0x06, 0x6f,
+ 0xff, 0x5d, 0x12, 0x5f, 0x0e, 0x7a, 0x33, 0x6b,
+ 0x51, 0xbc, 0x58, 0x53, 0xff, 0xbd, 0x85, 0xc9,
+ 0xac, 0x5f, 0x33, 0xc2, 0xa2, 0xf1, 0x17, 0x7a,
+ 0xd9, 0x3f, 0x81, 0x82, 0x2f, 0x0a, 0xb0, 0xaf,
+ 0xb9, 0x19, 0x3b, 0xfa, 0xcd, 0xa4, 0x06, 0x81,
+ 0x2a, 0x7a, 0xbf, 0x2c, 0x07, 0xde, 0xc1, 0xa4,
+ 0x8c, 0x15, 0x85, 0x81, 0xa6, 0xd3, 0x73, 0x1c,
+ 0x29, 0x0b, 0xee, 0x3c, 0x57, 0xfa, 0x82, 0xad,
+ 0x6f, 0xe0, 0xa1, 0x54, 0x8d, 0xa4, 0x92, 0x29,
+ 0xf4, 0xfa, 0x6d, 0x01, 0xe3, 0x6c, 0xb9, 0x76,
+ 0x80, 0x53, 0xbb, 0x27, 0xb8, 0x18, 0x47, 0x6c,
+ 0xae, 0xb5, 0x44, 0x60, 0x43, 0x9d, 0xa7, 0x3f }
+ },
+ /* Privately generated test data */
+ {
+ .cipher = ODP_CIPHER_ALG_ZUC_EEA3,
+ .cipher_key_length = ZUC_EEA3_256_KEY_LEN,
+ .cipher_key = { 0x1d, 0x0f, 0x0e, 0x75, 0x86, 0xb3, 0xfc, 0x65,
+ 0x94, 0xbf, 0xaa, 0xa8, 0xf5, 0xd0, 0x0f, 0xe8,
+ 0x14, 0x7a, 0x96, 0x61, 0x15, 0x49, 0x79, 0x71,
+ 0x13, 0x82, 0xb4, 0xae, 0x34, 0x04, 0x75, 0x51 },
+ .cipher_iv_length = ZUC_EEA3_256_IV_LEN,
+ .cipher_iv = { 0x98, 0xcc, 0x89, 0x9f, 0xaf, 0x6d, 0x64, 0xb6,
+ 0xb1, 0xe8, 0x21, 0x72, 0xee, 0xb6, 0xcc, 0xe3,
+ 0xcf, 0x32, 0x28, 0x21, 0x21, 0x0d, 0x1e, 0x1c,
+ 0x34 },
+ .length = 1928,
+ .is_length_in_bits = true,
+ .plaintext = { 0xa4, 0xcb, 0x6e, 0x76, 0x99, 0xfb, 0x0c, 0xab,
+ 0x6d, 0x57, 0xb1, 0x69, 0xc0, 0x47, 0x80, 0x63,
+ 0x00, 0xe1, 0xf9, 0x51, 0x10, 0xbe, 0xc0, 0x0f,
+ 0x99, 0x62, 0x2d, 0x71, 0xca, 0x75, 0xa0, 0x6e,
+ 0x41, 0x0e, 0xe4, 0xda, 0x09, 0xf1, 0x86, 0x76,
+ 0x48, 0x37, 0xe0, 0x08, 0x7e, 0x60, 0x6c, 0x7f,
+ 0x41, 0x65, 0xd0, 0x51, 0x24, 0x91, 0x61, 0xbd,
+ 0xf3, 0x8e, 0x2e, 0xbd, 0x04, 0xce, 0x2b, 0x45,
+ 0xdc, 0x0f, 0x1f, 0xe5, 0x00, 0xa5, 0x5c, 0x48,
+ 0xdd, 0x3c, 0x51, 0x5b, 0x9c, 0xbd, 0xda, 0xde,
+ 0x22, 0xab, 0x2f, 0x46, 0x3c, 0x90, 0x03, 0x2f,
+ 0x1f, 0x31, 0xec, 0x23, 0xff, 0x17, 0x68, 0xdb,
+ 0x26, 0x87, 0xc1, 0x27, 0x2d, 0x1d, 0x6f, 0x0a,
+ 0x59, 0xc0, 0x65, 0xf5, 0x7d, 0x40, 0xd3, 0xa0,
+ 0xeb, 0x03, 0xe6, 0x27, 0x93, 0xea, 0x56, 0xb2,
+ 0x1b, 0x42, 0xd5, 0x1b, 0x59, 0x3d, 0xf6, 0x7f,
+ 0xc5, 0xb7, 0xa6, 0xf2, 0xd4, 0x16, 0xfc, 0x2d,
+ 0xd6, 0x61, 0x23, 0x54, 0xa1, 0xf6, 0xf4, 0x8c,
+ 0xf9, 0xda, 0xb3, 0x8d, 0xc4, 0x09, 0x3f, 0xe0,
+ 0x4b, 0x15, 0xfb, 0xa4, 0x52, 0xf1, 0x24, 0x17,
+ 0xa9, 0xca, 0x09, 0x7d, 0xe0, 0x05, 0xab, 0xb7,
+ 0x67, 0xce, 0x0b, 0x08, 0xc4, 0xff, 0x95, 0xbe,
+ 0xd9, 0x48, 0x4b, 0x9e, 0x52, 0x8a, 0x7e, 0x9d,
+ 0x9f, 0x79, 0x42, 0xf2, 0x6a, 0x66, 0x09, 0x13,
+ 0x30, 0x13, 0x91, 0x11, 0x18, 0x3c, 0xc8, 0x7f,
+ 0x0a, 0xd3, 0x88, 0xce, 0xd2, 0x1d, 0x8c, 0xab,
+ 0x65, 0xd7, 0x49, 0xb7, 0x62, 0xc7, 0x55, 0x01,
+ 0x40, 0x97, 0xf3, 0xab, 0xfd, 0xfd, 0xbe, 0x2d,
+ 0x10, 0x4f, 0x3e, 0x28, 0x8b, 0x06, 0xa8, 0x95,
+ 0xd9, 0x30, 0x64, 0xab, 0x4d, 0xf0, 0x57, 0xb2,
+ 0xc8 },
+ .ciphertext = { 0xd0, 0xf9, 0xff, 0xce, 0x03, 0x81, 0x14, 0x9c,
+ 0xd5, 0xf2, 0xbf, 0xe5, 0xff, 0xc8, 0x15, 0x4a,
+ 0x9c, 0x06, 0x2b, 0x17, 0x99, 0xe3, 0x48, 0x70,
+ 0x37, 0x01, 0x5e, 0x24, 0x80, 0x9a, 0x46, 0x4e,
+ 0xa8, 0xc0, 0x59, 0xd7, 0x03, 0x74, 0x28, 0x91,
+ 0x79, 0xb4, 0xb5, 0xd6, 0x52, 0x92, 0x04, 0x77,
+ 0x5b, 0x4f, 0x34, 0xd1, 0xbe, 0xaa, 0x74, 0xd9,
+ 0x01, 0x40, 0x24, 0xc7, 0x8c, 0x62, 0x2a, 0x51,
+ 0x5a, 0x58, 0x0e, 0xc8, 0x70, 0x12, 0x06, 0x1c,
+ 0x62, 0x7f, 0xf5, 0x23, 0xcb, 0x3c, 0xc1, 0xbe,
+ 0x8b, 0x7f, 0x9d, 0x12, 0xb8, 0x26, 0xc8, 0xa3,
+ 0x77, 0x7e, 0x83, 0xda, 0x83, 0xe1, 0x9f, 0xef,
+ 0x33, 0x62, 0x17, 0xa7, 0x74, 0x68, 0x34, 0x5e,
+ 0x16, 0xcc, 0xbc, 0x6c, 0x33, 0x2f, 0x73, 0xf0,
+ 0xfc, 0xe5, 0x2c, 0x2d, 0xfb, 0x81, 0xbe, 0x1e,
+ 0x6e, 0x4f, 0xf4, 0x14, 0x37, 0x7c, 0x97, 0xac,
+ 0xa9, 0xac, 0x68, 0x95, 0xf3, 0x55, 0xb3, 0xfb,
+ 0xf6, 0x64, 0xd9, 0x1b, 0xe1, 0x54, 0x79, 0x6e,
+ 0xfa, 0x21, 0xa4, 0x19, 0x9f, 0xb4, 0x4b, 0xb7,
+ 0xef, 0x52, 0xd8, 0x44, 0x75, 0x99, 0x07, 0x6d,
+ 0xa9, 0xcf, 0x32, 0xc5, 0xc1, 0x31, 0x0c, 0xa8,
+ 0x86, 0x40, 0x75, 0xeb, 0x12, 0xcf, 0x26, 0x5c,
+ 0x5f, 0xa3, 0x3c, 0xb6, 0x12, 0x45, 0xf3, 0x0a,
+ 0x38, 0x09, 0xa8, 0x36, 0x32, 0x4a, 0x2f, 0xad,
+ 0x50, 0x11, 0x38, 0xba, 0x8f, 0xdd, 0xd1, 0x58,
+ 0xd7, 0x3d, 0x3a, 0x40, 0x7c, 0x3f, 0xa7, 0x98,
+ 0xf3, 0x12, 0x7f, 0x9f, 0x89, 0xcf, 0x48, 0x58,
+ 0x01, 0xeb, 0x98, 0x7c, 0x59, 0x11, 0x9f, 0x57,
+ 0x74, 0x5f, 0x70, 0x72, 0x74, 0xa4, 0x82, 0x3c,
+ 0x36, 0xe6, 0x31, 0x9e, 0xba, 0x7b, 0x53, 0xfc,
+ 0x56 }
+ },
+};
+
+static crypto_test_reference_t zuc_eia3_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_ZUC_EIA3,
+ .auth_key_length = ZUC_EIA3_KEY_LEN,
+ .auth_key = { 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb,
+ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a },
+ .auth_iv_length = ZUC_EIA3_IV_LEN,
+ .auth_iv = { 0xa9, 0x40, 0x59, 0xda, 0x50, 0x00, 0x00, 0x00,
+ 0x29, 0x40, 0x59, 0xda, 0x50, 0x00, 0x80, 0x00 },
+ .length = 584, /* 73 bytes */
+ .is_length_in_bits = true,
+ .plaintext = { 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e,
+ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1,
+ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83,
+ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0,
+ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9,
+ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9,
+ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d,
+ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c,
+ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81,
+ 0x00, 0x00, 0x00, 0x00 },
+ .ciphertext = { 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e,
+ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1,
+ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83,
+ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0,
+ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9,
+ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9,
+ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d,
+ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c,
+ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81,
+ 0x00, 0x00, 0x00, 0x00 },
+ .digest_length = ZUC_EIA3_DIGEST_LEN,
+ .digest = { 0x24, 0xa8, 0x42, 0xb3 }
+ },
+ /* Privately generated test data */
+ {
+ .auth = ODP_AUTH_ALG_ZUC_EIA3,
+ .auth_key_length = ZUC_EIA3_256_KEY_LEN,
+ .auth_key = { 0xe3, 0x8e, 0xaf, 0x08, 0xde, 0x8c, 0x08, 0x41,
+ 0x7f, 0x2b, 0x97, 0x20, 0x10, 0x87, 0xc7, 0xf7,
+ 0xbe, 0x3c, 0xd2, 0x68, 0x80, 0x10, 0x1e, 0x71,
+ 0xfd, 0xb2, 0xbb, 0xad, 0x25, 0x0f, 0x06, 0x08 },
+ .auth_iv_length = ZUC_EIA3_256_IV_LEN,
+ .auth_iv = { 0xf5, 0x8d, 0x08, 0x26, 0x94, 0x14, 0xc7, 0x4d,
+ 0xf5, 0x7c, 0x9c, 0xaa, 0x45, 0x53, 0xfd, 0x85,
+ 0x23, 0x0b, 0x00, 0x0e, 0x26, 0x2b, 0x0f, 0x01,
+ 0x26 },
+ .length = 360,
+ .is_length_in_bits = true,
+ .plaintext = { 0x08, 0xba, 0x8d, 0xf1, 0xf8, 0x62, 0xa6, 0xaf,
+ 0xf9, 0x03, 0x88, 0x9c, 0xa3, 0x68, 0x6b, 0x87,
+ 0xb6, 0x92, 0xd1, 0x47, 0x3e, 0x54, 0xaf, 0x46,
+ 0x07, 0x8f, 0x89, 0xea, 0x26, 0x9d, 0x0e, 0x2f,
+ 0x57, 0x9b, 0x20, 0x4f, 0xfe, 0xc7, 0xfe, 0xf7,
+ 0xca, 0x86, 0x93, 0x6d, 0xee },
+ .ciphertext = { 0x08, 0xba, 0x8d, 0xf1, 0xf8, 0x62, 0xa6, 0xaf,
+ 0xf9, 0x03, 0x88, 0x9c, 0xa3, 0x68, 0x6b, 0x87,
+ 0xb6, 0x92, 0xd1, 0x47, 0x3e, 0x54, 0xaf, 0x46,
+ 0x07, 0x8f, 0x89, 0xea, 0x26, 0x9d, 0x0e, 0x2f,
+ 0x57, 0x9b, 0x20, 0x4f, 0xfe, 0xc7, 0xfe, 0xf7,
+ 0xca, 0x86, 0x93, 0x6d, 0xee },
+ .digest_length = ZUC_EIA3_DIGEST_LEN,
+ .digest = {0x58, 0x19, 0xab, 0xa5}
+ },
+ /* Privately generated test data */
+ {
+ .auth = ODP_AUTH_ALG_ZUC_EIA3,
+ .auth_key_length = ZUC_EIA3_256_KEY_LEN,
+ .auth_key = { 0x6a, 0x7e, 0x4c, 0x7e, 0x51, 0x25, 0xb3, 0x48,
+ 0x84, 0x53, 0x3a, 0x94, 0xfb, 0x31, 0x99, 0x90,
+ 0x32, 0x57, 0x44, 0xee, 0x9b, 0xbc, 0xe9, 0xe5,
+ 0x25, 0xcf, 0x08, 0xf5, 0xe9, 0xe2, 0x5e, 0x53 },
+ .auth_iv_length = ZUC_EIA3_256_IV_LEN,
+ .auth_iv = { 0x60, 0xaa, 0xd2, 0xb2, 0xd0, 0x85, 0xfa, 0x54,
+ 0xd8, 0x35, 0xe8, 0xd4, 0x66, 0x82, 0x64, 0x98,
+ 0xd9, 0x2a, 0x08, 0x1d, 0x35, 0x19, 0x17, 0x01,
+ 0x1A },
+ .length = 2872,
+ .is_length_in_bits = true,
+ .plaintext = { 0xc6, 0x69, 0x73, 0x51, 0xff, 0x4a, 0xec, 0x29,
+ 0xcd, 0xba, 0xab, 0xf2, 0xfb, 0xe3, 0x46, 0x7c,
+ 0xc2, 0x54, 0xf8, 0x1b, 0xe8, 0xe7, 0x8d, 0x76,
+ 0x5a, 0x2e, 0x63, 0x33, 0x9f, 0xc9, 0x9a, 0x66,
+ 0x32, 0x0d, 0xb7, 0x31, 0x58, 0xa3, 0x5a, 0x25,
+ 0x5d, 0x05, 0x17, 0x58, 0xe9, 0x5e, 0xd4, 0xab,
+ 0xb2, 0xcd, 0xc6, 0x9b, 0xb4, 0x54, 0x11, 0x0e,
+ 0x82, 0x74, 0x41, 0x21, 0x3d, 0xdc, 0x87, 0x70,
+ 0xe9, 0x3e, 0xa1, 0x41, 0xe1, 0xfc, 0x67, 0x3e,
+ 0x01, 0x7e, 0x97, 0xea, 0xdc, 0x6b, 0x96, 0x8f,
+ 0x38, 0x5c, 0x2a, 0xec, 0xb0, 0x3b, 0xfb, 0x32,
+ 0xaf, 0x3c, 0x54, 0xec, 0x18, 0xdb, 0x5c, 0x02,
+ 0x1a, 0xfe, 0x43, 0xfb, 0xfa, 0xaa, 0x3a, 0xfb,
+ 0x29, 0xd1, 0xe6, 0x05, 0x3c, 0x7c, 0x94, 0x75,
+ 0xd8, 0xbe, 0x61, 0x89, 0xf9, 0x5c, 0xbb, 0xa8,
+ 0x99, 0x0f, 0x95, 0xb1, 0xeb, 0xf1, 0xb3, 0x05,
+ 0xef, 0xf7, 0x00, 0xe9, 0xa1, 0x3a, 0xe5, 0xca,
+ 0x0b, 0xcb, 0xd0, 0x48, 0x47, 0x64, 0xbd, 0x1f,
+ 0x23, 0x1e, 0xa8, 0x1c, 0x7b, 0x64, 0xc5, 0x14,
+ 0x73, 0x5a, 0xc5, 0x5e, 0x4b, 0x79, 0x63, 0x3b,
+ 0x70, 0x64, 0x24, 0x11, 0x9e, 0x09, 0xdc, 0xaa,
+ 0xd4, 0xac, 0xf2, 0x1b, 0x10, 0xaf, 0x3b, 0x33,
+ 0xcd, 0xe3, 0x50, 0x48, 0x47, 0x15, 0x5c, 0xbb,
+ 0x6f, 0x22, 0x19, 0xba, 0x9b, 0x7d, 0xf5, 0x0b,
+ 0xe1, 0x1a, 0x1c, 0x7f, 0x23, 0xf8, 0x29, 0xf8,
+ 0xa4, 0x1b, 0x13, 0xb5, 0xca, 0x4e, 0xe8, 0x98,
+ 0x32, 0x38, 0xe0, 0x79, 0x4d, 0x3d, 0x34, 0xbc,
+ 0x5f, 0x4e, 0x77, 0xfa, 0xcb, 0x6c, 0x05, 0xac,
+ 0x86, 0x21, 0x2b, 0xaa, 0x1a, 0x55, 0xa2, 0xbe,
+ 0x70, 0xb5, 0x73, 0x3b, 0x04, 0x5c, 0xd3, 0x36,
+ 0x94, 0xb3, 0xaf, 0xe2, 0xf0, 0xe4, 0x9e, 0x4f,
+ 0x32, 0x15, 0x49, 0xfd, 0x82, 0x4e, 0xa9, 0x08,
+ 0x70, 0xd4, 0xb2, 0x8a, 0x29, 0x54, 0x48, 0x9a,
+ 0x0a, 0xbc, 0xd5, 0x0e, 0x18, 0xa8, 0x44, 0xac,
+ 0x5b, 0xf3, 0x8e, 0x4c, 0xd7, 0x2d, 0x9b, 0x09,
+ 0x42, 0xe5, 0x06, 0xc4, 0x33, 0xaf, 0xcd, 0xa3,
+ 0x84, 0x7f, 0x2d, 0xad, 0xd4, 0x76, 0x47, 0xde,
+ 0x32, 0x1c, 0xec, 0x4a, 0xc4, 0x30, 0xf6, 0x20,
+ 0x23, 0x85, 0x6c, 0xfb, 0xb2, 0x07, 0x04, 0xf4,
+ 0xec, 0x0b, 0xb9, 0x20, 0xba, 0x86, 0xc3, 0x3e,
+ 0x05, 0xf1, 0xec, 0xd9, 0x67, 0x33, 0xb7, 0x99,
+ 0x50, 0xa3, 0xe3, 0x14, 0xd3, 0xd9, 0x34, 0xf7,
+ 0x5e, 0xa0, 0xf2, 0x10, 0xa8, 0xf6, 0x05, 0x94,
+ 0x01, 0xbe, 0xb4, 0xbc, 0x44, 0x78, 0xfa, 0x49,
+ 0x69, 0xe6, 0x23, 0xd0, 0x1a, 0xda, 0x69 },
+ .ciphertext = { 0xc6, 0x69, 0x73, 0x51, 0xff, 0x4a, 0xec, 0x29,
+ 0xcd, 0xba, 0xab, 0xf2, 0xfb, 0xe3, 0x46, 0x7c,
+ 0xc2, 0x54, 0xf8, 0x1b, 0xe8, 0xe7, 0x8d, 0x76,
+ 0x5a, 0x2e, 0x63, 0x33, 0x9f, 0xc9, 0x9a, 0x66,
+ 0x32, 0x0d, 0xb7, 0x31, 0x58, 0xa3, 0x5a, 0x25,
+ 0x5d, 0x05, 0x17, 0x58, 0xe9, 0x5e, 0xd4, 0xab,
+ 0xb2, 0xcd, 0xc6, 0x9b, 0xb4, 0x54, 0x11, 0x0e,
+ 0x82, 0x74, 0x41, 0x21, 0x3d, 0xdc, 0x87, 0x70,
+ 0xe9, 0x3e, 0xa1, 0x41, 0xe1, 0xfc, 0x67, 0x3e,
+ 0x01, 0x7e, 0x97, 0xea, 0xdc, 0x6b, 0x96, 0x8f,
+ 0x38, 0x5c, 0x2a, 0xec, 0xb0, 0x3b, 0xfb, 0x32,
+ 0xaf, 0x3c, 0x54, 0xec, 0x18, 0xdb, 0x5c, 0x02,
+ 0x1a, 0xfe, 0x43, 0xfb, 0xfa, 0xaa, 0x3a, 0xfb,
+ 0x29, 0xd1, 0xe6, 0x05, 0x3c, 0x7c, 0x94, 0x75,
+ 0xd8, 0xbe, 0x61, 0x89, 0xf9, 0x5c, 0xbb, 0xa8,
+ 0x99, 0x0f, 0x95, 0xb1, 0xeb, 0xf1, 0xb3, 0x05,
+ 0xef, 0xf7, 0x00, 0xe9, 0xa1, 0x3a, 0xe5, 0xca,
+ 0x0b, 0xcb, 0xd0, 0x48, 0x47, 0x64, 0xbd, 0x1f,
+ 0x23, 0x1e, 0xa8, 0x1c, 0x7b, 0x64, 0xc5, 0x14,
+ 0x73, 0x5a, 0xc5, 0x5e, 0x4b, 0x79, 0x63, 0x3b,
+ 0x70, 0x64, 0x24, 0x11, 0x9e, 0x09, 0xdc, 0xaa,
+ 0xd4, 0xac, 0xf2, 0x1b, 0x10, 0xaf, 0x3b, 0x33,
+ 0xcd, 0xe3, 0x50, 0x48, 0x47, 0x15, 0x5c, 0xbb,
+ 0x6f, 0x22, 0x19, 0xba, 0x9b, 0x7d, 0xf5, 0x0b,
+ 0xe1, 0x1a, 0x1c, 0x7f, 0x23, 0xf8, 0x29, 0xf8,
+ 0xa4, 0x1b, 0x13, 0xb5, 0xca, 0x4e, 0xe8, 0x98,
+ 0x32, 0x38, 0xe0, 0x79, 0x4d, 0x3d, 0x34, 0xbc,
+ 0x5f, 0x4e, 0x77, 0xfa, 0xcb, 0x6c, 0x05, 0xac,
+ 0x86, 0x21, 0x2b, 0xaa, 0x1a, 0x55, 0xa2, 0xbe,
+ 0x70, 0xb5, 0x73, 0x3b, 0x04, 0x5c, 0xd3, 0x36,
+ 0x94, 0xb3, 0xaf, 0xe2, 0xf0, 0xe4, 0x9e, 0x4f,
+ 0x32, 0x15, 0x49, 0xfd, 0x82, 0x4e, 0xa9, 0x08,
+ 0x70, 0xd4, 0xb2, 0x8a, 0x29, 0x54, 0x48, 0x9a,
+ 0x0a, 0xbc, 0xd5, 0x0e, 0x18, 0xa8, 0x44, 0xac,
+ 0x5b, 0xf3, 0x8e, 0x4c, 0xd7, 0x2d, 0x9b, 0x09,
+ 0x42, 0xe5, 0x06, 0xc4, 0x33, 0xaf, 0xcd, 0xa3,
+ 0x84, 0x7f, 0x2d, 0xad, 0xd4, 0x76, 0x47, 0xde,
+ 0x32, 0x1c, 0xec, 0x4a, 0xc4, 0x30, 0xf6, 0x20,
+ 0x23, 0x85, 0x6c, 0xfb, 0xb2, 0x07, 0x04, 0xf4,
+ 0xec, 0x0b, 0xb9, 0x20, 0xba, 0x86, 0xc3, 0x3e,
+ 0x05, 0xf1, 0xec, 0xd9, 0x67, 0x33, 0xb7, 0x99,
+ 0x50, 0xa3, 0xe3, 0x14, 0xd3, 0xd9, 0x34, 0xf7,
+ 0x5e, 0xa0, 0xf2, 0x10, 0xa8, 0xf6, 0x05, 0x94,
+ 0x01, 0xbe, 0xb4, 0xbc, 0x44, 0x78, 0xfa, 0x49,
+ 0x69, 0xe6, 0x23, 0xd0, 0x1a, 0xda, 0x69 },
+ .digest_length = ZUC_EIA3_DIGEST_LEN,
+ .digest = {0xd1, 0x1e, 0x33, 0xf6}
+ },
+};
+
+/*
+ * MD5 test vectors from RFC 1321
+ */
+static crypto_test_reference_t md5_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_MD5,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = MD5_DIGEST_LEN,
+ .digest = { 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0,
+ 0xd6, 0x96, 0x3f, 0x7d, 0x28, 0xe1, 0x7f, 0x72}
+ },
+ {
+ .auth = ODP_AUTH_ALG_MD5,
+ .length = 62,
+ .plaintext = { 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+ 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
+ 0x77, 0x78, 0x79, 0x7a, 0x30, 0x31, 0x32, 0x33,
+ 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 },
+ .ciphertext = { 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+ 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
+ 0x77, 0x78, 0x79, 0x7a, 0x30, 0x31, 0x32, 0x33,
+ 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 },
+ .digest_length = MD5_DIGEST_LEN,
+ .digest = { 0xd1, 0x74, 0xab, 0x98, 0xd2, 0x77, 0xd9, 0xf5,
+ 0xa5, 0x61, 0x1c, 0x2c, 0x9f, 0x41, 0x9d, 0x9f},
+ }
+};
+
+/*
+ * SHA test vectors from Crypto++
+ */
+static crypto_test_reference_t sha1_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SHA1,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = SHA1_DIGEST_LEN,
+ .digest = { 0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A,
+ 0xBA, 0x3E, 0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C,
+ 0x9C, 0xD0, 0xD8, 0x9D},
+ },
+ {
+ .auth = ODP_AUTH_ALG_SHA1,
+ .length = 56,
+ .plaintext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .ciphertext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .digest_length = SHA1_DIGEST_LEN,
+ .digest = { 0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E,
+ 0xBA, 0xAE, 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5,
+ 0xE5, 0x46, 0x70, 0xF1},
+ }
+};
+
+static crypto_test_reference_t sha224_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SHA224,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = SHA224_DIGEST_LEN,
+ .digest = { 0x23, 0x09, 0x7d, 0x22, 0x34, 0x05, 0xd8, 0x22,
+ 0x86, 0x42, 0xa4, 0x77, 0xbd, 0xa2, 0x55, 0xb3,
+ 0x2a, 0xad, 0xbc, 0xe4, 0xbd, 0xa0, 0xb3, 0xf7,
+ 0xe3, 0x6c, 0x9d, 0xa7 },
+ },
+ {
+ .auth = ODP_AUTH_ALG_SHA224,
+ .length = 56,
+ .plaintext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .ciphertext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .digest_length = SHA224_DIGEST_LEN,
+ .digest = { 0x75, 0x38, 0x8b, 0x16, 0x51, 0x27, 0x76, 0xcc,
+ 0x5d, 0xba, 0x5d, 0xa1, 0xfd, 0x89, 0x01, 0x50,
+ 0xb0, 0xc6, 0x45, 0x5c, 0xb4, 0xf5, 0x8b, 0x19,
+ 0x52, 0x52, 0x25, 0x25},
+ }
+};
+
+static crypto_test_reference_t sha256_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SHA256,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = SHA256_DIGEST_LEN,
+ .digest = { 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
+ 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
+ 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad},
+ },
+ {
+ .auth = ODP_AUTH_ALG_SHA256,
+ .length = 56,
+ .plaintext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .ciphertext = { 0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
+ 0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
+ 0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
+ 0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71},
+ .digest_length = SHA256_DIGEST_LEN,
+ .digest = { 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
+ 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
+ 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
+ 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1},
+ }
+};
+
+static crypto_test_reference_t sha384_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SHA384,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = SHA384_DIGEST_LEN,
+ .digest = { 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b,
+ 0xb5, 0xa0, 0x3d, 0x69, 0x9a, 0xc6, 0x50, 0x07,
+ 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63,
+ 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed,
+ 0x80, 0x86, 0x07, 0x2b, 0xa1, 0xe7, 0xcc, 0x23,
+ 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7},
+ },
+ {
+ .auth = ODP_AUTH_ALG_SHA384,
+ .length = 112,
+ .plaintext = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75},
+ .ciphertext = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75},
+ .digest_length = SHA384_DIGEST_LEN,
+ .digest = { 0x09, 0x33, 0x0c, 0x33, 0xf7, 0x11, 0x47, 0xe8,
+ 0x3d, 0x19, 0x2f, 0xc7, 0x82, 0xcd, 0x1b, 0x47,
+ 0x53, 0x11, 0x1b, 0x17, 0x3b, 0x3b, 0x05, 0xd2,
+ 0x2f, 0xa0, 0x80, 0x86, 0xe3, 0xb0, 0xf7, 0x12,
+ 0xfc, 0xc7, 0xc7, 0x1a, 0x55, 0x7e, 0x2d, 0xb9,
+ 0x66, 0xc3, 0xe9, 0xfa, 0x91, 0x74, 0x60, 0x39},
+ }
+};
+
+static crypto_test_reference_t sha512_reference[] = {
+ {
+ .auth = ODP_AUTH_ALG_SHA512,
+ .length = 3,
+ .plaintext = { 0x61, 0x62, 0x63 },
+ .ciphertext = { 0x61, 0x62, 0x63 },
+ .digest_length = SHA512_DIGEST_LEN,
+ .digest = { 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba,
+ 0xcc, 0x41, 0x73, 0x49, 0xae, 0x20, 0x41, 0x31,
+ 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2,
+ 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a,
+ 0x21, 0x92, 0x99, 0x2a, 0x27, 0x4f, 0xc1, 0xa8,
+ 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd,
+ 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e,
+ 0x2a, 0x9a, 0xc9, 0x4f, 0xa5, 0x4c, 0xa4, 0x9f},
+ },
+ {
+ .auth = ODP_AUTH_ALG_SHA512,
+ .length = 112,
+ .plaintext = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75},
+ .ciphertext = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+ 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
+ 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75},
+ .digest_length = SHA512_DIGEST_LEN,
+ .digest = { 0x8e, 0x95, 0x9b, 0x75, 0xda, 0xe3, 0x13, 0xda,
+ 0x8c, 0xf4, 0xf7, 0x28, 0x14, 0xfc, 0x14, 0x3f,
+ 0x8f, 0x77, 0x79, 0xc6, 0xeb, 0x9f, 0x7f, 0xa1,
+ 0x72, 0x99, 0xae, 0xad, 0xb6, 0x88, 0x90, 0x18,
+ 0x50, 0x1d, 0x28, 0x9e, 0x49, 0x00, 0xf7, 0xe4,
+ 0x33, 0x1b, 0x99, 0xde, 0xc4, 0xb5, 0x43, 0x3a,
+ 0xc7, 0xd3, 0x29, 0xee, 0xb6, 0xdd, 0x26, 0x54,
+ 0x5e, 0x96, 0xe5, 0x5b, 0x87, 0x4b, 0xe9, 0x09},
+ }
+};
+
+#endif
diff --git a/test/validation/api/crypto/test_vectors.h b/test/validation/api/crypto/test_vectors.h
new file mode 100644
index 000000000..a38644246
--- /dev/null
+++ b/test/validation/api/crypto/test_vectors.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_VECTORS_H
+#define TEST_VECTORS_H
+
+#include <odp_api.h>
+#include "test_vectors_len.h"
+
+typedef struct crypto_test_reference_s {
+ uint8_t copy_previous_vector; /* does not copy digest_length */
+ odp_cipher_alg_t cipher;
+ odp_auth_alg_t auth;
+ uint32_t cipher_key_length;
+ uint8_t cipher_key[MAX_KEY_LEN];
+ uint32_t auth_key_length;
+ uint8_t auth_key[MAX_KEY_LEN];
+ uint32_t cipher_iv_length;
+ uint8_t cipher_iv[MAX_IV_LEN];
+ uint32_t auth_iv_length;
+ uint8_t auth_iv[MAX_IV_LEN];
+ uint32_t length;
+ odp_bool_t is_length_in_bits;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint32_t aad_length;
+ uint8_t aad[MAX_AAD_LEN];
+ uint32_t digest_length;
+ uint8_t digest[MAX_DIGEST_LEN];
+} crypto_test_reference_t;
+
+ODP_STATIC_ASSERT(ODP_CIPHER_ALG_NULL == 0, "null cipher is not the default");
+ODP_STATIC_ASSERT(ODP_AUTH_ALG_NULL == 0, "null auth is not the default");
+
+/*
+ * Return test data length in bytes, rounding up to full bytes.
+ */
+static inline uint32_t ref_length_in_bytes(const crypto_test_reference_t *ref)
+{
+ return ref->is_length_in_bits ? (ref->length + 7) / 8 : ref->length;
+}
+
+/*
+ * Return test data length in bits
+ */
+static inline uint32_t ref_length_in_bits(const crypto_test_reference_t *ref)
+{
+ return ref->is_length_in_bits ? ref->length : 8 * ref->length;
+}
+
+static inline void init_reference(crypto_test_reference_t *ref, int size)
+{
+ int n;
+ crypto_test_reference_t *prev = NULL;
+
+ for (n = 0; n < size; n++) {
+ if (prev && ref[n].copy_previous_vector) {
+ uint32_t len;
+
+ len = ref[n].digest_length;
+ ref[n] = *prev;
+ ref[n].digest_length = len;
+ }
+ prev = &ref[n];
+ }
+}
+
+#endif
diff --git a/test/validation/api/crypto/test_vectors_len.h b/test/validation/api/crypto/test_vectors_len.h
new file mode 100644
index 000000000..3818b57a0
--- /dev/null
+++ b/test/validation/api/crypto/test_vectors_len.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TEST_VECTORS_LEN_
+#define TEST_VECTORS_LEN_
+
+/* Maximum */
+#define MAX_KEY_LEN 64
+#define MAX_IV_LEN 32
+#define MAX_DATA_LEN 1000
+#define MAX_AAD_LEN 12
+#define MAX_DIGEST_LEN 64
+
+/* TDES-CBC */
+#define TDES_CBC_KEY_LEN 24
+#define TDES_CBC_IV_LEN 8
+
+/* TDES-ECB */
+#define TDES_ECB_KEY_LEN 24
+
+/* AES common */
+
+#define AES128_KEY_LEN 16
+
+#define AES192_KEY_LEN 24
+
+#define AES256_KEY_LEN 32
+
+/* AES-CBC */
+#define AES_CBC_IV_LEN 16
+
+/* AES-CTR */
+#define AES_CTR_IV_LEN 16
+
+/* AES-CFB128 */
+#define AES_CFB128_IV_LEN 16
+
+/* AES-XTS */
+#define AES128_XTS_KEY_LEN 32
+#define AES256_XTS_KEY_LEN 64
+#define AES_XTS_IV_LEN 16
+
+/* AES-GCM */
+#define AES_GCM_IV_LEN 12
+#define AES_GCM_DIGEST_LEN 16
+
+/* HMAC-MD5 */
+#define HMAC_MD5_KEY_LEN 16
+#define HMAC_MD5_96_CHECK_LEN 12
+#define HMAC_MD5_CHECK_LEN 16
+
+/* HMAC-SHA1 */
+#define HMAC_SHA1_KEY_LEN 20
+#define HMAC_SHA1_96_CHECK_LEN 12
+#define HMAC_SHA1_CHECK_LEN 20
+
+/* HMAC-SHA224 */
+#define HMAC_SHA224_KEY_LEN 28
+#define HMAC_SHA224_CHECK_LEN 28
+
+/* HMAC-SHA256 */
+#define HMAC_SHA256_KEY_LEN 32
+#define HMAC_SHA256_128_CHECK_LEN 16
+#define HMAC_SHA256_CHECK_LEN 32
+
+/* HMAC-SHA384 */
+#define HMAC_SHA384_KEY_LEN 48
+#define HMAC_SHA384_192_CHECK_LEN 24
+#define HMAC_SHA384_CHECK_LEN 48
+
+/* HMAC-SHA512 */
+#define HMAC_SHA512_KEY_LEN 64
+#define HMAC_SHA512_256_CHECK_LEN 32
+#define HMAC_SHA512_CHECK_LEN 64
+
+/* ChaCha20-Poly1305 */
+#define CHACHA20_POLY1305_KEY_LEN 32
+#define CHACHA20_POLY1305_IV_LEN 12
+#define CHACHA20_POLY1305_CHECK_LEN 16
+
+/* AES-XCBC-MAC */
+#define AES_XCBC_MAC_KEY_LEN 16
+#define AES_XCBC_MAC_96_CHECK_LEN 12
+#define AES_XCBC_MAC_CHECK_LEN 16
+
+/* KASUMI_F8 */
+#define KASUMI_F8_KEY_LEN 16
+#define KASUMI_F8_IV_LEN 8
+
+/* SNOW3G_UEA2 */
+#define SNOW3G_UEA2_KEY_LEN 16
+#define SNOW3G_UEA2_IV_LEN 16
+
+/* AES_EEA2 */
+#define AES_EEA2_KEY_LEN 16
+#define AES_EEA2_IV_LEN 16
+
+/* ZUC_EEA3 */
+#define ZUC_EEA3_KEY_LEN 16
+#define ZUC_EEA3_IV_LEN 16
+
+/* ZUC_EEA3_256 */
+#define ZUC_EEA3_256_KEY_LEN 32
+#define ZUC_EEA3_256_IV_LEN 25
+
+/* KASUMI_F9 */
+#define KASUMI_F9_KEY_LEN 16
+#define KASUMI_F9_IV_LEN 9
+#define KASUMI_F9_DIGEST_LEN 4
+
+/* SNOW3G_UIA2 */
+#define SNOW3G_UIA2_KEY_LEN 16
+#define SNOW3G_UIA2_IV_LEN 16
+#define SNOW3G_UIA2_DIGEST_LEN 4
+
+/* AES_EIA2 */
+#define AES_EIA2_KEY_LEN 16
+#define AES_EIA2_IV_LEN 8
+#define AES_EIA2_DIGEST_LEN 4
+
+/* ZUC_EIA3 */
+#define ZUC_EIA3_KEY_LEN 16
+#define ZUC_EIA3_IV_LEN 16
+#define ZUC_EIA3_DIGEST_LEN 4
+
+/* ZUC_EIA3_256 */
+#define ZUC_EIA3_256_KEY_LEN 32
+#define ZUC_EIA3_256_IV_LEN 25
+
+/* MD5 */
+#define MD5_DIGEST_LEN 16
+
+/* SHA1 */
+#define SHA1_DIGEST_LEN 20
+
+/* SHA224 */
+#define SHA224_DIGEST_LEN 28
+
+/* SHA256 */
+#define SHA256_DIGEST_LEN 32
+
+/* SHA384 */
+#define SHA384_DIGEST_LEN 48
+
+/* SHA512 */
+#define SHA512_DIGEST_LEN 64
+
+#endif
diff --git a/test/validation/api/crypto/util.c b/test/validation/api/crypto/util.c
new file mode 100644
index 000000000..557e5e951
--- /dev/null
+++ b/test/validation/api/crypto/util.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
+#include "util.h"
+
+struct suite_context_s suite_context;
+
+const char *auth_alg_name(odp_auth_alg_t auth)
+{
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ return "ODP_AUTH_ALG_NULL";
+ case ODP_AUTH_ALG_MD5_HMAC:
+ return "ODP_AUTH_ALG_MD5_HMAC";
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ return "ODP_AUTH_ALG_SHA1_HMAC";
+ case ODP_AUTH_ALG_SHA224_HMAC:
+ return "ODP_AUTH_ALG_SHA224_HMAC";
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ return "ODP_AUTH_ALG_SHA256_HMAC";
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ return "ODP_AUTH_ALG_SHA384_HMAC";
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ return "ODP_AUTH_ALG_SHA512_HMAC";
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ return "ODP_AUTH_ALG_AES_XCBC_MAC";
+ case ODP_AUTH_ALG_AES_GCM:
+ return "ODP_AUTH_ALG_AES_GCM";
+ case ODP_AUTH_ALG_AES_GMAC:
+ return "ODP_AUTH_ALG_AES_GMAC";
+ case ODP_AUTH_ALG_AES_CCM:
+ return "ODP_AUTH_ALG_AES_CCM";
+ case ODP_AUTH_ALG_AES_CMAC:
+ return "ODP_AUTH_ALG_AES_CMAC";
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ return "ODP_AUTH_ALG_CHACHA20_POLY1305";
+ case ODP_AUTH_ALG_KASUMI_F9:
+ return "ODP_AUTH_ALG_KASUMI_F9";
+ case ODP_AUTH_ALG_SNOW3G_UIA2:
+ return "ODP_AUTH_ALG_SNOW3G_UIA2";
+ case ODP_AUTH_ALG_AES_EIA2:
+ return "ODP_AUTH_ALG_AES_EIA2";
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ return "ODP_AUTH_ALG_ZUC_EIA3";
+ case ODP_AUTH_ALG_MD5:
+ return "ODP_AUTH_ALG_MD5";
+ case ODP_AUTH_ALG_SHA1:
+ return "ODP_AUTH_ALG_SHA1";
+ case ODP_AUTH_ALG_SHA224:
+ return "ODP_AUTH_ALG_SHA224";
+ case ODP_AUTH_ALG_SHA256:
+ return "ODP_AUTH_ALG_SHA256";
+ case ODP_AUTH_ALG_SHA384:
+ return "ODP_AUTH_ALG_SHA384";
+ case ODP_AUTH_ALG_SHA512:
+ return "ODP_AUTH_ALG_SHA512";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *cipher_alg_name(odp_cipher_alg_t cipher)
+{
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ return "ODP_CIPHER_ALG_NULL";
+ case ODP_CIPHER_ALG_DES:
+ return "ODP_CIPHER_ALG_DES";
+ case ODP_CIPHER_ALG_3DES_CBC:
+ return "ODP_CIPHER_ALG_3DES_CBC";
+ case ODP_CIPHER_ALG_3DES_ECB:
+ return "ODP_CIPHER_ALG_3DES_ECB";
+ case ODP_CIPHER_ALG_AES_CBC:
+ return "ODP_CIPHER_ALG_AES_CBC";
+ case ODP_CIPHER_ALG_AES_CTR:
+ return "ODP_CIPHER_ALG_AES_CTR";
+ case ODP_CIPHER_ALG_AES_ECB:
+ return "ODP_CIPHER_ALG_AES_ECB";
+ case ODP_CIPHER_ALG_AES_CFB128:
+ return "ODP_CIPHER_ALG_AES_CFB128";
+ case ODP_CIPHER_ALG_AES_XTS:
+ return "ODP_CIPHER_ALG_AES_XTS";
+ case ODP_CIPHER_ALG_AES_GCM:
+ return "ODP_CIPHER_ALG_AES_GCM";
+ case ODP_CIPHER_ALG_AES_CCM:
+ return "ODP_CIPHER_ALG_AES_CCM";
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ return "ODP_CIPHER_ALG_CHACHA20_POLY1305";
+ case ODP_CIPHER_ALG_KASUMI_F8:
+ return "ODP_CIPHER_ALG_KASUMI_F8";
+ case ODP_CIPHER_ALG_SNOW3G_UEA2:
+ return "ODP_CIPHER_ALG_SNOW3G_UEA2";
+ case ODP_CIPHER_ALG_AES_EEA2:
+ return "ODP_CIPHER_ALG_AES_EEA2";
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ return "ODP_CIPHER_ALG_ZUC_EEA3";
+ default:
+ return "Unknown";
+ }
+}
+
+int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
+{
+ odp_crypto_capability_t capability;
+
+ memset(&capability, 0, sizeof(odp_crypto_capability_t));
+ if (odp_crypto_capability(&capability)) {
+ ODPH_ERR("odp_crypto_capability() failed\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ if (suite_context.queue != ODP_QUEUE_INVALID) {
+ if (suite_context.q_type == ODP_QUEUE_TYPE_PLAIN &&
+ capability.queue_type_plain == 0)
+ return ODP_TEST_INACTIVE;
+ if (suite_context.q_type == ODP_QUEUE_TYPE_SCHED &&
+ capability.queue_type_sched == 0)
+ return ODP_TEST_INACTIVE;
+ }
+
+ if (suite_context.op_mode == ODP_CRYPTO_SYNC &&
+ capability.sync_mode == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+ if (suite_context.op_mode == ODP_CRYPTO_ASYNC &&
+ capability.async_mode == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+
+ /* Cipher algorithms */
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ if (!capability.ciphers.bit.null)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_DES:
+ if (!capability.ciphers.bit.des)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_3DES_CBC:
+ if (!capability.ciphers.bit.trides_cbc)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_3DES_ECB:
+ if (!capability.ciphers.bit.trides_ecb)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ if (!capability.ciphers.bit.aes_cbc)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_CTR:
+ if (!capability.ciphers.bit.aes_ctr)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_ECB:
+ if (!capability.ciphers.bit.aes_ecb)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_CFB128:
+ if (!capability.ciphers.bit.aes_cfb128)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_XTS:
+ if (!capability.ciphers.bit.aes_xts)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_GCM:
+ if (!capability.ciphers.bit.aes_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_CCM:
+ if (!capability.ciphers.bit.aes_ccm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ if (!capability.ciphers.bit.chacha20_poly1305)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_KASUMI_F8:
+ if (!capability.ciphers.bit.kasumi_f8)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SNOW3G_UEA2:
+ if (!capability.ciphers.bit.snow3g_uea2)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_AES_EEA2:
+ if (!capability.ciphers.bit.aes_eea2)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ if (!capability.ciphers.bit.zuc_eea3)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ ODPH_ERR("Unsupported cipher algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ /* Authentication algorithms */
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ if (!capability.auths.bit.null)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ if (!capability.auths.bit.md5_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ if (!capability.auths.bit.sha1_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA224_HMAC:
+ if (!capability.auths.bit.sha224_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ if (!capability.auths.bit.sha256_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ if (!capability.auths.bit.sha384_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ if (!capability.auths.bit.sha512_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ if (!capability.auths.bit.aes_xcbc_mac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_GCM:
+ if (!capability.auths.bit.aes_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_GMAC:
+ if (!capability.auths.bit.aes_gmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_CCM:
+ if (!capability.auths.bit.aes_ccm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_CMAC:
+ if (!capability.auths.bit.aes_cmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ if (!capability.auths.bit.chacha20_poly1305)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_KASUMI_F9:
+ if (!capability.auths.bit.kasumi_f9)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SNOW3G_UIA2:
+ if (!capability.auths.bit.snow3g_uia2)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_AES_EIA2:
+ if (!capability.auths.bit.aes_eia2)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ if (!capability.auths.bit.zuc_eia3)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_MD5:
+ if (!capability.auths.bit.md5)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA1:
+ if (!capability.auths.bit.sha1)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA224:
+ if (!capability.auths.bit.sha224)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA256:
+ if (!capability.auths.bit.sha256)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA384:
+ if (!capability.auths.bit.sha384)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA512:
+ if (!capability.auths.bit.sha512)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ ODPH_ERR("Unsupported authentication algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
diff --git a/test/validation/api/crypto/util.h b/test/validation/api/crypto/util.h
new file mode 100644
index 000000000..5cba21890
--- /dev/null
+++ b/test/validation/api/crypto/util.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <stdint.h>
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+struct suite_context_s {
+ odp_crypto_op_mode_t op_mode;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_queue_type_t q_type;
+ odp_event_t (*compl_queue_deq)(void);
+ int partial_test;
+};
+
+extern struct suite_context_s suite_context;
+
+const char *auth_alg_name(odp_auth_alg_t auth);
+
+const char *cipher_alg_name(odp_cipher_alg_t cipher);
+
+/*
+ * Check if given cipher and authentication algorithms are supported
+ *
+ * cipher Cipher algorithm
+ * auth Authentication algorithm
+ *
+ * returns ODP_TEST_ACTIVE when both algorithms are supported or
+ * ODP_TEST_INACTIVE when either algorithm is not supported
+ */
+int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth);
+
+static inline void fill_with_pattern(uint8_t *buf, uint32_t len)
+{
+ static uint8_t value;
+
+ for (uint32_t n = 0; n < len; n++)
+ buf[n] = value++;
+}
+
+#endif
diff --git a/test/validation/api/dma/.gitignore b/test/validation/api/dma/.gitignore
new file mode 100644
index 000000000..cc14794d5
--- /dev/null
+++ b/test/validation/api/dma/.gitignore
@@ -0,0 +1 @@
+dma_main
diff --git a/test/validation/api/dma/Makefile.am b/test/validation/api/dma/Makefile.am
new file mode 100644
index 000000000..795825c6b
--- /dev/null
+++ b/test/validation/api/dma/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = dma_main
+dma_main_SOURCES = dma.c
diff --git a/test/validation/api/dma/dma.c b/test/validation/api/dma/dma.c
new file mode 100644
index 000000000..efc7fa039
--- /dev/null
+++ b/test/validation/api/dma/dma.c
@@ -0,0 +1,1705 @@
+/* Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#define COMPL_POOL_NAME "DMA compl pool"
+
+#define MIN_SEG_LEN 1024
+#define SHM_ALIGN ODP_CACHE_LINE_SIZE
+#define RETRIES 5
+#define TIMEOUT 5
+#define OFFSET 10
+#define TRAILER 10
+#define MULTI 1
+#define RESULT 1
+#define USER_DATA 0xdeadbeef
+#define ELEM_NUM 10u
+#define UAREA 0xaa
+
+typedef struct global_t {
+ odp_dma_capability_t dma_capa;
+ odp_shm_t shm;
+ int disabled;
+ uint8_t *src_addr;
+ uint8_t *dst_addr;
+ uint32_t data_size;
+ uint32_t len;
+ odp_pool_t pkt_pool;
+ uint32_t pkt_len;
+ odp_queue_t queue;
+ odp_pool_t compl_pool;
+ uint32_t event_count;
+ uint32_t cache_size;
+
+} global_t;
+
+typedef struct {
+ uint32_t count;
+ uint8_t mark[ELEM_NUM];
+} uarea_init_t;
+
+static global_t global;
+
+static int dma_suite_init(void)
+{
+ odp_shm_t shm;
+ odp_pool_param_t pool_param;
+ odp_dma_pool_param_t dma_pool_param;
+ odp_pool_capability_t pool_capa;
+ odp_queue_param_t queue_param;
+ uint32_t shm_len, pkt_len;
+ void *addr;
+
+ memset(&global, 0, sizeof(global_t));
+ global.shm = ODP_SHM_INVALID;
+ global.pkt_pool = ODP_POOL_INVALID;
+ global.queue = ODP_QUEUE_INVALID;
+ global.compl_pool = ODP_POOL_INVALID;
+
+ if (odp_dma_capability(&global.dma_capa)) {
+ ODPH_ERR("DMA capability failed\n");
+ return -1;
+ }
+
+ if (global.dma_capa.max_sessions == 0) {
+ global.disabled = 1;
+ ODPH_DBG("DMA test disabled\n");
+ return 0;
+ }
+
+ shm_len = MIN_SEG_LEN * global.dma_capa.max_segs * global.dma_capa.max_transfers;
+ shm = odp_shm_reserve("DMA test", shm_len, SHM_ALIGN, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("SHM reserve failed\n");
+ return -1;
+ }
+
+ addr = odp_shm_addr(shm);
+
+ if (addr == NULL) {
+ ODPH_ERR("SHM addr failed\n");
+ return -1;
+ }
+
+ global.shm = shm;
+ global.data_size = shm_len / 2;
+ global.src_addr = addr;
+ global.dst_addr = (uint8_t *)global.src_addr + global.data_size;
+ global.len = global.data_size - OFFSET - TRAILER;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capa failed\n");
+ return -1;
+ }
+
+ pkt_len = pool_capa.pkt.max_len;
+ if (pkt_len == 0)
+ pkt_len = 4000;
+
+ pkt_len = ODPH_MIN(pkt_len, global.dma_capa.max_seg_len);
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = global.dma_capa.max_src_segs + global.dma_capa.max_dst_segs;
+ pool_param.pkt.len = pkt_len;
+ pool_param.pkt.max_len = pkt_len;
+
+ global.pkt_len = pkt_len;
+ global.pkt_pool = odp_pool_create("DMA test pkt pool", &pool_param);
+
+ if (global.pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Packet pool create failed\n");
+ return -1;
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ global.queue = odp_queue_create("DMA test queue", &queue_param);
+
+ if (global.queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Queue create failed\n");
+ return -1;
+ }
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) {
+ if (global.dma_capa.pool.max_num < global.dma_capa.max_transfers) {
+ ODPH_ERR("Too small DMA compl pool %u\n", global.dma_capa.pool.max_num);
+ return -1;
+ }
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = global.dma_capa.max_transfers;
+ global.cache_size = dma_pool_param.cache_size;
+
+ global.compl_pool = odp_dma_pool_create(COMPL_POOL_NAME, &dma_pool_param);
+
+ if (global.compl_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Completion pool create failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int dma_suite_term(void)
+{
+ if (global.compl_pool != ODP_POOL_INVALID &&
+ odp_pool_destroy(global.compl_pool)) {
+ ODPH_ERR("Completion pool destroy failed\n");
+ return -1;
+ }
+
+ if (global.queue != ODP_QUEUE_INVALID &&
+ odp_queue_destroy(global.queue)) {
+ ODPH_ERR("Queue destroy failed\n");
+ return -1;
+ }
+
+ if (global.pkt_pool != ODP_POOL_INVALID &&
+ odp_pool_destroy(global.pkt_pool)) {
+ ODPH_ERR("Packet pool destroy failed\n");
+ return -1;
+ }
+
+ if (global.shm != ODP_SHM_INVALID &&
+ odp_shm_free(global.shm)) {
+ ODPH_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return odp_cunit_print_inactive();
+}
+
+static void test_dma_capability(void)
+{
+ odp_dma_capability_t capa;
+
+ memset(&capa, 0, sizeof(odp_dma_capability_t));
+ CU_ASSERT_FATAL(odp_dma_capability(&capa) == 0);
+
+ if (capa.max_sessions == 0)
+ return;
+
+ CU_ASSERT(capa.max_transfers > 0);
+ CU_ASSERT(capa.max_src_segs > 0);
+ CU_ASSERT(capa.max_dst_segs > 0);
+ CU_ASSERT(capa.max_segs > 1);
+ CU_ASSERT(capa.max_segs > capa.max_src_segs);
+ CU_ASSERT(capa.max_segs > capa.max_dst_segs);
+ CU_ASSERT(capa.max_seg_len > 0);
+ CU_ASSERT(capa.compl_mode_mask & ODP_DMA_COMPL_SYNC);
+
+ if (capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) {
+ odp_pool_capability_t pool_capa;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+ CU_ASSERT(capa.queue_type_sched || capa.queue_type_plain);
+ CU_ASSERT(capa.pool.max_pools > 0 && capa.pool.max_pools <= pool_capa.max_pools);
+ CU_ASSERT(capa.pool.max_num > 0);
+ CU_ASSERT(capa.pool.min_cache_size <= capa.pool.max_cache_size);
+ }
+}
+
+static void test_dma_param(uint8_t fill)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_compl_param_t compl_param;
+ odp_dma_pool_param_t dma_pool_param;
+
+ memset(&dma_param, fill, sizeof(dma_param));
+ odp_dma_param_init(&dma_param);
+ CU_ASSERT(dma_param.direction == ODP_DMA_MAIN_TO_MAIN);
+ CU_ASSERT(dma_param.type == ODP_DMA_TYPE_COPY);
+ CU_ASSERT(dma_param.mt_mode == ODP_DMA_MT_SAFE);
+ CU_ASSERT(dma_param.order == ODP_DMA_ORDER_NONE);
+
+ memset(&trs_param, fill, sizeof(trs_param));
+ odp_dma_transfer_param_init(&trs_param);
+ CU_ASSERT(trs_param.src_format == ODP_DMA_FORMAT_ADDR);
+ CU_ASSERT(trs_param.dst_format == ODP_DMA_FORMAT_ADDR);
+ CU_ASSERT(trs_param.num_src == 1);
+ CU_ASSERT(trs_param.num_dst == 1);
+
+ memset(&compl_param, fill, sizeof(compl_param));
+ odp_dma_compl_param_init(&compl_param);
+ CU_ASSERT(compl_param.user_ptr == NULL);
+
+ memset(&dma_pool_param, fill, sizeof(dma_pool_param));
+ odp_dma_pool_param_init(&dma_pool_param);
+ CU_ASSERT(dma_pool_param.uarea_init.init_fn == NULL);
+ CU_ASSERT(dma_pool_param.uarea_init.args == NULL);
+ CU_ASSERT(dma_pool_param.uarea_size == 0);
+ CU_ASSERT(dma_pool_param.cache_size <= global.dma_capa.pool.max_cache_size);
+ CU_ASSERT(dma_pool_param.cache_size >= global.dma_capa.pool.min_cache_size);
+}
+
+static void test_dma_param_init(void)
+{
+ test_dma_param(0);
+ test_dma_param(0xff);
+}
+
+static void test_dma_debug(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_t dma, dma2;
+ uint64_t u64;
+ const char *name = "dma_debug";
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create(name, &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ dma2 = odp_dma_lookup(name);
+ CU_ASSERT(dma2 != ODP_DMA_INVALID);
+ CU_ASSERT(dma2 == dma);
+
+ u64 = odp_dma_to_u64(dma);
+ CU_ASSERT(u64 != odp_dma_to_u64(ODP_DMA_INVALID));
+ printf("\n DMA handle: 0x%" PRIx64 "\n", u64);
+
+ odp_dma_print(dma);
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_same_name_null(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_t dma_a, dma_b;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma_a = odp_dma_create(NULL, &dma_param);
+
+ CU_ASSERT_FATAL(dma_a != ODP_DMA_INVALID);
+
+ dma_b = odp_dma_create(NULL, &dma_param);
+
+ CU_ASSERT_FATAL(dma_b != ODP_DMA_INVALID);
+ CU_ASSERT(odp_dma_to_u64(dma_a) != odp_dma_to_u64(dma_b));
+ CU_ASSERT(odp_dma_destroy(dma_a) == 0);
+ CU_ASSERT(odp_dma_destroy(dma_b) == 0);
+}
+
+static void test_dma_same_name_named(void)
+{
+ odp_dma_param_t dma_param;
+ const char *name = "DMA session";
+ odp_dma_t dma, dma_a, dma_b;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma_a = odp_dma_create(name, &dma_param);
+
+ CU_ASSERT_FATAL(dma_a != ODP_DMA_INVALID);
+
+ dma = odp_dma_lookup(name);
+
+ CU_ASSERT(odp_dma_to_u64(dma) == odp_dma_to_u64(dma_a));
+
+ dma_b = odp_dma_create(name, &dma_param);
+
+ CU_ASSERT_FATAL(dma_b != ODP_DMA_INVALID);
+
+ dma = odp_dma_lookup(name);
+
+ CU_ASSERT(odp_dma_to_u64(dma) == odp_dma_to_u64(dma_a) ||
+ odp_dma_to_u64(dma) == odp_dma_to_u64(dma_b));
+ CU_ASSERT(odp_dma_to_u64(dma_a) != odp_dma_to_u64(dma_b));
+ CU_ASSERT(odp_dma_destroy(dma_a) == 0);
+ CU_ASSERT(odp_dma_destroy(dma_b) == 0);
+}
+
+static void test_dma_compl_pool(void)
+{
+ odp_pool_t pool;
+ odp_pool_info_t pool_info;
+ odp_dma_compl_t compl[global.dma_capa.max_transfers];
+ odp_event_t ev;
+ uint64_t u64;
+ int ret;
+ uint32_t i, j;
+ const char *name = COMPL_POOL_NAME;
+
+ CU_ASSERT_FATAL(global.compl_pool != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(name);
+ CU_ASSERT(pool == global.compl_pool);
+
+ memset(&pool_info, 0x55, sizeof(odp_pool_info_t));
+ ret = odp_pool_info(global.compl_pool, &pool_info);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT(strcmp(pool_info.name, name) == 0);
+ CU_ASSERT(pool_info.pool_ext == 0);
+ CU_ASSERT(pool_info.type == ODP_POOL_DMA_COMPL);
+ CU_ASSERT(pool_info.dma_pool_param.num == global.dma_capa.max_transfers);
+ CU_ASSERT(pool_info.dma_pool_param.uarea_size == 0);
+ CU_ASSERT(pool_info.dma_pool_param.cache_size == global.cache_size);
+
+ for (i = 0; i < global.dma_capa.max_transfers; i++) {
+ compl[i] = odp_dma_compl_alloc(global.compl_pool);
+
+ u64 = odp_dma_compl_to_u64(compl[i]);
+ CU_ASSERT(u64 != odp_dma_compl_to_u64(ODP_DMA_COMPL_INVALID));
+
+ if (compl[i] == ODP_DMA_COMPL_INVALID)
+ break;
+
+ /* No source pool for DMA completion events */
+ ev = odp_dma_compl_to_event(compl[i]);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
+ printf("\n DMA compl handle: 0x%" PRIx64 "\n", u64);
+ odp_dma_compl_print(compl[i]);
+ }
+
+ for (j = 0; j < i; j++)
+ odp_dma_compl_free(compl[j]);
+}
+
+static void test_dma_compl_pool_same_name(void)
+{
+ odp_dma_pool_param_t dma_pool_param;
+ odp_pool_t pool, pool_a, pool_b;
+ const char *name = COMPL_POOL_NAME;
+
+ pool_a = global.compl_pool;
+
+ pool = odp_pool_lookup(name);
+ CU_ASSERT(pool == pool_a);
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = global.dma_capa.max_transfers;
+
+ /* Second pool with the same name */
+ pool_b = odp_dma_pool_create(name, &dma_pool_param);
+ CU_ASSERT_FATAL(pool_b != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(name);
+ CU_ASSERT(pool == pool_a || pool == pool_b);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool_b) == 0);
+}
+
+static void test_dma_compl_pool_max_pools(void)
+{
+ odp_dma_pool_param_t dma_pool_param;
+ /* Max pools minus the ones already created in global init */
+ uint32_t num = global.dma_capa.pool.max_pools - 2, i, j;
+ odp_pool_t pools[num];
+ int ret;
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = global.dma_capa.max_transfers;
+
+ for (i = 0; i < num; i++) {
+ pools[i] = odp_dma_pool_create(NULL, &dma_pool_param);
+ CU_ASSERT(pools[i] != ODP_POOL_INVALID);
+
+ if (pools[i] == ODP_POOL_INVALID) {
+ ODPH_ERR("DMA completion pool create failed: %u / %u\n", i, num);
+ break;
+ }
+ }
+
+ for (j = 0; j < i; j++) {
+ ret = odp_pool_destroy(pools[j]);
+ CU_ASSERT(ret == 0);
+
+ if (ret == -1)
+ ODPH_ERR("DMA completion pool destroy failed: %u / %u\n", j, i);
+ }
+}
+
+static void test_dma_compl_user_area(void)
+{
+ odp_dma_pool_param_t dma_pool_param;
+ uint32_t num = ODPH_MIN(ELEM_NUM, global.dma_capa.pool.max_num),
+ size = global.dma_capa.pool.max_uarea_size, i;
+ odp_pool_t pool;
+ odp_dma_compl_t compl_evs[num];
+ void *addr, *prev = NULL;
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = num;
+ dma_pool_param.uarea_size = size;
+ pool = odp_dma_pool_create(NULL, &dma_pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ odp_event_t ev;
+ int flag = 0;
+
+ compl_evs[i] = odp_dma_compl_alloc(pool);
+
+ if (compl_evs[i] == ODP_DMA_COMPL_INVALID)
+ break;
+
+ addr = odp_dma_compl_user_area(compl_evs[i]);
+
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ ev = odp_dma_compl_to_event(compl_evs[i]);
+ CU_ASSERT(odp_event_user_area(ev) == addr);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+ CU_ASSERT(flag < 0);
+
+ prev = addr;
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_dma_compl_free(compl_evs[j]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void init_event_uarea(void *uarea, uint32_t size, void *args, uint32_t index)
+{
+ uarea_init_t *data = args;
+
+ data->count++;
+ data->mark[index] = 1;
+ memset(uarea, UAREA, size);
+}
+
+static void test_dma_compl_user_area_init(void)
+{
+ odp_dma_pool_param_t dma_pool_param;
+ uint32_t num = ODPH_MIN(ELEM_NUM, global.dma_capa.pool.max_num), i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_dma_compl_t compl_evs[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.uarea_init.init_fn = init_event_uarea;
+ dma_pool_param.uarea_init.args = &data;
+ dma_pool_param.num = num;
+ dma_pool_param.uarea_size = 1;
+ pool = odp_dma_pool_create(NULL, &dma_pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ compl_evs[i] = odp_dma_compl_alloc(pool);
+
+ CU_ASSERT(compl_evs[i] != ODP_DMA_COMPL_INVALID);
+
+ if (compl_evs[i] == ODP_DMA_COMPL_INVALID)
+ break;
+
+ uarea = odp_dma_compl_user_area(compl_evs[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_dma_compl_free(compl_evs[j]);
+
+ odp_pool_destroy(pool);
+}
+
+static void init_source(uint8_t *src, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ src[i] = i;
+}
+
+static int check_equal(uint8_t *src, uint8_t *dst, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ if (src[i] != dst[i])
+ return -1;
+
+ return 0;
+}
+
+static int check_zero(uint8_t *ptr, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ if (ptr[i])
+ return -1;
+
+ return 0;
+}
+
+static int do_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param, int multi, int res)
+{
+ int i, ret;
+ odp_dma_result_t result;
+ const odp_dma_transfer_param_t *trs_ptr[1] = {trs_param};
+ odp_dma_result_t *result_ptr[1] = {&result};
+
+ memset(&result, 0, sizeof(odp_dma_result_t));
+
+ for (i = 0; i < RETRIES; i++) {
+ if (!multi && !res)
+ ret = odp_dma_transfer(dma, trs_param, NULL);
+ else if (!multi && res)
+ ret = odp_dma_transfer(dma, trs_param, &result);
+ else if (multi && !res)
+ ret = odp_dma_transfer_multi(dma, trs_ptr, NULL, 1);
+ else
+ ret = odp_dma_transfer_multi(dma, trs_ptr, result_ptr, 1);
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret == 1);
+
+ if (res)
+ CU_ASSERT(result.success);
+
+ return ret;
+}
+
+static int do_transfer_async(odp_dma_t dma, odp_dma_transfer_param_t *trs_param,
+ odp_dma_compl_mode_t compl_mode, int multi)
+{
+ int num_trs = multi ? multi : 1;
+ odp_dma_compl_param_t compl_param[num_trs];
+ const odp_dma_compl_param_t *compl_ptr[num_trs];
+ const odp_dma_transfer_param_t *trs_ptr[num_trs];
+ odp_event_t ev;
+ odp_dma_compl_t compl;
+ int i, j, ret, done;
+ uint32_t user_data = USER_DATA;
+ odp_dma_result_t result;
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+ uint64_t sched_wait = odp_schedule_wait_time(wait_ns);
+ void *user_ptr = &user_data;
+
+ for (i = 0; i < num_trs; i++) {
+ odp_dma_compl_param_init(&compl_param[i]);
+ compl_param[i].compl_mode = compl_mode;
+
+ if (compl_mode == ODP_DMA_COMPL_EVENT) {
+ compl = odp_dma_compl_alloc(global.compl_pool);
+
+ CU_ASSERT(compl != ODP_DMA_COMPL_INVALID);
+ if (compl == ODP_DMA_COMPL_INVALID)
+ return -1;
+
+ compl_param[i].event = odp_dma_compl_to_event(compl);
+ compl_param[i].queue = global.queue;
+ } else if (compl_mode == ODP_DMA_COMPL_POLL) {
+ compl_param[i].transfer_id = odp_dma_transfer_id_alloc(dma);
+
+ CU_ASSERT(compl_param[i].transfer_id != ODP_DMA_TRANSFER_ID_INVALID);
+ if (compl_param[i].transfer_id == ODP_DMA_TRANSFER_ID_INVALID)
+ return -1;
+ } else if (compl_mode != ODP_DMA_COMPL_NONE) {
+ ODPH_ERR("Wrong compl mode: %u\n", compl_mode);
+ return -1;
+ }
+
+ compl_param[i].user_ptr = user_ptr;
+
+ if (multi) {
+ trs_ptr[i] = &trs_param[i];
+ compl_ptr[i] = &compl_param[i];
+ }
+ }
+
+ for (i = 0; i < RETRIES; i++) {
+ if (multi)
+ ret = odp_dma_transfer_start_multi(dma, trs_ptr, compl_ptr, num_trs);
+ else
+ ret = odp_dma_transfer_start(dma, trs_param, compl_param);
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret == num_trs);
+
+ if (ret < 1)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ memset(&result, 0, sizeof(odp_dma_result_t));
+
+ if (compl_mode == ODP_DMA_COMPL_POLL) {
+ for (j = 0; j < TIMEOUT; j++) {
+ done = odp_dma_transfer_done(dma, compl_param[i].transfer_id,
+ &result);
+ if (done)
+ break;
+
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT(done == 1);
+ CU_ASSERT(result.success);
+ CU_ASSERT(result.user_ptr == user_ptr);
+ CU_ASSERT(user_data == USER_DATA);
+
+ odp_dma_transfer_id_free(dma, compl_param[i].transfer_id);
+ } else if (compl_mode == ODP_DMA_COMPL_EVENT) {
+ odp_queue_t from = ODP_QUEUE_INVALID;
+
+ for (j = 0; j < TIMEOUT; j++) {
+ ev = odp_schedule(&from, sched_wait);
+ if (ev != ODP_EVENT_INVALID)
+ break;
+ }
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ if (ev == ODP_EVENT_INVALID)
+ return -1;
+
+ CU_ASSERT(from == global.queue);
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_DMA_COMPL);
+
+ compl = odp_dma_compl_from_event(ev);
+ CU_ASSERT(compl != ODP_DMA_COMPL_INVALID);
+
+ CU_ASSERT(odp_dma_compl_result(compl, &result) == 0);
+ CU_ASSERT(result.success);
+ CU_ASSERT(result.user_ptr == user_ptr);
+ CU_ASSERT(user_data == USER_DATA);
+
+ /* Test also without result struct output */
+ CU_ASSERT(odp_dma_compl_result(compl, NULL) == 0);
+
+ /* Test compl event print on the first event */
+ if (global.event_count == 0) {
+ printf("\n\n");
+ odp_dma_compl_print(compl);
+ }
+
+ /* Test both ways to free the event */
+ if (global.event_count % 2)
+ odp_event_free(ev);
+ else
+ odp_dma_compl_free(compl);
+
+ global.event_count++;
+ }
+ }
+
+ return 1;
+}
+
+static void test_dma_addr_to_addr(odp_dma_compl_mode_t compl_mode_mask, uint32_t num,
+ int multi, int res)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg[num];
+ odp_dma_seg_t dst_seg[num];
+ int ret;
+ uint32_t i, cur_len;
+ uint8_t *src = global.src_addr + OFFSET;
+ uint8_t *dst = global.dst_addr + OFFSET;
+ uint32_t seg_len = ODPH_MIN(global.len / num, global.dma_capa.max_seg_len);
+ uint32_t len = seg_len * num;
+ uint32_t offset = 0;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ memset(src_seg, 0, sizeof(src_seg));
+ memset(dst_seg, 0, sizeof(dst_seg));
+
+ for (i = 0; i < num; i++) {
+ cur_len = seg_len;
+ if (i == num - 1)
+ cur_len = len - seg_len * i;
+
+ src_seg[i].addr = src + offset;
+ src_seg[i].len = cur_len;
+ dst_seg[i].addr = dst + offset;
+ dst_seg[i].len = cur_len;
+ offset += cur_len;
+ }
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.num_src = num;
+ trs_param.num_dst = num;
+ trs_param.src_seg = src_seg;
+ trs_param.dst_seg = dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, res);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, global.len - len + TRAILER) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_addr_trs(odp_dma_compl_mode_t compl_mode_mask, uint32_t num_trs,
+ int multi, int res)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int compl_none;
+ uint32_t i, cur_len;
+ odp_dma_compl_mode_t compl_mode;
+ uint8_t *src = global.src_addr + OFFSET;
+ uint8_t *dst = global.dst_addr + OFFSET;
+ uint32_t trs_len = ODPH_MIN(global.len / num_trs, global.dma_capa.max_seg_len);
+ uint32_t len = trs_len * num_trs;
+ uint32_t offset = 0;
+ int ret = -1;
+
+ compl_none = 0;
+ if (compl_mode_mask & ODP_DMA_COMPL_NONE)
+ compl_none = 1;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ memset(&src_seg, 0, sizeof(src_seg));
+ memset(&dst_seg, 0, sizeof(dst_seg));
+
+ for (i = 0; i < num_trs; i++) {
+ compl_mode = compl_mode_mask;
+ if (compl_none)
+ compl_mode = ODP_DMA_COMPL_NONE;
+
+ cur_len = trs_len;
+ if (i == num_trs - 1) {
+ cur_len = len - trs_len * i;
+ compl_mode = compl_mode_mask & ~ODP_DMA_COMPL_NONE;
+ }
+
+ src_seg.addr = src + offset;
+ src_seg.len = cur_len;
+ dst_seg.addr = dst + offset;
+ dst_seg.len = cur_len;
+ offset += cur_len;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, res);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode, multi);
+
+ if (ret < 1)
+ break;
+ }
+
+ if (ret > 0) {
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, global.len - len + TRAILER) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_addr_max_trs(odp_dma_compl_mode_t compl_mode_mask)
+{
+ odp_dma_param_t dma_param;
+ uint32_t num_trs = global.dma_capa.max_transfers;
+ odp_dma_transfer_param_t trs_param[num_trs];
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg[num_trs];
+ odp_dma_seg_t dst_seg[num_trs];
+ int ret;
+ uint32_t i, cur_len;
+ uint8_t *src = global.src_addr + OFFSET;
+ uint8_t *dst = global.dst_addr + OFFSET;
+ uint32_t seg_len = ODPH_MIN(global.len / num_trs, global.dma_capa.max_seg_len);
+ uint32_t len = seg_len * num_trs;
+ uint32_t offset = 0;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ memset(src_seg, 0, sizeof(src_seg));
+ memset(dst_seg, 0, sizeof(dst_seg));
+
+ for (i = 0; i < num_trs; i++) {
+ cur_len = seg_len;
+ if (i == num_trs - 1)
+ cur_len = len - seg_len * i;
+
+ src_seg[i].addr = src + offset;
+ src_seg[i].len = cur_len;
+ dst_seg[i].addr = dst + offset;
+ dst_seg[i].len = cur_len;
+ offset += cur_len;
+ }
+
+ for (i = 0; i < num_trs; i++) {
+ odp_dma_transfer_param_init(&trs_param[i]);
+ trs_param[i].num_src = 1;
+ trs_param[i].num_dst = 1;
+ trs_param[i].src_seg = &src_seg[i];
+ trs_param[i].dst_seg = &dst_seg[i];
+ }
+
+ ret = do_transfer_async(dma, trs_param, compl_mode_mask, num_trs);
+
+ if (ret > 0) {
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, global.len - len + TRAILER) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_pkt(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *src, *pkt_data;
+ odp_packet_t pkt;
+ uint32_t len, seg_len;
+
+ init_source(global.src_addr, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_pkt", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ memset(pkt_data, 0, seg_len);
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ len = seg_len - OFFSET - TRAILER;
+ if (len > global.len)
+ len = global.len;
+
+ src = global.src_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.addr = src;
+ src_seg.len = len;
+ dst_seg.packet = pkt;
+ dst_seg.offset = OFFSET;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *dst = pkt_data + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(pkt_data, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_to_addr(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *dst, *pkt_data;
+ odp_packet_t pkt;
+ uint32_t len, seg_len;
+
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("pkt_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ init_source(pkt_data, seg_len);
+
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ len = seg_len - OFFSET - TRAILER;
+ if (len > global.len)
+ len = global.len;
+
+ dst = global.dst_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.addr = dst;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *src = pkt_data + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_to_pkt(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *pkt_data, *pkt_data_2;
+ odp_packet_t pkt, pkt_2;
+ uint32_t len, seg_len, seg_len_2;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("pkt_to_pkt", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ pkt_2 = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt_2 != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ init_source(pkt_data, seg_len);
+
+ seg_len_2 = odp_packet_seg_len(pkt_2);
+ pkt_data_2 = odp_packet_data(pkt_2);
+ memset(pkt_data_2, 0, seg_len_2);
+
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ if (seg_len > seg_len_2)
+ seg_len = seg_len_2;
+
+ len = seg_len - OFFSET - TRAILER;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.packet = pkt_2;
+ dst_seg.offset = OFFSET;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *src = pkt_data + OFFSET;
+ uint8_t *dst = pkt_data_2 + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(pkt_data_2, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ odp_packet_free(pkt_2);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_segs_to_addr_sync(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *dst;
+ odp_packet_t pkt;
+ uint32_t i, len, num_segs;
+ uint32_t pkt_len = ODPH_MIN(global.pkt_len, global.len);
+
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create("pkt_segs_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ num_segs = odp_packet_num_segs(pkt);
+ if (num_segs > global.dma_capa.max_src_segs)
+ num_segs = global.dma_capa.max_src_segs;
+
+ init_source(global.src_addr, global.data_size);
+ CU_ASSERT_FATAL(odp_packet_copy_from_mem(pkt, 0, pkt_len, global.src_addr) == 0);
+
+ len = pkt_len - OFFSET - TRAILER;
+ dst = global.dst_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.addr = dst;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ for (i = 0; i < RETRIES; i++) {
+ ret = odp_dma_transfer(dma, &trs_param, NULL);
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret > 0);
+
+ if (ret > 0) {
+ odp_packet_seg_t pkt_seg = odp_packet_first_seg(pkt);
+ uint8_t *src = odp_packet_data(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+
+ src += OFFSET;
+ seg_len -= OFFSET;
+
+ for (i = 0; i < num_segs; i++) {
+ if (i == (num_segs - 1))
+ seg_len -= TRAILER;
+
+ CU_ASSERT(check_equal(src, dst, seg_len) == 0);
+
+ dst += seg_len;
+ pkt_seg = odp_packet_next_seg(pkt, pkt_seg);
+ if (pkt_seg != ODP_PACKET_SEG_INVALID) {
+ src = odp_packet_seg_data(pkt, pkt_seg);
+ seg_len = odp_packet_seg_data_len(pkt, pkt_seg);
+ }
+ }
+
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(global.dst_addr + OFFSET + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static int check_sync(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_session_count(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.max_sessions > 1)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_event(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_event_user_area(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if ((global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) &&
+ global.dma_capa.pool.max_uarea_size > 0)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_event_user_area_init(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.pool.max_uarea_size > 0 && global.dma_capa.pool.uarea_persistence)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_scheduled(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.queue_type_sched &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_poll(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_sched_none(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.queue_type_sched &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_NONE))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_poll_none(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL &&
+ global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_NONE)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void test_dma_addr_to_addr_sync(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_SYNC, global.dma_capa.max_transfers * 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 3, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_res(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, 0, RESULT);
+}
+
+static void get_seg_lens(uint32_t max_len, uint32_t *src, uint32_t *dst)
+{
+ uint32_t src_segs = *src, dst_segs = *dst, denom = ODPH_MIN(src_segs, dst_segs);
+
+ max_len = ODPH_MIN(max_len / denom, global.dma_capa.max_seg_len) * denom;
+ *src = max_len / src_segs;
+ *dst = *src * src_segs / dst_segs + *src * src_segs % dst_segs;
+}
+
+static void test_dma_addr_to_addr_sync_max_seg(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg[global.dma_capa.max_src_segs];
+ odp_dma_seg_t dst_seg[global.dma_capa.max_dst_segs];
+ uint32_t src_len = global.dma_capa.max_src_segs, dst_len = global.dma_capa.max_dst_segs,
+ len;
+ int ret;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create("addr_to_addr_max_seg", &dma_param);
+
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ get_seg_lens(global.len, &src_len, &dst_len);
+
+ for (uint32_t i = 0; i < global.dma_capa.max_src_segs; i++) {
+ uint8_t *addr = global.src_addr + i * src_len;
+
+ memset(&src_seg[i], 0, sizeof(odp_dma_seg_t));
+ src_seg[i].addr = addr;
+ src_seg[i].len = src_len;
+ }
+
+ len = src_len * global.dma_capa.max_src_segs;
+
+ for (uint32_t i = 0; i < global.dma_capa.max_dst_segs; i++) {
+ uint8_t *addr = global.dst_addr + i * dst_len;
+
+ memset(&dst_seg[i], 0, sizeof(odp_dma_seg_t));
+ dst_seg[i].addr = addr;
+ dst_seg[i].len = ODPH_MIN(len, dst_len);
+ len -= dst_len;
+ }
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.num_src = global.dma_capa.max_src_segs;
+ trs_param.num_dst = global.dma_capa.max_dst_segs;
+ trs_param.src_seg = src_seg;
+ trs_param.dst_seg = dst_seg;
+ ret = do_transfer(dma, &trs_param, 0, 0);
+
+ if (ret > 0) {
+ len = src_len * global.dma_capa.max_src_segs;
+
+ CU_ASSERT(check_equal(global.src_addr, global.dst_addr, len) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_pkt_sync(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_pkt_to_addr_sync(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_pkt_to_pkt_sync(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_pkt_to_pkt_sync_max_seg(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_packet_t pkt;
+ odp_dma_seg_t src_seg[global.dma_capa.max_src_segs];
+ odp_dma_seg_t dst_seg[global.dma_capa.max_dst_segs];
+ uint32_t src_len = global.dma_capa.max_src_segs, dst_len = global.dma_capa.max_dst_segs,
+ len;
+ int ret;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create("pkt_to_pkt_max_seg", &dma_param);
+
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ get_seg_lens(odp_packet_seg_len(pkt), &src_len, &dst_len);
+ odp_packet_free(pkt);
+
+ for (uint32_t i = 0; i < global.dma_capa.max_src_segs; i++) {
+ pkt = odp_packet_alloc(global.pkt_pool, src_len);
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ init_source(odp_packet_data(pkt), src_len);
+ memset(&src_seg[i], 0, sizeof(odp_dma_seg_t));
+ src_seg[i].packet = pkt;
+ src_seg[i].len = src_len;
+ }
+
+ len = src_len * global.dma_capa.max_src_segs;
+
+ for (uint32_t i = 0; i < global.dma_capa.max_dst_segs; i++) {
+ pkt = odp_packet_alloc(global.pkt_pool, dst_len);
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ memset(odp_packet_data(pkt), 0, dst_len);
+ memset(&dst_seg[i], 0, sizeof(odp_dma_seg_t));
+ dst_seg[i].packet = pkt;
+ dst_seg[i].len = ODPH_MIN(len, dst_len);
+ len -= dst_len;
+ }
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.num_src = global.dma_capa.max_src_segs;
+ trs_param.num_dst = global.dma_capa.max_dst_segs;
+ trs_param.src_seg = src_seg;
+ trs_param.dst_seg = dst_seg;
+ ret = do_transfer(dma, &trs_param, 0, 0);
+
+ if (ret > 0) {
+ len = src_len * global.dma_capa.max_src_segs;
+ uint8_t src[len], dst[len];
+
+ for (uint32_t i = 0; i < global.dma_capa.max_src_segs; i++) {
+ memcpy(src + i * src_len, odp_packet_data(src_seg[i].packet),
+ src_seg[i].len);
+ odp_packet_free(src_seg[i].packet);
+ }
+
+ for (uint32_t i = 0; i < global.dma_capa.max_dst_segs; i++) {
+ memcpy(dst + i * dst_len, odp_packet_data(dst_seg[i].packet),
+ dst_seg[i].len);
+ odp_packet_free(dst_seg[i].packet);
+ }
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_addr_poll(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_poll_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_POLL, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_poll_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 3, 0, 0);
+}
+
+static void test_dma_addr_to_pkt_poll(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_pkt_to_addr_poll(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_pkt_to_pkt_poll(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_addr_to_addr_event(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_EVENT, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 3, 0, 0);
+}
+
+static void test_dma_addr_to_pkt_event(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_pkt_to_addr_event(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_pkt_to_pkt_event(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_addr_to_addr_poll_none(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_POLL | ODP_DMA_COMPL_NONE, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_none(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_NONE, 2, 0, 0);
+}
+
+static void test_dma_multi_addr_to_addr_sync(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_addr_sync_res(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, MULTI, RESULT);
+}
+
+static void test_dma_multi_addr_to_pkt_sync(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_sync(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_sync(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_addr_to_addr_poll(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_addr_poll_max_trs(void)
+{
+ test_dma_addr_to_addr_max_trs(ODP_DMA_COMPL_POLL);
+}
+
+static void test_dma_multi_addr_to_pkt_poll(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_poll(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_poll(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_addr_to_addr_event(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_addr_event_max_trs(void)
+{
+ test_dma_addr_to_addr_max_trs(ODP_DMA_COMPL_EVENT);
+}
+
+static void test_dma_multi_addr_to_pkt_event(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_event(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_event(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+odp_testinfo_t dma_suite[] = {
+ ODP_TEST_INFO(test_dma_capability),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_param_init, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_debug, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_same_name_null, check_session_count),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_same_name_named, check_session_count),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool, check_event),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool_same_name, check_event),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool_max_pools, check_event),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_user_area, check_event_user_area),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_user_area_init, check_event_user_area_init),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_mtrs, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_mseg, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_res, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_max_seg, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_sync_max_seg, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_mtrs, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_mseg, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_mtrs, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_mseg, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_none, check_poll_none),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_none, check_sched_none),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_sync_res, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_poll_max_trs, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_event_max_trs, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_segs_to_addr_sync, check_sync),
+ ODP_TEST_INFO_NULL
+};
+
+odp_suiteinfo_t dma_suites[] = {
+ {"DMA", dma_suite_init, dma_suite_term, dma_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(dma_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/errno/.gitignore b/test/validation/api/errno/.gitignore
index 12256e38c..12256e38c 100644
--- a/test/common_plat/validation/api/errno/.gitignore
+++ b/test/validation/api/errno/.gitignore
diff --git a/test/validation/api/errno/Makefile.am b/test/validation/api/errno/Makefile.am
new file mode 100644
index 000000000..de13afbfb
--- /dev/null
+++ b/test/validation/api/errno/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = errno_main
+errno_main_SOURCES = errno.c
diff --git a/test/common_plat/validation/api/errno/errno.c b/test/validation/api/errno/errno.c
index e3b6ced54..70708ce01 100644
--- a/test/common_plat/validation/api/errno/errno.c
+++ b/test/validation/api/errno/errno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,9 +6,8 @@
#include <odp_api.h>
#include "odp_cunit_common.h"
-#include "errno.h"
-void errno_test_odp_errno_sunny_day(void)
+static void errno_test_odp_errno_sunny_day(void)
{
int my_errno;
@@ -29,12 +28,12 @@ odp_suiteinfo_t errno_suites[] = {
ODP_SUITE_INFO_NULL,
};
-int errno_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
int ret;
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
ret = odp_cunit_register(errno_suites);
diff --git a/test/validation/api/event/.gitignore b/test/validation/api/event/.gitignore
new file mode 100644
index 000000000..05d34d7c8
--- /dev/null
+++ b/test/validation/api/event/.gitignore
@@ -0,0 +1 @@
+event_main
diff --git a/test/validation/api/event/Makefile.am b/test/validation/api/event/Makefile.am
new file mode 100644
index 000000000..0d26035ed
--- /dev/null
+++ b/test/validation/api/event/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = event_main
+event_main_SOURCES = event.c
diff --git a/test/validation/api/event/event.c b/test/validation/api/event/event.c
new file mode 100644
index 000000000..fbcc08d6f
--- /dev/null
+++ b/test/validation/api/event/event.c
@@ -0,0 +1,473 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#define NUM_EVENTS 100
+#define EVENT_SIZE 100
+#define EVENT_BURST 10
+
+static void event_test_free(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ int i;
+ odp_buffer_t buf;
+ odp_packet_t pkt;
+ odp_timeout_t tmo;
+ odp_event_subtype_t subtype;
+ odp_event_t event[EVENT_BURST];
+
+ /* Buffer events */
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.num = NUM_EVENTS;
+ pool_param.buf.size = EVENT_SIZE;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < EVENT_BURST; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT(odp_event_is_valid(odp_buffer_to_event(buf)) == 1);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ event[i] = odp_buffer_to_event(buf);
+ CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_BUFFER);
+ CU_ASSERT(odp_event_subtype(event[i]) == ODP_EVENT_NO_SUBTYPE);
+ CU_ASSERT(odp_event_types(event[i], &subtype) ==
+ ODP_EVENT_BUFFER);
+ CU_ASSERT(subtype == ODP_EVENT_NO_SUBTYPE);
+ }
+
+ for (i = 0; i < EVENT_BURST; i++)
+ odp_event_free(event[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+ /* Packet events */
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.num = NUM_EVENTS;
+ pool_param.pkt.len = EVENT_SIZE;
+ pool_param.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < EVENT_BURST; i++) {
+ pkt = odp_packet_alloc(pool, EVENT_SIZE);
+ CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt)) == 1);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ event[i] = odp_packet_to_event(pkt);
+ CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_PACKET);
+ CU_ASSERT(odp_event_subtype(event[i]) ==
+ ODP_EVENT_PACKET_BASIC);
+ CU_ASSERT(odp_event_types(event[i], &subtype) ==
+ ODP_EVENT_PACKET);
+ CU_ASSERT(subtype == ODP_EVENT_PACKET_BASIC);
+ }
+
+ for (i = 0; i < EVENT_BURST; i++)
+ odp_event_free(event[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+ /* Timeout events */
+ odp_pool_param_init(&pool_param);
+ pool_param.tmo.num = NUM_EVENTS;
+ pool_param.type = ODP_POOL_TIMEOUT;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < EVENT_BURST; i++) {
+ tmo = odp_timeout_alloc(pool);
+ CU_ASSERT(odp_event_is_valid(odp_timeout_to_event(tmo)) == 1);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+ event[i] = odp_timeout_to_event(tmo);
+ CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_TIMEOUT);
+ CU_ASSERT(odp_event_subtype(event[i]) == ODP_EVENT_NO_SUBTYPE);
+ CU_ASSERT(odp_event_types(event[i], &subtype) ==
+ ODP_EVENT_TIMEOUT);
+ CU_ASSERT(subtype == ODP_EVENT_NO_SUBTYPE);
+ }
+
+ for (i = 0; i < EVENT_BURST; i++)
+ odp_event_free(event[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void event_test_free_multi(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ int i, j;
+ odp_buffer_t buf;
+ odp_packet_t pkt;
+ odp_timeout_t tmo;
+ odp_event_t event[EVENT_BURST];
+
+ /* Buffer events */
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.num = NUM_EVENTS;
+ pool_param.buf.size = EVENT_SIZE;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < EVENT_BURST; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ event[i] = odp_buffer_to_event(buf);
+ }
+
+ if (j == 0)
+ odp_event_free_multi(event, EVENT_BURST);
+ else
+ odp_event_free_sp(event, EVENT_BURST);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+ /* Packet events */
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.num = NUM_EVENTS;
+ pool_param.pkt.len = EVENT_SIZE;
+ pool_param.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < EVENT_BURST; i++) {
+ pkt = odp_packet_alloc(pool, EVENT_SIZE);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ event[i] = odp_packet_to_event(pkt);
+ }
+
+ if (j == 0)
+ odp_event_free_multi(event, EVENT_BURST);
+ else
+ odp_event_free_sp(event, EVENT_BURST);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+ /* Timeout events */
+ odp_pool_param_init(&pool_param);
+ pool_param.tmo.num = NUM_EVENTS;
+ pool_param.type = ODP_POOL_TIMEOUT;
+
+ pool = odp_pool_create("event_free", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < EVENT_BURST; i++) {
+ tmo = odp_timeout_alloc(pool);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+ event[i] = odp_timeout_to_event(tmo);
+ }
+
+ if (j == 0)
+ odp_event_free_multi(event, EVENT_BURST);
+ else
+ odp_event_free_sp(event, EVENT_BURST);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void event_test_free_multi_mixed(void)
+{
+ odp_pool_t pool1, pool2, pool3;
+ odp_pool_param_t pool_param;
+ int i, j;
+ odp_buffer_t buf;
+ odp_packet_t pkt;
+ odp_timeout_t tmo;
+ odp_event_t event[3 * EVENT_BURST];
+
+ /* Buffer events */
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.num = NUM_EVENTS;
+ pool_param.buf.size = EVENT_SIZE;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool1 = odp_pool_create("event_free1", &pool_param);
+ CU_ASSERT_FATAL(pool1 != ODP_POOL_INVALID);
+
+ /* Packet events */
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.num = NUM_EVENTS;
+ pool_param.pkt.len = EVENT_SIZE;
+ pool_param.type = ODP_POOL_PACKET;
+
+ pool2 = odp_pool_create("event_free2", &pool_param);
+ CU_ASSERT_FATAL(pool2 != ODP_POOL_INVALID);
+
+ /* Timeout events */
+ odp_pool_param_init(&pool_param);
+ pool_param.tmo.num = NUM_EVENTS;
+ pool_param.type = ODP_POOL_TIMEOUT;
+
+ pool3 = odp_pool_create("event_free3", &pool_param);
+ CU_ASSERT_FATAL(pool3 != ODP_POOL_INVALID);
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < 3 * EVENT_BURST;) {
+ buf = odp_buffer_alloc(pool1);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ event[i] = odp_buffer_to_event(buf);
+ i++;
+ pkt = odp_packet_alloc(pool2, EVENT_SIZE);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ event[i] = odp_packet_to_event(pkt);
+ i++;
+ tmo = odp_timeout_alloc(pool3);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+ event[i] = odp_timeout_to_event(tmo);
+ i++;
+ }
+
+ if (j == 0)
+ odp_event_free_multi(event, 3 * EVENT_BURST);
+ else
+ odp_event_free_sp(event, 3 * EVENT_BURST);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool1) == 0);
+ CU_ASSERT(odp_pool_destroy(pool2) == 0);
+ CU_ASSERT(odp_pool_destroy(pool3) == 0);
+}
+
+#define NUM_TYPE_TEST 6
+
+static void type_test_init(odp_pool_t *buf_pool, odp_pool_t *pkt_pool,
+ odp_event_t buf_event[],
+ odp_event_t pkt_event[],
+ odp_event_t event[])
+{
+ odp_pool_t pool1, pool2;
+ odp_pool_param_t pool_param;
+ int i;
+ odp_buffer_t buf;
+ odp_packet_t pkt;
+
+ /* Buffer events */
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.num = NUM_EVENTS;
+ pool_param.buf.size = EVENT_SIZE;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool1 = odp_pool_create("event_type_buf", &pool_param);
+ CU_ASSERT_FATAL(pool1 != ODP_POOL_INVALID);
+
+ for (i = 0; i < NUM_TYPE_TEST; i++) {
+ buf = odp_buffer_alloc(pool1);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ buf_event[i] = odp_buffer_to_event(buf);
+ }
+
+ /* Packet events */
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.num = NUM_EVENTS;
+ pool_param.pkt.len = EVENT_SIZE;
+ pool_param.type = ODP_POOL_PACKET;
+
+ pool2 = odp_pool_create("event_type_pkt", &pool_param);
+ CU_ASSERT_FATAL(pool2 != ODP_POOL_INVALID);
+
+ for (i = 0; i < NUM_TYPE_TEST; i++) {
+ pkt = odp_packet_alloc(pool2, EVENT_SIZE);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pkt_event[i] = odp_packet_to_event(pkt);
+ }
+
+ /* 1 buf, 1 pkt, 2 buf, 2 pkt, 3 buf, 3 pkt */
+ event[0] = buf_event[0];
+ event[1] = pkt_event[0];
+ event[2] = buf_event[1];
+ event[3] = buf_event[2];
+ event[4] = pkt_event[1];
+ event[5] = pkt_event[2];
+ event[6] = buf_event[3];
+ event[7] = buf_event[4];
+ event[8] = buf_event[5];
+ event[9] = pkt_event[3];
+ event[10] = pkt_event[4];
+ event[11] = pkt_event[5];
+
+ *buf_pool = pool1;
+ *pkt_pool = pool2;
+}
+
+static void event_test_type_multi(void)
+{
+ odp_pool_t buf_pool, pkt_pool;
+ odp_event_type_t type;
+ int num;
+ odp_event_t buf_event[NUM_TYPE_TEST];
+ odp_event_t pkt_event[NUM_TYPE_TEST];
+ odp_event_t event[2 * NUM_TYPE_TEST];
+
+ type_test_init(&buf_pool, &pkt_pool, buf_event, pkt_event, event);
+
+ num = odp_event_type_multi(&event[0], 12, &type);
+ CU_ASSERT(num == 1);
+ CU_ASSERT(type == ODP_EVENT_BUFFER);
+
+ num = odp_event_type_multi(&event[1], 11, &type);
+ CU_ASSERT(num == 1);
+ CU_ASSERT(type == ODP_EVENT_PACKET);
+
+ num = odp_event_type_multi(&event[2], 10, &type);
+ CU_ASSERT(num == 2);
+ CU_ASSERT(type == ODP_EVENT_BUFFER);
+
+ num = odp_event_type_multi(&event[4], 8, &type);
+ CU_ASSERT(num == 2);
+ CU_ASSERT(type == ODP_EVENT_PACKET);
+
+ num = odp_event_type_multi(&event[6], 6, &type);
+ CU_ASSERT(num == 3);
+ CU_ASSERT(type == ODP_EVENT_BUFFER);
+
+ num = odp_event_type_multi(&event[9], 3, &type);
+ CU_ASSERT(num == 3);
+ CU_ASSERT(type == ODP_EVENT_PACKET);
+
+ odp_event_free_multi(buf_event, NUM_TYPE_TEST);
+ odp_event_free_multi(pkt_event, NUM_TYPE_TEST);
+
+ CU_ASSERT(odp_pool_destroy(buf_pool) == 0);
+ CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
+}
+
+static void event_test_types_multi(void)
+{
+ odp_pool_t buf_pool, pkt_pool;
+ odp_event_t buf_event[NUM_TYPE_TEST];
+ odp_event_t pkt_event[NUM_TYPE_TEST];
+ odp_event_t event[2 * NUM_TYPE_TEST];
+ odp_event_type_t event_types[2 * NUM_TYPE_TEST];
+ odp_event_subtype_t event_subtypes[2 * NUM_TYPE_TEST];
+ int i;
+
+ type_test_init(&buf_pool, &pkt_pool, buf_event, pkt_event, event);
+
+ /* Only buffers */
+ odp_event_types_multi(buf_event, event_types, event_subtypes, NUM_TYPE_TEST);
+ for (i = 0; i < NUM_TYPE_TEST; i++) {
+ CU_ASSERT(event_types[i] == ODP_EVENT_BUFFER);
+ CU_ASSERT(event_subtypes[i] == ODP_EVENT_NO_SUBTYPE);
+ }
+
+ /* Only packets */
+ odp_event_types_multi(pkt_event, event_types, event_subtypes, NUM_TYPE_TEST);
+ for (i = 0; i < NUM_TYPE_TEST; i++) {
+ CU_ASSERT(event_types[i] == ODP_EVENT_PACKET);
+ CU_ASSERT(event_subtypes[i] == ODP_EVENT_PACKET_BASIC);
+ }
+
+ /* Mixed events: B P BB PP BBB PPP */
+ odp_event_types_multi(event, event_types, NULL, 2 * NUM_TYPE_TEST);
+ for (i = 0; i < 2 * NUM_TYPE_TEST; i++) {
+ if (i == 0 || i == 2 || i == 3 || i == 6 || i == 7 || i == 8) {
+ /* CU_ASSERT requires extra brackets */
+ CU_ASSERT(event_types[i] == ODP_EVENT_BUFFER);
+ } else {
+ CU_ASSERT(event_types[i] == ODP_EVENT_PACKET);
+ }
+ }
+
+ odp_event_free_multi(buf_event, NUM_TYPE_TEST);
+ odp_event_free_multi(pkt_event, NUM_TYPE_TEST);
+
+ CU_ASSERT(odp_pool_destroy(buf_pool) == 0);
+ CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
+}
+
+static void event_test_filter_packet(void)
+{
+ odp_pool_t buf_pool, pkt_pool;
+ int i, num_pkt, num_rem;
+ int num = 2 * NUM_TYPE_TEST;
+ odp_event_t buf_event[NUM_TYPE_TEST];
+ odp_event_t pkt_event[NUM_TYPE_TEST];
+ odp_event_t event[num];
+ odp_packet_t packet[num];
+ odp_event_t remain[num];
+
+ type_test_init(&buf_pool, &pkt_pool, buf_event, pkt_event, event);
+
+ for (i = 0; i < num; i++) {
+ packet[i] = ODP_PACKET_INVALID;
+ remain[i] = ODP_EVENT_INVALID;
+ }
+
+ num_pkt = odp_event_filter_packet(event, packet, remain, num);
+ CU_ASSERT(num_pkt == NUM_TYPE_TEST);
+
+ for (i = 0; i < num_pkt; i++)
+ CU_ASSERT(packet[i] != ODP_PACKET_INVALID);
+
+ num_rem = num - num_pkt;
+ CU_ASSERT(num_rem == NUM_TYPE_TEST);
+
+ for (i = 0; i < num_rem; i++) {
+ CU_ASSERT(remain[i] != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_type(remain[i]) == ODP_EVENT_BUFFER);
+ }
+
+ odp_event_free_multi(event, num);
+
+ CU_ASSERT(odp_pool_destroy(buf_pool) == 0);
+ CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
+}
+
+static void event_test_is_valid(void)
+{
+ CU_ASSERT(odp_event_is_valid(ODP_EVENT_INVALID) == 0);
+ CU_ASSERT(odp_buffer_is_valid(ODP_BUFFER_INVALID) == 0);
+ CU_ASSERT(odp_packet_is_valid(ODP_PACKET_INVALID) == 0);
+ CU_ASSERT(odp_packet_vector_valid(ODP_PACKET_VECTOR_INVALID) == 0);
+}
+
+odp_testinfo_t event_suite[] = {
+ ODP_TEST_INFO(event_test_free),
+ ODP_TEST_INFO(event_test_free_multi),
+ ODP_TEST_INFO(event_test_free_multi_mixed),
+ ODP_TEST_INFO(event_test_type_multi),
+ ODP_TEST_INFO(event_test_types_multi),
+ ODP_TEST_INFO(event_test_filter_packet),
+ ODP_TEST_INFO(event_test_is_valid),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t event_suites[] = {
+ {"Event", NULL, NULL, event_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(event_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/hash/.gitignore b/test/validation/api/hash/.gitignore
index 6d0bc9314..6d0bc9314 100644
--- a/test/common_plat/validation/api/hash/.gitignore
+++ b/test/validation/api/hash/.gitignore
diff --git a/test/validation/api/hash/Makefile.am b/test/validation/api/hash/Makefile.am
new file mode 100644
index 000000000..0d843ea74
--- /dev/null
+++ b/test/validation/api/hash/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = hash_main
+hash_main_SOURCES = hash.c
diff --git a/test/validation/api/hash/hash.c b/test/validation/api/hash/hash.c
new file mode 100644
index 000000000..a935ef7ac
--- /dev/null
+++ b/test/validation/api/hash/hash.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include <odp_cunit_common.h>
+#include <test_packet_ipv4_with_crc.h>
+
+/* Commonly used CRC check string */
+#define CHECK_STR "123456789"
+#define CHECK_LEN 9
+
+#define CRC32C_INIT 0xffffffff
+#define CRC32C_XOR 0xffffffff
+#define CRC32_INIT 0xffffffff
+#define CRC32_XOR 0xffffffff
+
+/* When Ethernet frame CRC is included into the CRC32 calculation,
+ * the result should match this value. */
+#define ETHCRC_CHECK_VAL 0xdebb20e3
+
+typedef struct hash_test_vector_t {
+ const uint8_t *data;
+ uint32_t len;
+
+ union {
+ uint32_t u32;
+ uint8_t u8[4];
+ } result;
+
+} hash_test_vector_t;
+
+/*
+ * Test vectors 0-4 from RFC 7143.
+ */
+static const uint8_t test_data_0[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const uint8_t test_data_1[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+static const uint8_t test_data_2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+};
+
+static const uint8_t test_data_3[] = {
+ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
+ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
+};
+
+static const uint8_t test_data_4[] = {
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18,
+ 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Various length strings. Terminating null character is not included into
+ * crc calculation. */
+static const uint8_t test_data_5[] = "abcd";
+
+static const uint8_t test_data_6[] = "abcdefgh";
+
+static const uint8_t test_data_7[] =
+ "The quick brown fox jumps over the lazy dog.";
+
+static const uint8_t test_data_8[] = "a";
+
+static const uint8_t test_data_9[] = "ab";
+
+static const uint8_t test_data_10[] = "abc";
+
+static const uint8_t test_data_11[] = "abcdefg";
+
+static const uint8_t test_data_12[] = "The five boxing wizards jump quickly.";
+
+static const uint8_t test_data_13[] = CHECK_STR;
+
+static const hash_test_vector_t crc32c_test_vector[] = {
+ { .data = test_data_0,
+ .len = sizeof(test_data_0),
+ .result.u32 = 0x8a9136aa
+ },
+ { .data = test_data_1,
+ .len = sizeof(test_data_1),
+ .result.u32 = 0x62a8ab43
+ },
+ { .data = test_data_2,
+ .len = sizeof(test_data_2),
+ .result.u32 = 0x46dd794e
+ },
+ { .data = test_data_3,
+ .len = sizeof(test_data_3),
+ .result.u32 = 0x113fdb5c
+ },
+ { .data = test_data_4,
+ .len = sizeof(test_data_4),
+ .result.u32 = 0xd9963a56
+ },
+ { .data = test_data_5,
+ .len = sizeof(test_data_5) - 1,
+ .result.u32 = 0x92c80a31
+ },
+ { .data = test_data_6,
+ .len = sizeof(test_data_6) - 1,
+ .result.u32 = 0x0a9421b7
+ },
+ { .data = test_data_7,
+ .len = sizeof(test_data_7) - 1,
+ .result.u32 = 0x190097b3
+ },
+ { .data = test_data_8,
+ .len = sizeof(test_data_8) - 1,
+ .result.u32 = 0xc1d04330
+ },
+ { .data = test_data_9,
+ .len = sizeof(test_data_9) - 1,
+ .result.u32 = 0xe2a22936
+ },
+ { .data = test_data_10,
+ .len = sizeof(test_data_10) - 1,
+ .result.u32 = 0x364b3fb7
+ },
+ { .data = test_data_11,
+ .len = sizeof(test_data_11) - 1,
+ .result.u32 = 0xe627f441
+ },
+ { .data = test_data_12,
+ .len = sizeof(test_data_12) - 1,
+ .result.u32 = 0xded3059a
+ },
+ { .data = test_data_13,
+ .len = sizeof(test_data_13) - 1,
+ .result.u32 = 0xe3069283
+ }
+};
+
+static const hash_test_vector_t crc32_test_vector[] = {
+ { .data = test_data_0,
+ .len = sizeof(test_data_0),
+ .result.u32 = 0x190a55ad
+ },
+ { .data = test_data_1,
+ .len = sizeof(test_data_1),
+ .result.u32 = 0xff6cab0b
+ },
+ { .data = test_data_2,
+ .len = sizeof(test_data_2),
+ .result.u32 = 0x91267e8a
+ },
+ { .data = test_data_3,
+ .len = sizeof(test_data_3),
+ .result.u32 = 0x9ab0ef72
+ },
+ { .data = test_data_4,
+ .len = sizeof(test_data_4),
+ .result.u32 = 0x51e17412
+ },
+ { .data = test_data_5,
+ .len = sizeof(test_data_5) - 1,
+ .result.u32 = 0xed82cd11
+ },
+ { .data = test_data_6,
+ .len = sizeof(test_data_6) - 1,
+ .result.u32 = 0xaeef2a50
+ },
+ { .data = test_data_7,
+ .len = sizeof(test_data_7) - 1,
+ .result.u32 = 0x519025e9
+ },
+ { .data = test_data_8,
+ .len = sizeof(test_data_8) - 1,
+ .result.u32 = 0xe8b7be43
+ },
+ { .data = test_data_9,
+ .len = sizeof(test_data_9) - 1,
+ .result.u32 = 0x9e83486d
+ },
+ { .data = test_data_10,
+ .len = sizeof(test_data_10) - 1,
+ .result.u32 = 0x352441c2
+ },
+ { .data = test_data_11,
+ .len = sizeof(test_data_11) - 1,
+ .result.u32 = 0x312a6aa6
+ },
+ { .data = test_data_12,
+ .len = sizeof(test_data_12) - 1,
+ .result.u32 = 0xde912acd
+ },
+ { .data = test_data_13,
+ .len = sizeof(test_data_13) - 1,
+ .result.u32 = 0xcbf43926
+ }
+};
+
+static void hash_test_crc32c(void)
+{
+ uint32_t ret, result;
+ int i;
+ int num = ODPH_ARRAY_SIZE(crc32c_test_vector);
+
+ for (i = 0; i < num; i++) {
+ ret = odp_hash_crc32c(crc32c_test_vector[i].data,
+ crc32c_test_vector[i].len,
+ CRC32C_INIT);
+
+ result = CRC32C_XOR ^ ret;
+ CU_ASSERT(result == crc32c_test_vector[i].result.u32);
+ }
+}
+
+static void hash_test_crc32(void)
+{
+ uint32_t ret, result;
+ int i;
+ int num = ODPH_ARRAY_SIZE(crc32_test_vector);
+
+ for (i = 0; i < num; i++) {
+ ret = odp_hash_crc32(crc32_test_vector[i].data,
+ crc32_test_vector[i].len,
+ CRC32_INIT);
+
+ result = CRC32_XOR ^ ret;
+ CU_ASSERT(result == crc32_test_vector[i].result.u32);
+ }
+}
+
+static void hash_test_ethernet_crc32(void)
+{
+ uint32_t ret;
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_64_crc,
+ sizeof(test_packet_ipv4_udp_64_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_68_crc,
+ sizeof(test_packet_ipv4_udp_68_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_70_crc,
+ sizeof(test_packet_ipv4_udp_70_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_71_crc,
+ sizeof(test_packet_ipv4_udp_71_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_287_crc,
+ sizeof(test_packet_ipv4_udp_287_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_400_crc,
+ sizeof(test_packet_ipv4_udp_400_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ ret = odp_hash_crc32(test_packet_ipv4_udp_503_crc,
+ sizeof(test_packet_ipv4_udp_503_crc), CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+}
+
+static void hash_test_ethernet_crc32_odd_align(void)
+{
+ uint32_t ret, size;
+ const uint32_t max_size = sizeof(test_packet_ipv4_udp_503_crc);
+ uint8_t buf[max_size + 1] ODP_ALIGNED(8);
+
+ memset(buf, 0, sizeof(buf));
+
+ size = sizeof(test_packet_ipv4_udp_64_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_64_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_68_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_68_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_70_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_70_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_71_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_71_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_287_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_287_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_400_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_400_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+
+ size = sizeof(test_packet_ipv4_udp_503_crc);
+ memcpy(&buf[1], test_packet_ipv4_udp_503_crc, size);
+ ret = odp_hash_crc32(&buf[1], size, CRC32_INIT);
+ CU_ASSERT(ret == ETHCRC_CHECK_VAL);
+}
+
+/*
+ * Test various commonly used 32 bit CRCs. Used CRC names, parameters and
+ * check values can be found e.g. here:
+ * http://reveng.sourceforge.net/crc-catalogue
+ */
+static void hash_test_crc32_generic(void)
+{
+ uint64_t result_u64;
+ uint32_t result_u32, result;
+ int i, num;
+ odp_hash_crc_param_t crc_param;
+
+ memset(&crc_param, 0, sizeof(odp_hash_crc_param_t));
+ crc_param.width = 32;
+ crc_param.xor_out = 0;
+
+ /* CRC-32 */
+ crc_param.poly = 0x04c11db7;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ num = ODPH_ARRAY_SIZE(crc32_test_vector);
+
+ for (i = 0; i < num; i++) {
+ if (odp_hash_crc_gen64(crc32_test_vector[i].data,
+ crc32_test_vector[i].len,
+ CRC32_INIT,
+ &crc_param, &result_u64)) {
+ printf("CRC-32 not supported\n.");
+ break;
+ }
+
+ result_u32 = CRC32_XOR ^ result_u64;
+ CU_ASSERT(result_u32 == crc32_test_vector[i].result.u32);
+ }
+
+ /* CRC-32C */
+ crc_param.poly = 0x1edc6f41;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ num = ODPH_ARRAY_SIZE(crc32c_test_vector);
+
+ for (i = 0; i < num; i++) {
+ if (odp_hash_crc_gen64(crc32c_test_vector[i].data,
+ crc32c_test_vector[i].len,
+ CRC32C_INIT,
+ &crc_param, &result_u64)) {
+ printf("CRC-32C not supported\n.");
+ break;
+ }
+
+ result_u32 = CRC32C_XOR ^ result_u64;
+ CU_ASSERT(result_u32 == crc32c_test_vector[i].result.u32);
+ }
+
+ /* CRC-32/AUTOSAR */
+ crc_param.poly = 0xf4acfb13;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0x1697d06a;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0xffffffff ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/AUTOSAR not supported\n.");
+ }
+
+ /* CRC-32/BZIP2 */
+ crc_param.poly = 0x04c11db7;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xfc891918;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0xffffffff ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/BZIP2 not supported\n.");
+ }
+
+ /* CRC-32D */
+ crc_param.poly = 0xa833982b;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0x87315576;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0xffffffff ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32D not supported\n.");
+ }
+
+ /* CRC-32/MPEG-2 */
+ crc_param.poly = 0x04c11db7;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x0376e6e7;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/MPEG-2 not supported\n.");
+ }
+
+ /* CRC-32/POSIX */
+ crc_param.poly = 0x04c11db7;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x765e7680;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0xffffffff ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/POSIX not supported\n.");
+ }
+
+ /* CRC-32/POSIX - with XOR parameter used */
+ crc_param.xor_out = 0xffffffff;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/POSIX (with XOR) not supported\n.");
+ }
+
+ crc_param.xor_out = 0;
+
+ /* CRC-32Q */
+ crc_param.poly = 0x814141ab;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x3010bf7f;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32Q not supported\n.");
+ }
+
+ /* CRC-32/JAMCRC */
+ crc_param.poly = 0x04c11db7;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0x340bc6d9;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/JAMCRC not supported\n.");
+ }
+
+ /* CRC-32/XFER */
+ crc_param.poly = 0x000000af;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xbd0be338;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-32/XFER not supported\n.");
+ }
+}
+
+static void hash_test_crc24_generic(void)
+{
+ uint64_t result_u64;
+ uint32_t result_u32, result;
+ odp_hash_crc_param_t crc_param;
+
+ memset(&crc_param, 0, sizeof(odp_hash_crc_param_t));
+ crc_param.width = 24;
+ crc_param.xor_out = 0;
+
+ /* CRC-24 */
+ crc_param.poly = 0x864cfb;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x21cf02;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xb704ce,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24 not supported\n.");
+ }
+
+ /* CRC-24/FLEXRAY-A */
+ crc_param.poly = 0x5d6dcb;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x7979bd;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xfedcba,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/FLEXRAY-A not supported\n.");
+ }
+
+ /* CRC-24/FLEXRAY-B */
+ result = 0x1f23b8;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xabcdef,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/FLEXRAY-B not supported\n.");
+ }
+
+ /* CRC-24/INTERLAKEN */
+ crc_param.poly = 0x328b63;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xb4f3e6;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffffff,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0xffffff ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/INTERLAKEN not supported\n.");
+ }
+
+ /* CRC-24/LTE-A */
+ crc_param.poly = 0x864cfb;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xcde703;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/LTE-A not supported\n.");
+ }
+
+ /* CRC-24/LTE-B */
+ crc_param.poly = 0x800063;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x23ef52;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/LTE-B not supported\n.");
+ }
+
+ /* CRC-24/BLE */
+ crc_param.poly = 0x00065b;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0xc25a56;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x555555,
+ &crc_param, &result_u64) == 0) {
+ result_u32 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u32 == result);
+ } else {
+ printf("CRC-24/BLE not supported\n.");
+ }
+}
+
+static void hash_test_crc16_generic(void)
+{
+ uint64_t result_u64;
+ uint16_t result_u16, result;
+ odp_hash_crc_param_t crc_param;
+
+ memset(&crc_param, 0, sizeof(odp_hash_crc_param_t));
+ crc_param.width = 16;
+ crc_param.xor_out = 0;
+
+ /* CRC-16/ARC */
+ crc_param.poly = 0x8005;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0xbb3d;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/ARC not supported\n.");
+ }
+
+ /* CRC-16/UMTS */
+ crc_param.poly = 0x8005;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xfee8;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/UMTS not supported\n.");
+ }
+
+ /* CRC-16/CDMA2000 */
+ crc_param.poly = 0xc867;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0x4c06;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffff,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/CDMA2000 not supported\n.");
+ }
+
+ /* CRC-16/GENIBUS */
+ crc_param.poly = 0x1021;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xd64e;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffff,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0xffff ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/GENIBUS not supported\n.");
+ }
+
+ /* CRC-16/T10-DIF */
+ crc_param.poly = 0x8bb7;
+ crc_param.reflect_in = 0;
+ crc_param.reflect_out = 0;
+ result = 0xd0db;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/T10-DIF not supported\n.");
+ }
+
+ /* CRC-16/USB */
+ crc_param.poly = 0x8005;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0xb4c8;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffff,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0xffff ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/USB not supported\n.");
+ }
+
+ /* CRC-16/CCITT */
+ crc_param.poly = 0x1021;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0x2189;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0x0,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0x0 ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/CCITT not supported\n.");
+ }
+
+ /* CRC-16/X-25 */
+ crc_param.poly = 0x1021;
+ crc_param.reflect_in = 1;
+ crc_param.reflect_out = 1;
+ result = 0x906e;
+
+ if (odp_hash_crc_gen64(CHECK_STR, CHECK_LEN, 0xffff,
+ &crc_param, &result_u64) == 0) {
+ result_u16 = 0xffff ^ result_u64;
+ CU_ASSERT(result_u16 == result);
+ } else {
+ printf("CRC-16/X25 not supported\n.");
+ }
+}
+
+odp_testinfo_t hash_suite[] = {
+ ODP_TEST_INFO(hash_test_crc32c),
+ ODP_TEST_INFO(hash_test_crc32),
+ ODP_TEST_INFO(hash_test_ethernet_crc32),
+ ODP_TEST_INFO(hash_test_ethernet_crc32_odd_align),
+ ODP_TEST_INFO(hash_test_crc32_generic),
+ ODP_TEST_INFO(hash_test_crc24_generic),
+ ODP_TEST_INFO(hash_test_crc16_generic),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t hash_suites[] = {
+ {"Hash", NULL, NULL, hash_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(hash_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/hints/.gitignore b/test/validation/api/hints/.gitignore
new file mode 100644
index 000000000..586f429bc
--- /dev/null
+++ b/test/validation/api/hints/.gitignore
@@ -0,0 +1 @@
+hints_main
diff --git a/test/validation/api/hints/Makefile.am b/test/validation/api/hints/Makefile.am
new file mode 100644
index 000000000..bcc77f606
--- /dev/null
+++ b/test/validation/api/hints/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = hints_main
+hints_main_SOURCES = hints.c
diff --git a/test/validation/api/hints/hints.c b/test/validation/api/hints/hints.c
new file mode 100644
index 000000000..4c049f33b
--- /dev/null
+++ b/test/validation/api/hints/hints.c
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#include <stdint.h>
+#include <stdlib.h>
+
+ODP_NORETURN static void test_noreturn(void)
+{
+ abort();
+}
+
+int test_weak(void);
+
+ODP_WEAK_SYMBOL int test_weak(void)
+{
+ return 0;
+}
+
+ODP_COLD_CODE static int test_cold(void)
+{
+ return -1;
+}
+
+ODP_HOT_CODE static int test_hot(void)
+{
+ return 1;
+}
+
+ODP_PRINTF_FORMAT(2, 3)
+static int test_printf_format(int level ODP_UNUSED, const char *fmt ODP_UNUSED, ...)
+{
+ return 0;
+}
+
+static void test_hints(void)
+{
+ volatile int val = 1;
+
+ if (odp_unlikely(!val))
+ test_noreturn();
+
+ test_weak();
+ test_cold();
+
+ if (odp_likely(val))
+ test_hot();
+
+ test_printf_format(0, "test");
+}
+
/* Invoke both prefetch hint variants on each element of a small array */
static void test_prefetch(void)
{
	enum { ROUNDS = 10 };
	uint64_t data[ROUNDS];
	int i;

	for (i = 0; i < ROUNDS; i++)
		odp_prefetch(&data[i]);

	for (i = 0; i < ROUNDS; i++)
		odp_prefetch_store(&data[i]);
}
+
+odp_testinfo_t hints_suite[] = {
+ ODP_TEST_INFO(test_hints),
+ ODP_TEST_INFO(test_prefetch),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t align_suites[] = {
+ {"hints", NULL, NULL, hints_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* Parse common options */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(align_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/init/.gitignore b/test/validation/api/init/.gitignore
new file mode 100644
index 000000000..f0ce98bdd
--- /dev/null
+++ b/test/validation/api/init/.gitignore
@@ -0,0 +1 @@
+init_main
diff --git a/test/validation/api/init/Makefile.am b/test/validation/api/init/Makefile.am
new file mode 100644
index 000000000..7465f683f
--- /dev/null
+++ b/test/validation/api/init/Makefile.am
@@ -0,0 +1,15 @@
+include ../Makefile.inc
+
+test_PROGRAMS = init_main
+init_main_SOURCES = init_main.c
+
+EXTRA_DIST = \
+ init_defaults.sh \
+ init_abort.sh \
+ init_log.sh \
+ init_num_thr.sh \
+ init_feature_enabled.sh \
+ init_feature_disabled.sh \
+ init_log_thread.sh \
+ init_test_param_init.sh \
+ init_test_term_abnormal.sh
diff --git a/test/validation/api/init/init_abort.sh b/test/validation/api/init/init_abort.sh
new file mode 100755
index 000000000..27796fcf9
--- /dev/null
+++ b/test/validation/api/init/init_abort.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 1
diff --git a/test/validation/api/init/init_defaults.sh b/test/validation/api/init/init_defaults.sh
new file mode 100755
index 000000000..2215a65a9
--- /dev/null
+++ b/test/validation/api/init/init_defaults.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 0
diff --git a/test/validation/api/init/init_feature_disabled.sh b/test/validation/api/init/init_feature_disabled.sh
new file mode 100755
index 000000000..e538429b6
--- /dev/null
+++ b/test/validation/api/init/init_feature_disabled.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 5
diff --git a/test/validation/api/init/init_feature_enabled.sh b/test/validation/api/init/init_feature_enabled.sh
new file mode 100755
index 000000000..18237cbf9
--- /dev/null
+++ b/test/validation/api/init/init_feature_enabled.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 4
diff --git a/test/validation/api/init/init_log.sh b/test/validation/api/init/init_log.sh
new file mode 100755
index 000000000..cf4177ed5
--- /dev/null
+++ b/test/validation/api/init/init_log.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 2
diff --git a/test/validation/api/init/init_log_thread.sh b/test/validation/api/init/init_log_thread.sh
new file mode 100755
index 000000000..b0bb02220
--- /dev/null
+++ b/test/validation/api/init/init_log_thread.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 6
diff --git a/test/validation/api/init/init_main.c b/test/validation/api/init/init_main.c
new file mode 100644
index 000000000..ab1db421b
--- /dev/null
+++ b/test/validation/api/init/init_main.c
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Replacement abort function */
+static void ODP_NORETURN my_abort_func(void)
+{
+ abort();
+}
+
+/* Replacement log function */
+ODP_PRINTF_FORMAT(2, 3)
+static int my_log_func(odp_log_level_t level __attribute__((unused)),
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
+static uint32_t my_log_thread_func_count;
+
+/* Thread specific log function */
+ODP_PRINTF_FORMAT(2, 3)
+static int my_log_thread_func(odp_log_level_t level, const char *fmt, ...)
+{
+ (void)level;
+ (void)fmt;
+
+ my_log_thread_func_count++;
+
+ return 0;
+}
+
+static void test_param_init(uint8_t fill)
+{
+ odp_init_t param;
+
+ memset(&param, fill, sizeof(param));
+ odp_init_param_init(&param);
+ CU_ASSERT(param.mem_model == ODP_MEM_MODEL_THREAD);
+ CU_ASSERT(param.shm.max_memory == 0);
+}
+
+static void init_test_param_init(void)
+{
+ test_param_init(0);
+ test_param_init(0xff);
+}
+
+static void init_test_defaults(void)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_instance_t current_instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_instance(&current_instance) == 0);
+ CU_ASSERT(memcmp(&current_instance, &instance, sizeof(odp_instance_t)) == 0);
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_abort(void)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+ param.abort_fn = &my_abort_func;
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_log(void)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+ param.log_fn = &my_log_func;
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_log_thread(void)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Test that our print function is called when set. */
+ odp_log_thread_fn_set(my_log_thread_func);
+ my_log_thread_func_count = 0;
+ odp_sys_info_print();
+ CU_ASSERT(my_log_thread_func_count != 0);
+
+ /* Test that our print function is not called when not set. */
+ odp_log_thread_fn_set(NULL);
+ my_log_thread_func_count = 0;
+ odp_sys_info_print();
+ CU_ASSERT(my_log_thread_func_count == 0);
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_num_thr(void)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+ param.mem_model = ODP_MEM_MODEL_THREAD;
+ param.num_worker = 1;
+ param.num_control = 1;
+ param.worker_cpus = NULL;
+ param.control_cpus = NULL;
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_feature(int disable)
+{
+ int ret;
+ odp_instance_t instance;
+ odp_init_t param;
+
+ odp_init_param_init(&param);
+ param.not_used.all_feat = 0;
+
+ if (disable) {
+ param.not_used.feat.cls = 1;
+ param.not_used.feat.compress = 1;
+ param.not_used.feat.crypto = 1;
+ param.not_used.feat.ipsec = 1;
+ param.not_used.feat.schedule = 1;
+ param.not_used.feat.stash = 1;
+ param.not_used.feat.time = 1;
+ param.not_used.feat.timer = 1;
+ param.not_used.feat.tm = 1;
+ }
+
+ ret = odp_init_global(&instance, &param, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_CONTROL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Print system and SHM information into test log. It may show
+ * e.g. memory usage difference when features are disabled. */
+ odp_sys_info_print();
+ odp_shm_print_all();
+
+ ret = odp_term_local();
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_term_global(instance);
+ CU_ASSERT(ret == 0);
+}
+
+static void init_test_feature_enabled(void)
+{
+ init_test_feature(0);
+}
+
+static void init_test_feature_disabled(void)
+{
+ init_test_feature(1);
+}
+
+static void init_test_term_abnormal(void)
+{
+ int ret;
+ odp_instance_t instance;
+
+ ret = odp_init_global(&instance, NULL, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* odp_term_abnormal() is allowed to fail */
+ ret = odp_term_abnormal(instance, 0, NULL);
+
+ if (ret < 0)
+ ODPH_ERR("Failed to perform all abnormal termination actions: %d\n", ret);
+}
+
+odp_testinfo_t testinfo[] = {
+ ODP_TEST_INFO(init_test_defaults),
+ ODP_TEST_INFO(init_test_abort),
+ ODP_TEST_INFO(init_test_log),
+ ODP_TEST_INFO(init_test_num_thr),
+ ODP_TEST_INFO(init_test_feature_enabled),
+ ODP_TEST_INFO(init_test_feature_disabled),
+ ODP_TEST_INFO(init_test_log_thread),
+ ODP_TEST_INFO(init_test_param_init),
+ ODP_TEST_INFO(init_test_term_abnormal)
+};
+
+odp_testinfo_t init_suite[] = {
+ ODP_TEST_INFO_NULL,
+ ODP_TEST_INFO_NULL
+};
+
+odp_suiteinfo_t init_suites[] = {
+ {"Init", NULL, NULL, init_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+static int fill_testinfo(odp_testinfo_t *info, unsigned int test_case)
+{
+ if (test_case >= ODPH_ARRAY_SIZE(testinfo)) {
+ ODPH_ERR("Bad test case number %u\n", test_case);
+ return -1;
+ }
+
+ *info = testinfo[test_case];
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ int test_id;
+
+ /* Parse common options */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ if (argc < 2) {
+ ODPH_ERR("Usage: init_main <test case number>\n");
+ return -1;
+ }
+ test_id = atoi(argv[1]);
+
+ if (fill_testinfo(&init_suite[0], test_id))
+ return -1;
+
+ /* Prevent default ODP init */
+ odp_cunit_register_global_init(NULL);
+ odp_cunit_register_global_term(NULL);
+
+ /* Register the tests */
+ ret = odp_cunit_register(init_suites);
+
+ /* Run the tests */
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/init/init_num_thr.sh b/test/validation/api/init/init_num_thr.sh
new file mode 100755
index 000000000..3889d2a14
--- /dev/null
+++ b/test/validation/api/init/init_num_thr.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 3
diff --git a/test/validation/api/init/init_test_param_init.sh b/test/validation/api/init/init_test_param_init.sh
new file mode 100755
index 000000000..afb4d17a2
--- /dev/null
+++ b/test/validation/api/init/init_test_param_init.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 7
diff --git a/test/validation/api/init/init_test_term_abnormal.sh b/test/validation/api/init/init_test_term_abnormal.sh
new file mode 100755
index 000000000..b3edd8391
--- /dev/null
+++ b/test/validation/api/init/init_test_term_abnormal.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+TEST_DIR="${TEST_DIR:-$(dirname $0)/..}/init"
+$TEST_DIR/init_main$EXEEXT 8
diff --git a/test/validation/api/ipsec/.gitignore b/test/validation/api/ipsec/.gitignore
new file mode 100644
index 000000000..2def047f3
--- /dev/null
+++ b/test/validation/api/ipsec/.gitignore
@@ -0,0 +1 @@
+ipsec_main
diff --git a/test/validation/api/ipsec/Makefile.am b/test/validation/api/ipsec/Makefile.am
new file mode 100644
index 000000000..51b50dd02
--- /dev/null
+++ b/test/validation/api/ipsec/Makefile.am
@@ -0,0 +1,25 @@
+# Automake fragment for the IPsec API validation tests.
+include ../Makefile.inc
+
+# Shared IPsec test code and vectors, built as a convenience library
+noinst_LTLIBRARIES = libtestipsec.la
+libtestipsec_la_SOURCES = \
+ test_vectors.h \
+ reass_test_vectors.h \
+ ipsec_test_in.c \
+ ipsec_test_out.c \
+ ipsec.h \
+ ipsec.c \
+ reass_test_vectors.c
+
+test_PROGRAMS = \
+ ipsec_main
+
+ipsec_main_SOURCES = \
+ ipsec_main.c
+
+# Link the convenience library before the common test libs
+PRELDADD += libtestipsec.la
+
+# Wrapper scripts that select the operation mode at run time
+EXTRA_DIST = \
+ ipsec_sync.sh \
+ ipsec_async.sh \
+ ipsec_inline_in.sh \
+ ipsec_inline_out.sh
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
new file mode 100644
index 000000000..5ad7bd48d
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec.c
@@ -0,0 +1,1589 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2018-2022, Nokia
+ * Copyright (c) 2020-2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include <odp/helper/odph_api.h>
+
+#include "ipsec.h"
+
+#include "test_vectors.h"
+#include "reass_test_vectors.h"
+
+/* Capacity of the side buffer for scheduled events received from an
+ * unexpected queue */
+#define EVENT_BUFFER_SIZE 3
+
+/* A scheduled event remembered together with the queue it came from */
+struct buffered_event_s {
+ odp_queue_t from;
+ odp_event_t event;
+};
+
+/* Events popped from the scheduler while waiting for a different queue;
+ * delivered later by sched_event_buffer_get() */
+static struct buffered_event_s sched_ev_buffer[EVENT_BUFFER_SIZE];
+/* Shared suite configuration (op modes, pool, queues, pktio) */
+struct suite_context_s suite_context;
+/* IPsec implementation capabilities, probed once at suite init */
+static odp_ipsec_capability_t capa;
+/* Index of the next free slot in sched_ev_buffer */
+static int sched_ev_buffer_tail;
+/* Set once an SA soft-expiry notification has been observed */
+odp_bool_t sa_expiry_notified;
+
+#define PKT_POOL_NUM 64
+/* How long to wait for ordinary events vs. status events */
+#define EVENT_WAIT_TIME ODP_TIME_SEC_IN_NS
+#define STATUS_EVENT_WAIT_TIME ODP_TIME_MSEC_IN_NS
+#define SCHED_EVENT_RETRY_COUNT 2
+
+/* Markers used to verify that user pointer / SA context survive processing */
+#define PACKET_USER_PTR ((void *)0x1212fefe)
+#define IPSEC_SA_CTX ((void *)0xfefefafa)
+
+static int ipsec_config(void);
+
+/*
+ * Open a "loop" pktio on the given pool with scheduled atomic input queues
+ * and default output queues.
+ *
+ * Returns ODP_PKTIO_INVALID on any failure. NOTE: on odp_pktio_open()
+ * failure the caller's pool is destroyed here (existing behavior, kept).
+ */
+static odp_pktio_t pktio_create(odp_pool_t pool)
+{
+ odp_pktio_t pktio;
+ odp_pktio_param_t pktio_param;
+ odp_pktin_queue_param_t pktin_param;
+ /* Renamed from 'capa' to avoid shadowing the file-scope IPsec capa */
+ odp_pktio_capability_t pktio_capa;
+
+ int ret;
+
+ if (pool == ODP_POOL_INVALID)
+ return ODP_PKTIO_INVALID;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
+
+ pktio = odp_pktio_open("loop", pool, &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID) {
+ ret = odp_pool_destroy(pool);
+ if (ret)
+ ODPH_ERR("Unable to destroy pool\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ /* Probe capabilities only to verify the device responds */
+ if (odp_pktio_capability(pktio, &pktio_capa)) {
+ ODPH_ERR("Pktio capabilities failed\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ if (odp_pktin_queue_config(pktio, &pktin_param)) {
+ ODPH_ERR("Pktin queue config failed\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ if (odp_pktout_queue_config(pktio, NULL)) {
+ ODPH_ERR("Pktout queue config failed\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ return pktio;
+}
+
+/*
+ * Configure inline IPsec on the pktio (as requested by 'in'/'out') and
+ * start it.
+ *
+ * Returns -1 on failure, 0 when the device lacks the requested inline
+ * IPsec support (pktio left unstarted), 1 when configured and started.
+ */
+static int pktio_start(odp_pktio_t pktio, odp_bool_t in, odp_bool_t out)
+{
+ /* Renamed from 'capa' to avoid shadowing the file-scope IPsec capa */
+ odp_pktio_capability_t pktio_capa;
+ odp_pktio_config_t config;
+
+ if (ODP_PKTIO_INVALID == pktio)
+ return -1;
+
+ if (odp_pktio_capability(pktio, &pktio_capa))
+ return -1;
+ /* If inline is not supported, return here. Tests will be marked as
+ * inactive when testing for IPsec capabilities. */
+ if (in && !pktio_capa.config.inbound_ipsec)
+ return 0;
+ if (out && !pktio_capa.config.outbound_ipsec)
+ return 0;
+
+ odp_pktio_config_init(&config);
+ /* Full parsing so L3/L4 offsets are set on received packets */
+ config.parser.layer = ODP_PROTO_LAYER_ALL;
+ config.inbound_ipsec = in;
+ config.outbound_ipsec = out;
+
+ if (odp_pktio_config(pktio, &config))
+ return -1;
+ if (odp_pktio_start(pktio))
+ return -1;
+
+ /* Publish the started pktio to the rest of the suite */
+ suite_context.pktio = pktio;
+
+ return 1;
+}
+
+/*
+ * Stash an event received from an unexpected scheduled queue so it can be
+ * delivered later. Returns 0 on success, -ENOMEM when the buffer is full.
+ *
+ * Fix: the previous check (tail + 1 == EVENT_BUFFER_SIZE) rejected an add
+ * while one slot was still free; the buffer is full only when tail equals
+ * EVENT_BUFFER_SIZE.
+ */
+static int sched_event_buffer_add(odp_queue_t from, odp_event_t event)
+{
+ if (sched_ev_buffer_tail == EVENT_BUFFER_SIZE)
+ return -ENOMEM;
+
+ sched_ev_buffer[sched_ev_buffer_tail].from = from;
+ sched_ev_buffer[sched_ev_buffer_tail].event = event;
+ sched_ev_buffer_tail++;
+
+ return 0;
+}
+
+/*
+ * Fetch (and remove) a previously buffered event that was received from the
+ * given scheduled queue. Returns ODP_EVENT_INVALID if the queue is plain or
+ * nothing matching is buffered.
+ */
+static odp_event_t sched_event_buffer_get(odp_queue_t from)
+{
+ odp_event_t ev;
+ int i, j;
+
+ if (odp_queue_type(from) == ODP_QUEUE_TYPE_PLAIN)
+ return ODP_EVENT_INVALID;
+
+ /* Look for a matching entry */
+ for (i = 0; i < sched_ev_buffer_tail; i++)
+ if (sched_ev_buffer[i].from == from)
+ break;
+
+ /* Remove entry from buffer */
+ if (i != sched_ev_buffer_tail) {
+ ev = sched_ev_buffer[i].event;
+
+ /* Shift the remaining entries down to close the gap */
+ for (j = 1; i + j < sched_ev_buffer_tail; j++)
+ sched_ev_buffer[i + j - 1] = sched_ev_buffer[i + j];
+
+ sched_ev_buffer_tail--;
+ } else {
+ ev = ODP_EVENT_INVALID;
+ }
+
+ return ev;
+}
+
+/*
+ * Dequeue an event expected from the given scheduled queue. Events that the
+ * scheduler delivers from other queues are stashed in sched_ev_buffer for
+ * later; previously stashed events for this queue are returned first.
+ * Retries up to SCHED_EVENT_RETRY_COUNT times, waiting wait_ns each time.
+ */
+static odp_event_t sched_queue_deq(odp_queue_t queue, uint64_t wait_ns)
+{
+ uint64_t wait = odp_schedule_wait_time(wait_ns);
+ odp_event_t ev = ODP_EVENT_INVALID;
+ odp_queue_t from;
+ int retry = 0;
+
+ /* Check if buffered events are available */
+ ev = sched_event_buffer_get(queue);
+ if (ODP_EVENT_INVALID != ev)
+ return ev;
+
+ do {
+ ev = odp_schedule(&from, wait);
+
+ /* Stash events that belong to some other queue */
+ if ((ev != ODP_EVENT_INVALID) && (from != queue)) {
+ CU_ASSERT_FATAL(0 == sched_event_buffer_add(from, ev));
+ ev = ODP_EVENT_INVALID;
+ }
+ } while (ev == ODP_EVENT_INVALID && (++retry < SCHED_EVENT_RETRY_COUNT));
+
+ return ev;
+}
+
+/*
+ * Poll a plain queue until an event arrives or wait_ns has elapsed.
+ * Always polls at least once; returns ODP_EVENT_INVALID on timeout.
+ */
+static odp_event_t plain_queue_deq(odp_queue_t queue, uint64_t wait_ns)
+{
+ odp_event_t ev;
+ odp_time_t deadline;
+
+ deadline = odp_time_sum(odp_time_local(),
+ odp_time_local_from_ns(wait_ns));
+
+ for (;;) {
+ ev = odp_queue_deq(queue);
+ if (ev != ODP_EVENT_INVALID)
+ break;
+ if (odp_time_cmp(deadline, odp_time_local()) < 0)
+ break;
+ }
+
+ return ev;
+}
+
+/*
+ * Receive one event from the given queue, waiting at most wait_ns.
+ * Plain queues are polled directly; scheduled ones go via the scheduler.
+ */
+static odp_event_t recv_event(odp_queue_t queue, uint64_t wait_ns)
+{
+ return (odp_queue_type(queue) == ODP_QUEUE_TYPE_PLAIN) ?
+ plain_queue_deq(queue, wait_ns) :
+ sched_queue_deq(queue, wait_ns);
+}
+
+/*
+ * Stop the pktio and drain any events still pending on its input queue.
+ */
+static void pktio_stop(odp_pktio_t pktio)
+{
+ odp_event_t ev;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+
+ odp_pktin_event_queue(pktio, &queue, 1);
+
+ if (odp_pktio_stop(pktio))
+ ODPH_ERR("IPsec pktio stop failed\n");
+
+ /* Free whatever is left on the input queue */
+ while ((ev = recv_event(queue, 0)) != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+}
+
+/*
+ * Decide whether a test using the given protocol/algorithms can run with the
+ * current suite configuration and implementation capabilities.
+ * Returns ODP_TEST_ACTIVE or ODP_TEST_INACTIVE. Key/digest sizes are in bits.
+ */
+int ipsec_check(odp_bool_t ah,
+ odp_cipher_alg_t cipher,
+ uint32_t cipher_bits,
+ odp_auth_alg_t auth,
+ uint32_t auth_bits)
+{
+ /* Operation mode of either direction must be supported */
+ if ((ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_sync) ||
+ (ODP_IPSEC_OP_MODE_SYNC == suite_context.outbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_sync) ||
+ (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_async) ||
+ (ODP_IPSEC_OP_MODE_ASYNC == suite_context.outbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_async) ||
+ (ODP_IPSEC_OP_MODE_INLINE == suite_context.inbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_inline_in) ||
+ (ODP_IPSEC_OP_MODE_INLINE == suite_context.outbound_op_mode &&
+ ODP_SUPPORT_NO == capa.op_mode_inline_out))
+ return ODP_TEST_INACTIVE;
+
+ /* When a completion queue is used, its type must be supported */
+ if (!(ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode &&
+ ODP_IPSEC_OP_MODE_SYNC == suite_context.outbound_op_mode) &&
+ ODP_QUEUE_INVALID != suite_context.queue) {
+ if (suite_context.q_type == ODP_QUEUE_TYPE_PLAIN &&
+ !capa.queue_type_plain)
+ return ODP_TEST_INACTIVE;
+ if (suite_context.q_type == ODP_QUEUE_TYPE_SCHED &&
+ !capa.queue_type_sched)
+ return ODP_TEST_INACTIVE;
+ }
+
+ /* suite_context.pktio is set to ODP_PKTIO_INVALID in ipsec_suite_init()
+ * if the pktio device doesn't support inline IPsec processing. */
+ if (suite_context.pktio == ODP_PKTIO_INVALID &&
+ (ODP_IPSEC_OP_MODE_INLINE == suite_context.inbound_op_mode ||
+ ODP_IPSEC_OP_MODE_INLINE == suite_context.outbound_op_mode))
+ return ODP_TEST_INACTIVE;
+
+ if (ah && (ODP_SUPPORT_NO == capa.proto_ah))
+ return ODP_TEST_INACTIVE;
+
+ /* Finally check the cipher/auth combination (sizes in bytes here) */
+ if (odph_ipsec_alg_check(&capa, cipher, cipher_bits / 8, auth,
+ auth_bits / 8) < 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Per-algorithm capability predicates used as CUnit conditional-run
+ * functions. Each returns ODP_TEST_ACTIVE/ODP_TEST_INACTIVE via
+ * ipsec_check_ah()/ipsec_check_esp(); sizes are in bits.
+ */
+int ipsec_check_ah_sha256(void)
+{
+ return ipsec_check_ah(ODP_AUTH_ALG_SHA256_HMAC, 256);
+}
+
+int ipsec_check_esp_null_sha256(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
+ ODP_AUTH_ALG_SHA256_HMAC, 256);
+}
+
+int ipsec_check_esp_aes_cbc_128_null(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_NULL, 0);
+}
+
+int ipsec_check_esp_aes_cbc_128_sha1(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA1_HMAC, 160);
+}
+
+int ipsec_check_esp_aes_cbc_128_sha256(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA256_HMAC, 256);
+}
+
+int ipsec_check_esp_aes_cbc_128_sha384(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA384_HMAC, 384);
+}
+
+int ipsec_check_esp_aes_cbc_128_sha512(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA512_HMAC, 512);
+}
+
+int ipsec_check_esp_aes_ctr_128_null(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CTR, 128,
+ ODP_AUTH_ALG_NULL, 0);
+}
+
+/* AES-GCM is combined-mode: auth algorithm mirrors the cipher, 0 bits */
+int ipsec_check_esp_aes_gcm_128(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 128,
+ ODP_AUTH_ALG_AES_GCM, 0);
+}
+
+int ipsec_check_esp_aes_gcm_256(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 256,
+ ODP_AUTH_ALG_AES_GCM, 0);
+}
+
+int ipsec_check_ah_aes_gmac_128(void)
+{
+ return ipsec_check_ah(ODP_AUTH_ALG_AES_GMAC, 128);
+}
+
+int ipsec_check_esp_null_aes_gmac_128(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
+ ODP_AUTH_ALG_AES_GMAC, 128);
+}
+
+int ipsec_check_esp_chacha20_poly1305(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_CHACHA20_POLY1305, 256,
+ ODP_AUTH_ALG_CHACHA20_POLY1305, 0);
+}
+
+/* Active only when the implementation supports SA seq number overriding */
+int ipsec_check_test_sa_update_seq_num(void)
+{
+ if (!capa.test.sa_operations.seq_num)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Reassembly variants additionally require the suite-level reass flags */
+int ipsec_check_esp_aes_gcm_128_reass_ipv4(void)
+{
+ if (suite_context.reass_ipv4)
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 128,
+ ODP_AUTH_ALG_AES_GCM, 0);
+ return ODP_TEST_INACTIVE;
+}
+
+int ipsec_check_esp_aes_gcm_128_reass_ipv6(void)
+{
+ if (suite_context.reass_ipv6)
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 128,
+ ODP_AUTH_ALG_AES_GCM, 0);
+ return ODP_TEST_INACTIVE;
+}
+
+int ipsec_check_esp_null_aes_xcbc(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
+ ODP_AUTH_ALG_AES_XCBC_MAC, 128);
+}
+
+/*
+ * Fill an SA parameter structure for the tests. Inbound SAs use SPI lookup
+ * and the maximum anti-replay window (unless auth is NULL); a non-NULL 'tun'
+ * selects tunnel mode. Key pointers may be NULL to leave defaults in place.
+ */
+void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
+ odp_ipsec_dir_t dir,
+ odp_ipsec_protocol_t proto,
+ uint32_t spi,
+ odp_ipsec_tunnel_param_t *tun,
+ odp_cipher_alg_t cipher_alg,
+ const odp_crypto_key_t *cipher_key,
+ odp_auth_alg_t auth_alg,
+ const odp_crypto_key_t *auth_key,
+ const odp_crypto_key_t *cipher_key_extra,
+ const odp_crypto_key_t *auth_key_extra)
+{
+ odp_ipsec_sa_param_init(param);
+ param->dir = dir;
+ if (dir == ODP_IPSEC_DIR_INBOUND) {
+ param->inbound.lookup_mode = ODP_IPSEC_LOOKUP_SPI;
+ /* Anti-replay needs authentication to be meaningful */
+ if (auth_alg == ODP_AUTH_ALG_NULL)
+ param->inbound.antireplay_ws = 0;
+ else
+ param->inbound.antireplay_ws = capa.max_antireplay_ws;
+ }
+ param->proto = proto;
+
+ if (tun) {
+ param->mode = ODP_IPSEC_MODE_TUNNEL;
+ if (dir == ODP_IPSEC_DIR_OUTBOUND)
+ param->outbound.tunnel = *tun;
+ } else {
+ param->mode = ODP_IPSEC_MODE_TRANSPORT;
+ }
+
+ param->spi = spi;
+
+ param->dest_queue = suite_context.queue;
+
+ /* Marker verified later via odp_ipsec_sa_context() */
+ param->context = IPSEC_SA_CTX;
+
+ param->crypto.cipher_alg = cipher_alg;
+ if (cipher_key)
+ param->crypto.cipher_key = *cipher_key;
+
+ param->crypto.auth_alg = auth_alg;
+ if (auth_key)
+ param->crypto.auth_key = *auth_key;
+
+ if (cipher_key_extra)
+ param->crypto.cipher_key_extra = *cipher_key_extra;
+
+ if (auth_key_extra)
+ param->crypto.auth_key_extra = *auth_key_extra;
+
+ /*
+ * Let's use arbitrary non-zero life time values to get life time
+ * checking code paths exercised. Let's not use very small values
+ * to avoid unexpected expiration with implementations that do
+ * not have packet-accurate life time checking but may report
+ * expiration a bit early.
+ */
+ param->lifetime.soft_limit.bytes = 900 * 1000;
+ param->lifetime.hard_limit.bytes = 1000 * 1000;
+ param->lifetime.soft_limit.packets = 9000 * 1000;
+ param->lifetime.hard_limit.packets = 10000 * 1000;
+}
+
+/*
+ * Validate an IPsec status event: type, (absent) user area and pool, warning
+ * status against the expected SA expiry kind. Sets sa_expiry_notified when a
+ * soft expiry warning was expected and seen. Frees the event.
+ */
+static void ipsec_status_event_handle(odp_event_t ev_status,
+ odp_ipsec_sa_t sa,
+ enum ipsec_test_sa_expiry sa_expiry)
+{
+ int flag = 0;
+ odp_ipsec_status_t status = {
+ .id = 0,
+ .sa = ODP_IPSEC_SA_INVALID,
+ .result = 0,
+ .warn.all = 0,
+ };
+
+ CU_ASSERT_FATAL(ODP_EVENT_INVALID != ev_status);
+ CU_ASSERT_EQUAL(1, odp_event_is_valid(ev_status));
+ CU_ASSERT_EQUAL_FATAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(ev_status));
+
+ /* No user area or source pool for IPsec status events */
+ CU_ASSERT(odp_event_user_area(ev_status) == NULL);
+ CU_ASSERT(odp_event_user_area_and_flag(ev_status, &flag) == NULL);
+ CU_ASSERT(flag < 0);
+
+ CU_ASSERT(odp_event_pool(ev_status) == ODP_POOL_INVALID);
+
+ CU_ASSERT_EQUAL(0, odp_ipsec_status(&status, ev_status));
+ CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_WARN, status.id);
+ CU_ASSERT_EQUAL(sa, status.sa);
+ CU_ASSERT_EQUAL(0, status.result);
+
+ if (IPSEC_TEST_EXPIRY_IGNORED != sa_expiry) {
+ if (IPSEC_TEST_EXPIRY_SOFT_PKT == sa_expiry) {
+ CU_ASSERT_EQUAL(1, status.warn.soft_exp_packets);
+ sa_expiry_notified = true;
+ } else if (IPSEC_TEST_EXPIRY_SOFT_BYTE == sa_expiry) {
+ CU_ASSERT_EQUAL(1, status.warn.soft_exp_bytes);
+ sa_expiry_notified = true;
+ }
+ }
+
+ odp_event_free(ev_status);
+}
+
+/*
+ * Try to receive and validate one IPsec status event for the SA.
+ * Waits only when an expiry notification is actually expected.
+ */
+void ipsec_status_event_get(odp_ipsec_sa_t sa,
+ enum ipsec_test_sa_expiry sa_expiry)
+{
+ odp_event_t ev;
+ uint64_t wait_time;
+
+ if (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED)
+ wait_time = 0;
+ else
+ wait_time = STATUS_EVENT_WAIT_TIME;
+
+ ev = recv_event(suite_context.queue, wait_time);
+ if (ev != ODP_EVENT_INVALID)
+ ipsec_status_event_handle(ev, sa, sa_expiry);
+}
+
+/*
+ * Disable and destroy an SA. In async/inline modes this also waits for and
+ * validates the SA-disable completion status event.
+ */
+void ipsec_sa_destroy(odp_ipsec_sa_t sa)
+{
+ odp_event_t event;
+ odp_ipsec_status_t status;
+ int ret;
+
+ /* The SA context marker set at creation must still be intact */
+ CU_ASSERT_EQUAL(IPSEC_SA_CTX, odp_ipsec_sa_context(sa));
+
+ CU_ASSERT_EQUAL(ODP_IPSEC_OK, odp_ipsec_sa_disable(sa));
+
+ if (ODP_QUEUE_INVALID != suite_context.queue) {
+ /* Disable completion is reported via a status event */
+ event = recv_event(suite_context.queue, EVENT_WAIT_TIME);
+
+ CU_ASSERT(odp_event_is_valid(event) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(event));
+
+ ret = odp_ipsec_status(&status, event);
+ CU_ASSERT(ret == 0);
+
+ if (ret == 0) {
+ CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_SA_DISABLE, status.id);
+ CU_ASSERT_EQUAL(sa, status.sa);
+ CU_ASSERT_EQUAL(0, status.result);
+ CU_ASSERT_EQUAL(0, status.warn.all);
+ }
+
+ odp_event_free(event);
+ }
+
+ CU_ASSERT_EQUAL(ODP_IPSEC_OK, odp_ipsec_sa_destroy(sa));
+}
+
+/*
+ * Allocate a packet from the suite pool and fill it from a test vector,
+ * copying data and L2/L3/L4 offsets. Sets the user pointer marker that is
+ * verified after IPsec processing. Returns ODP_PACKET_INVALID on alloc
+ * failure (after a fatal CU assert).
+ */
+odp_packet_t ipsec_packet(const ipsec_test_packet *itp)
+{
+ odp_packet_t pkt = odp_packet_alloc(suite_context.pool, itp->len);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_PACKET_INVALID, pkt);
+ if (ODP_PACKET_INVALID == pkt)
+ return pkt;
+
+ CU_ASSERT_EQUAL(0, odp_packet_copy_from_mem(pkt, 0, itp->len,
+ itp->data));
+ if (itp->l2_offset != ODP_PACKET_OFFSET_INVALID)
+ CU_ASSERT_EQUAL(0, odp_packet_l2_offset_set(pkt,
+ itp->l2_offset));
+ if (itp->l3_offset != ODP_PACKET_OFFSET_INVALID)
+ CU_ASSERT_EQUAL(0, odp_packet_l3_offset_set(pkt,
+ itp->l3_offset));
+ if (itp->l4_offset != ODP_PACKET_OFFSET_INVALID)
+ CU_ASSERT_EQUAL(0, odp_packet_l4_offset_set(pkt,
+ itp->l4_offset));
+
+ odp_packet_user_ptr_set(pkt, PACKET_USER_PTR);
+
+ return pkt;
+}
+
+/*
+ * Verify that the packet's L2 header (bytes between L2 and L3 offsets)
+ * matches the expected test vector. No-op when itp is NULL.
+ */
+static void check_l2_header(const ipsec_test_packet *itp, odp_packet_t pkt)
+{
+ uint32_t len = odp_packet_len(pkt);
+ uint8_t data[len];
+ uint32_t l2 = odp_packet_l2_offset(pkt);
+ uint32_t l3 = odp_packet_l3_offset(pkt);
+ uint32_t hdr_len;
+
+ if (!itp)
+ return;
+
+ hdr_len = itp->l3_offset - itp->l2_offset;
+
+ CU_ASSERT_FATAL(l2 != ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT_FATAL(l3 != ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT_EQUAL(l3 - l2, hdr_len);
+ odp_packet_copy_to_mem(pkt, 0, len, data);
+ CU_ASSERT_EQUAL(0, memcmp(data + l2,
+ itp->data + itp->l2_offset,
+ hdr_len));
+}
+
+/*
+ * Compare packets ignoring everything before the L3 header.
+ *
+ * For outbound IPv4, the implementation may pick a different IP ID than the
+ * test vector; in that case the ID and checksum are normalized before the
+ * comparison (and for AH, whose ICV covers the ID field, only the bytes up
+ * to the first possible ICV location are compared).
+ */
+static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt,
+ odp_bool_t is_outbound)
+{
+ uint32_t len = (ODP_PACKET_INVALID == pkt) ? 1 : odp_packet_len(pkt);
+ uint32_t l3, l4;
+ uint8_t data[len];
+ const odph_ipv4hdr_t *itp_ip;
+ odph_ipv4hdr_t *ip;
+
+ if (NULL == itp)
+ return;
+
+ l3 = odp_packet_l3_offset(pkt);
+ l4 = odp_packet_l4_offset(pkt);
+ odp_packet_copy_to_mem(pkt, 0, len, data);
+
+ /* No L3 offset: the vector must not have one either */
+ if (l3 == ODP_PACKET_OFFSET_INVALID) {
+ CU_ASSERT_EQUAL(itp->l3_offset, ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT_EQUAL(l4, ODP_PACKET_OFFSET_INVALID);
+
+ return;
+ }
+
+ /* Lengths from L3 on must match before any byte comparison */
+ CU_ASSERT_EQUAL(len - l3, itp->len - itp->l3_offset);
+ if (len - l3 != itp->len - itp->l3_offset)
+ return;
+
+ CU_ASSERT_EQUAL(l4 - l3, itp->l4_offset - itp->l3_offset);
+ if (l4 - l3 != itp->l4_offset - itp->l3_offset)
+ return;
+
+ ip = (odph_ipv4hdr_t *) &data[l3];
+ itp_ip = (const odph_ipv4hdr_t *) &itp->data[itp->l3_offset];
+ if (ODPH_IPV4HDR_VER(ip->ver_ihl) == ODPH_IPV4 &&
+ is_outbound &&
+ ip->id != itp_ip->id) {
+ /*
+ * IP ID value chosen by the implementation differs
+ * from the IP value in our test vector. This requires
+ * special handling in outbound checks.
+ */
+ /*
+ * Let's change IP ID and header checksum to same values
+ * as in the test vector to facilitate packet comparison.
+ */
+ CU_ASSERT(odph_ipv4_csum_valid(pkt));
+ ip->id = itp_ip->id;
+ ip->chksum = itp_ip->chksum;
+
+ if (ip->proto == ODPH_IPPROTO_AH) {
+ /*
+ * ID field is included in the authentication so
+ * we cannot check ICV against our test vector.
+ * Check packet data before the first possible
+ * location of the AH ICV field.
+ */
+ CU_ASSERT_EQUAL(0, memcmp(data + l3,
+ itp->data + itp->l3_offset,
+ ODPH_IPV4HDR_LEN + 12));
+ return;
+ }
+ }
+
+ CU_ASSERT_EQUAL(0, memcmp(data + l3,
+ itp->data + itp->l3_offset,
+ len - l3));
+}
+
+/*
+ * Build one packet per test part from its input vector and transmit them on
+ * the suite pktio. Returns the number of packets handed to odp_pktout_send()
+ * (0 only when no pktout queue is available, which is a fatal test failure).
+ */
+static int send_pkts(const ipsec_test_part part[], int num_part)
+{
+ odp_packet_t pkt[num_part];
+ odp_pktout_queue_t pktout;
+ int i;
+
+ if (odp_pktout_queue(suite_context.pktio, &pktout, 1) != 1) {
+ CU_FAIL_FATAL("No pktout queue");
+ return 0;
+ }
+
+ for (i = 0; i < num_part; i++)
+ pkt[i] = ipsec_packet(part[i].pkt_in);
+
+ CU_ASSERT_EQUAL(num_part, odp_pktout_send(pktout, pkt, num_part));
+
+ return num_part;
+}
+
+/*
+ * Receive one async inbound completion event. SA lookup failures are
+ * delivered to the default queue given in odp_ipsec_config(); everything
+ * else arrives on the SA destination queue.
+ */
+static odp_event_t recv_pkt_async_inbound(odp_ipsec_op_status_t status)
+{
+ odp_queue_t queue = status.error.sa_lookup ?
+ suite_context.default_queue : suite_context.queue;
+
+ return recv_event(queue, EVENT_WAIT_TIME);
+}
+
+/*
+ * Receive inline-processed inbound packets for one test part. Packets that
+ * failed SA lookup arrive as plain packets on the pktin queue; successfully
+ * processed (possibly reassembled) packets arrive as IPsec packets on the
+ * completion queue. Fills pkto[] and returns the number of packets stored.
+ *
+ * NOTE(review): the loop runs until part->num_pkt packets have been
+ * collected, with zero-wait polls; if fewer packets ever arrive it spins —
+ * presumably acceptable in this test context, but confirm.
+ */
+static int recv_pkts_inline(const ipsec_test_part *part,
+ odp_packet_t *pkto)
+{
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+ int i;
+
+ CU_ASSERT_EQUAL_FATAL(1, odp_pktin_event_queue(suite_context.pktio,
+ &queue, 1));
+
+ for (i = 0; i < part->num_pkt;) {
+ odp_event_t ev;
+ odp_event_subtype_t subtype;
+
+ /* First poll the pktin queue: plain packets here mean the
+ * packet bypassed IPsec (expected only on SA lookup error) */
+ ev = recv_event(queue, 0);
+ if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
+ odp_event_types(ev, &subtype));
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
+ subtype);
+ CU_ASSERT(part->out[i].status.error.sa_lookup);
+
+ pkto[i] = odp_packet_from_event(ev);
+ CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+ i++;
+ continue;
+ }
+
+ /* Then poll the IPsec completion queue */
+ ev = recv_event(suite_context.queue, 0);
+ if (ODP_EVENT_INVALID != ev) {
+ odp_packet_t pkt;
+ int num_pkts = 0;
+ odp_packet_reass_status_t reass_status;
+ odp_packet_reass_info_t reass = {0};
+ odp_packet_reass_partial_state_t reass_state;
+ odp_packet_t frags[MAX_FRAGS];
+ int j;
+
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET, odp_event_type(ev));
+ pkt = odp_packet_from_event(ev);
+
+ CU_ASSERT(!part->out[i].status.error.sa_lookup);
+
+ reass_status = odp_packet_reass_status(pkt);
+ CU_ASSERT(reass_status == part->out[i].reass_status);
+
+ switch (reass_status) {
+ case ODP_PACKET_REASS_COMPLETE:
+ CU_ASSERT(odp_packet_reass_info(pkt, &reass) == 0);
+ CU_ASSERT(part->out[i].num_frags == reass.num_frags);
+ /* FALLTHROUGH */
+ case ODP_PACKET_REASS_NONE:
+ pkto[i] = pkt;
+ num_pkts = 1;
+ break;
+ case ODP_PACKET_REASS_INCOMPLETE:
+ /* Incomplete reassembly: collect the
+ * individual fragments instead */
+ reass_state.num_frags = 0;
+ CU_ASSERT(0 ==
+ odp_packet_reass_partial_state(pkt, frags, &reass_state));
+ num_pkts = reass_state.num_frags;
+
+ CU_ASSERT_FATAL(i + num_pkts <= part->num_pkt);
+ for (j = 0; j < num_pkts; j++)
+ pkto[i + j] = frags[j];
+ break;
+ default:
+ CU_FAIL("Unknown reassembly status");
+ break;
+ }
+
+ for (; num_pkts > 0; num_pkts--)
+ CU_ASSERT(odp_packet_subtype(pkto[i++]) ==
+ ODP_EVENT_PACKET_IPSEC);
+
+ continue;
+ }
+ }
+
+ return i;
+}
+
+/*
+ * Run inbound IPsec processing for one test part in the configured op mode
+ * (sync, async or inline) and collect the resulting packets into pkto[].
+ * With part->flags.lookup set, the SA is found by SPI lookup instead of
+ * being passed explicitly. Returns the number of output packets.
+ */
+static int ipsec_process_in(const ipsec_test_part *part,
+ odp_ipsec_sa_t sa,
+ odp_packet_t *pkto)
+{
+ odp_ipsec_in_param_t param;
+ int num_out = part->num_pkt;
+ odp_packet_t pkt;
+ int i;
+
+ memset(&param, 0, sizeof(param));
+ if (!part->flags.lookup) {
+ param.num_sa = 1;
+ param.sa = &sa;
+ } else {
+ /* Exercise implementation-side SA lookup */
+ param.num_sa = 0;
+ param.sa = NULL;
+ }
+
+ if (ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode) {
+ pkt = ipsec_packet(part->pkt_in);
+ CU_ASSERT_EQUAL(part->num_pkt, odp_ipsec_in(&pkt, 1,
+ pkto, &num_out,
+ &param));
+ CU_ASSERT_EQUAL(num_out, part->num_pkt);
+ CU_ASSERT_FATAL(*pkto != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_subtype(*pkto) == ODP_EVENT_PACKET_IPSEC);
+ } else if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode) {
+ int consumed;
+
+ pkt = ipsec_packet(part->pkt_in);
+ consumed = odp_ipsec_in_enq(&pkt, 1, &param);
+ CU_ASSERT_EQUAL(1, consumed);
+ if (consumed <= 0)
+ num_out = 0;
+
+ /* Collect one completion event per expected output packet */
+ for (i = 0; i < num_out; i++) {
+ odp_event_t event;
+ odp_event_subtype_t subtype;
+
+ event = recv_pkt_async_inbound(part->out[i].status);
+
+ CU_ASSERT(odp_event_is_valid(event) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
+ odp_event_types(event, &subtype));
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
+ pkto[i] = odp_ipsec_packet_from_event(event);
+ CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_subtype(pkto[i]) ==
+ ODP_EVENT_PACKET_IPSEC);
+ }
+ } else {
+ /* Inline mode: loop the packet through the pktio */
+ CU_ASSERT_EQUAL(1, send_pkts(part, 1));
+ if (part->num_pkt)
+ CU_ASSERT_EQUAL(part->num_pkt, recv_pkts_inline(part, pkto));
+ }
+
+ return num_out;
+}
+
+/*
+ * Check a packet result against the expected SA expiry kind. Records soft
+ * expiry warnings in sa_expiry_notified. Returns -1 for hard-expiry cases
+ * (processing of the part should stop), 0 otherwise.
+ */
+static int ipsec_check_sa_expiry(enum ipsec_test_sa_expiry sa_expiry,
+ odp_ipsec_packet_result_t *result)
+{
+ if (sa_expiry == IPSEC_TEST_EXPIRY_IGNORED)
+ return 0;
+
+ if (!sa_expiry_notified) {
+ /* First notification: record soft warnings, or accept hard
+ * expiry errors (which also end the test part) */
+ if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_PKT) {
+ if (result->status.warn.soft_exp_packets)
+ sa_expiry_notified = true;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_SOFT_BYTE) {
+ if (result->status.warn.soft_exp_bytes)
+ sa_expiry_notified = true;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) {
+ if (result->status.error.hard_exp_packets)
+ sa_expiry_notified = true;
+
+ return -1;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) {
+ if (result->status.error.hard_exp_bytes)
+ sa_expiry_notified = true;
+
+ return -1;
+ }
+ } else {
+ /* Already notified: hard expiry error bits must now be set */
+ if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_PKT) {
+ CU_ASSERT(result->status.error.hard_exp_packets);
+
+ return -1;
+ } else if (sa_expiry == IPSEC_TEST_EXPIRY_HARD_BYTE) {
+ CU_ASSERT(result->status.error.hard_exp_bytes);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Run outbound IPsec processing for one test part in the configured op mode
+ * (sync, async or inline) and collect the resulting packets into pkto[].
+ * Returns the number of packets stored (part->num_pkt, except async enqueue
+ * failure yields 0).
+ */
+static int ipsec_send_out_one(const ipsec_test_part *part,
+ odp_ipsec_sa_t sa,
+ odp_packet_t *pkto)
+{
+ odp_ipsec_out_param_t param;
+ int num_out = part->num_pkt;
+ odp_packet_t pkt;
+ int i;
+
+ pkt = ipsec_packet(part->pkt_in);
+
+ memset(&param, 0, sizeof(param));
+ param.num_sa = 1;
+ param.sa = &sa;
+ param.num_opt = part->num_opt;
+ param.opt = &part->opt;
+
+ if (ODP_IPSEC_OP_MODE_SYNC == suite_context.outbound_op_mode) {
+ CU_ASSERT_EQUAL(1, odp_ipsec_out(&pkt, 1, pkto, &num_out,
+ &param));
+ CU_ASSERT_FATAL(num_out == 1);
+ CU_ASSERT_FATAL(*pkto != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_subtype(*pkto) == ODP_EVENT_PACKET_IPSEC);
+ } else if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.outbound_op_mode) {
+ num_out = odp_ipsec_out_enq(&pkt, 1, &param);
+ CU_ASSERT_EQUAL(1, num_out);
+
+ num_out = (num_out == 1) ? 1 : 0;
+
+ /* Collect the completion events */
+ for (i = 0; i < num_out; i++) {
+ odp_event_t event;
+ odp_event_subtype_t subtype;
+
+ event = recv_event(suite_context.queue, EVENT_WAIT_TIME);
+
+ CU_ASSERT(odp_event_is_valid(event) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
+ odp_event_types(event, &subtype));
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
+ pkto[i] = odp_ipsec_packet_from_event(event);
+ CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_subtype(pkto[i]) ==
+ ODP_EVENT_PACKET_IPSEC);
+ }
+ } else {
+ /* Inline mode: send out through the loop pktio and receive
+ * the processed packets back on the pktin queue */
+ struct odp_ipsec_out_inline_param_t inline_param;
+ uint32_t hdr_len;
+ odph_ethhdr_t hdr;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+
+ if (NULL != part->out[0].pkt_res) {
+ /*
+ * Take L2 header from the expected result.
+ * This way ethertype will be correct for input
+ * processing even with IPv4-in-IPv6-tunnels etc.
+ */
+ hdr_len = part->out[0].pkt_res->l3_offset;
+ CU_ASSERT_FATAL(hdr_len <= sizeof(hdr));
+ memcpy(&hdr, part->out[0].pkt_res->data, hdr_len);
+ } else {
+ /* No expected result: synthesize a broadcast Ethernet
+ * header with the right ethertype */
+ hdr_len = 14;
+ memset(&hdr, 0xff, hdr_len);
+
+ if (part->out[0].l3_type == ODP_PROTO_L3_TYPE_IPV6) {
+ hdr.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV6);
+ } else {
+ hdr.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+ }
+ }
+
+ if (part->flags.inline_hdr_in_packet) {
+ /*
+ * Provide the to-be-prepended header to ODP in the
+ * the packet data. Use nonzero L2 offset for better
+ * test coverage.
+ */
+ uint32_t new_l2_offset = 100;
+ uint32_t l3_offset = odp_packet_l3_offset(pkt);
+ uint32_t new_l3_offset = new_l2_offset + hdr_len;
+ uint32_t l4_offset = odp_packet_l4_offset(pkt);
+ int ret;
+
+ ret = odp_packet_trunc_head(&pkt, l3_offset,
+ NULL, NULL);
+ CU_ASSERT_FATAL(ret >= 0);
+ ret = odp_packet_extend_head(&pkt, new_l3_offset,
+ NULL, NULL);
+ CU_ASSERT_FATAL(ret >= 0);
+ odp_packet_l2_offset_set(pkt, new_l2_offset);
+ odp_packet_l3_offset_set(pkt, new_l3_offset);
+ odp_packet_copy_from_mem(pkt, new_l2_offset, hdr_len, &hdr);
+ if (l4_offset != ODP_PACKET_OFFSET_INVALID)
+ odp_packet_l4_offset_set(pkt, new_l3_offset +
+ l4_offset - l3_offset);
+
+ inline_param.outer_hdr.ptr = NULL;
+ } else {
+ inline_param.outer_hdr.ptr = (void *)&hdr;
+ }
+
+ inline_param.pktio = suite_context.pktio;
+ inline_param.tm_queue = ODP_TM_INVALID;
+ inline_param.outer_hdr.len = hdr_len;
+
+ CU_ASSERT_EQUAL(1, odp_ipsec_out_inline(&pkt, 1, &param,
+ &inline_param));
+ CU_ASSERT_EQUAL_FATAL(1,
+ odp_pktin_event_queue(suite_context.
+ pktio,
+ &queue, 1));
+
+ for (i = 0; i < num_out;) {
+ odp_event_t ev;
+ odp_event_subtype_t subtype;
+
+ /* Successful packets loop back to the pktin queue */
+ ev = recv_event(queue, 0);
+ if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
+ odp_event_types(ev, &subtype));
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
+ subtype);
+ CU_ASSERT(!part->out[i].status.error.all);
+
+ pkto[i] = odp_packet_from_event(ev);
+ CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+
+ if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE)
+ ipsec_status_event_get(sa, part->out[i].sa_expiry);
+
+ i++;
+ continue;
+ }
+
+ /* Errors and status events arrive on the completion
+ * queue instead */
+ ev = recv_event(suite_context.queue, 0);
+ if (ODP_EVENT_INVALID != ev) {
+ odp_event_type_t ev_type;
+
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
+ ev_type = odp_event_types(ev, &subtype);
+
+ if ((ODP_EVENT_IPSEC_STATUS == ev_type) &&
+ part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE) {
+ ipsec_status_event_handle(ev, sa, part->out[i].sa_expiry);
+ continue;
+ }
+
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
+ ev_type);
+ CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC,
+ subtype);
+
+ /* In the case of SA hard expiry tests, hard expiry error bits are
+ * expected to be set. The exact error bits expected to be set based
+ * on sa_expiry is checked eventually in ipsec_check_sa_expiry()
+ * from the caller of this function.
+ */
+ if (part->out[i].sa_expiry == IPSEC_TEST_EXPIRY_NONE)
+ CU_ASSERT(part->out[i].status.error.all);
+
+ pkto[i] = odp_ipsec_packet_from_event(ev);
+ CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_subtype(pkto[i]) ==
+ ODP_EVENT_PACKET_IPSEC);
+ i++;
+ continue;
+ }
+ }
+ }
+
+ return num_out;
+}
+
+/*
+ * Override the SA's next sequence number using the test-mode SA update API.
+ * Returns the odp_ipsec_test_sa_update() result (0 on success).
+ */
+int ipsec_test_sa_update_seq_num(odp_ipsec_sa_t sa, uint32_t seq_num)
+{
+ odp_ipsec_test_sa_param_t param;
+
+ param.seq_num = seq_num;
+
+ return odp_ipsec_test_sa_update(sa, ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM,
+ &param);
+}
+
+/*
+ * Verify that the ESP/AH sequence number of an outbound IPv4 packet equals
+ * the expected value. Fails the test for non-IPv4 or non-IPsec protocols.
+ */
+static void ipsec_pkt_seq_num_check(odp_packet_t pkt, uint32_t seq_num)
+{
+ uint32_t l3_off = odp_packet_l3_offset(pkt);
+ uint32_t l4_off;
+ odph_ipv4hdr_t ip;
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_PACKET_OFFSET_INVALID, l3_off);
+ CU_ASSERT_EQUAL_FATAL(0, odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip),
+ &ip));
+
+ if (ODPH_IPV4HDR_VER(ip.ver_ihl) == ODPH_IPV4) {
+ /* L4 starts right after the (possibly optioned) IPv4 header */
+ l4_off = l3_off + (ODPH_IPV4HDR_IHL(ip.ver_ihl) * 4);
+
+ if (ip.proto == ODPH_IPPROTO_ESP) {
+ odph_esphdr_t esp;
+
+ odp_packet_copy_to_mem(pkt, l4_off, sizeof(esp), &esp);
+ CU_ASSERT_EQUAL(odp_be_to_cpu_32(esp.seq_no), seq_num);
+ } else if (ip.proto == ODPH_IPPROTO_AH) {
+ odph_ahhdr_t ah;
+
+ odp_packet_copy_to_mem(pkt, l4_off, sizeof(ah), &ah);
+ CU_ASSERT_EQUAL(odp_be_to_cpu_32(ah.seq_no), seq_num);
+ } else {
+ CU_FAIL("Unexpected IP Proto");
+ }
+ } else {
+ CU_FAIL("Unexpected IP Version");
+ }
+}
+
+/* Verify inbound processed one part */
+static void verify_in(const ipsec_test_part *part,
+ odp_ipsec_sa_t sa,
+ odp_packet_t *pkto)
+{
+ int i;
+
+ for (i = 0; i < part->num_pkt; i++) {
+ odp_ipsec_packet_result_t result;
+ void *expected_user_ptr = PACKET_USER_PTR;
+
+ if (ODP_EVENT_PACKET_IPSEC !=
+ odp_event_subtype(odp_packet_to_event(pkto[i]))) {
+ /* Inline packet failed SA lookup */
+ CU_ASSERT_EQUAL(1, part->out[i].status.error.sa_lookup);
+ } else {
+ CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
+ CU_ASSERT_EQUAL(part->out[i].status.error.all,
+ result.status.error.all);
+
+ /* Expected error: stop verifying this part.
+ * NOTE(review): only pkto[i] is freed here; any
+ * remaining packets in pkto[] would leak when
+ * num_pkt > 1 — confirm intended. */
+ if (part->out[i].status.error.all != 0) {
+ odp_packet_free(pkto[i]);
+ return;
+ }
+
+ if (0 == result.status.error.all)
+ CU_ASSERT_EQUAL(0,
+ odp_packet_has_error(pkto[i]));
+ CU_ASSERT_EQUAL(suite_context.inbound_op_mode ==
+ ODP_IPSEC_OP_MODE_INLINE,
+ result.flag.inline_mode);
+ CU_ASSERT_EQUAL(sa, result.sa);
+ CU_ASSERT_EQUAL(part->out[i].status.warn.all,
+ result.status.warn.all);
+ if (ODP_IPSEC_SA_INVALID != sa)
+ CU_ASSERT_EQUAL(IPSEC_SA_CTX,
+ odp_ipsec_sa_context(sa));
+ if (suite_context.inbound_op_mode != ODP_IPSEC_OP_MODE_SYNC) {
+ uint32_t len;
+
+ /* Original (pre-decap) IP length check */
+ if (part->out[i].orig_ip_len)
+ len = part->out[i].orig_ip_len;
+ else
+ len = part->pkt_in->len - part->pkt_in->l3_offset;
+
+ CU_ASSERT(result.orig_ip_len == 0 ||
+ result.orig_ip_len == len);
+ }
+ }
+ if (part->out[i].l3_type != ODP_PROTO_L3_TYPE_NONE)
+ ipsec_check_packet(part->out[i].pkt_res, pkto[i], false);
+ /* Inline-received packets do not carry the user pointer */
+ if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE)
+ expected_user_ptr = NULL;
+ CU_ASSERT(odp_packet_user_ptr(pkto[i]) == expected_user_ptr);
+
+ if (part->out[i].pkt_res != NULL &&
+ part->out[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
+ CU_ASSERT_EQUAL(part->out[i].l3_type,
+ odp_packet_l3_type(pkto[i]));
+ if (part->out[i].pkt_res != NULL &&
+ part->out[i].l4_type != _ODP_PROTO_L4_TYPE_UNDEF)
+ CU_ASSERT_EQUAL(part->out[i].l4_type,
+ odp_packet_l4_type(pkto[i]));
+ odp_packet_free(pkto[i]);
+ }
+}
+
+static void parse_ip(odp_packet_t pkt)
+{
+ uint8_t *ver_ihl;
+ odp_proto_t proto = ODP_PROTO_NONE;
+ uint32_t l3 = odp_packet_l3_offset(pkt);
+
+ ver_ihl = odp_packet_l3_ptr(pkt, NULL);
+ if ((*ver_ihl >> 4) == 4)
+ proto = ODP_PROTO_IPV4;
+ else if ((*ver_ihl >> 4) == 6)
+ proto = ODP_PROTO_IPV6;
+ else
+ CU_FAIL("Invalid IP version");
+
+ odp_packet_parse_param_t param = {
+ .proto = proto,
+ .last_layer = ODP_PROTO_LAYER_L4,
+ };
+ CU_ASSERT(odp_packet_parse(pkt, l3, &param) == 0);
+}
+
/*
 * Run one outbound (encapsulation) test part and verify every result
 * packet against part->out[]. Returns the number of result packets in
 * pkto[]; the packets remain owned by the caller (freed e.g. in
 * ipsec_check_out_one()).
 */
int ipsec_check_out(const ipsec_test_part *part, odp_ipsec_sa_t sa,
		    odp_packet_t *pkto)
{
	int i;
	int num_out;

	num_out = ipsec_send_out_one(part, sa, pkto);

	for (i = 0; i < num_out; i++) {
		odp_ipsec_packet_result_t result;

		if (ODP_EVENT_PACKET_IPSEC !=
		    odp_event_subtype(odp_packet_to_event(pkto[i]))) {
			/* Inline packet went through loop */
			CU_ASSERT_EQUAL(0, part->out[i].status.error.all);
			CU_ASSERT(odp_packet_user_ptr(pkto[i]) == NULL);
			/* L2 header must match the requested one */
			check_l2_header(part->out[i].pkt_res, pkto[i]);
		} else {
			/* IPsec packet */
			CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));

			/* Stop verifying on an SA-expiry mismatch; the
			 * remaining packets are still returned for freeing */
			if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE)
				if (ipsec_check_sa_expiry(part->out[i].sa_expiry, &result) != 0)
					return num_out;

			CU_ASSERT_EQUAL(part->out[i].status.error.all,
					result.status.error.all);
			if (0 == result.status.error.all)
				CU_ASSERT_EQUAL(0,
						odp_packet_has_error(pkto[i]));
			CU_ASSERT_EQUAL(sa, result.sa);
			CU_ASSERT_EQUAL(IPSEC_SA_CTX,
					odp_ipsec_sa_context(sa));
			CU_ASSERT(odp_packet_user_ptr(pkto[i]) == PACKET_USER_PTR);

			/* Parse the packet to set L4 offset and type */
			parse_ip(pkto[i]);
		}

		if (part->flags.test_sa_seq_num)
			ipsec_pkt_seq_num_check(pkto[i], part->out[i].seq_num);

		ipsec_check_packet(part->out[i].pkt_res,
				   pkto[i],
				   true);

		/*
		 * If we did not have an expected packet to compare the
		 * result packet with, we will check the l3 and l4 types
		 * against the expected ones.
		 */
		if (part->out[i].pkt_res == NULL) {
			if (part->out[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
				CU_ASSERT(part->out[i].l3_type ==
					  odp_packet_l3_type(pkto[i]));
			if (part->out[i].l4_type != _ODP_PROTO_L4_TYPE_UNDEF)
				CU_ASSERT(part->out[i].l4_type ==
					  odp_packet_l4_type(pkto[i]));
		}
	}
	return num_out;
}
+
+void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
+{
+ odp_packet_t pkto[MAX_FRAGS] = {0};
+ int num_out;
+
+ num_out = ipsec_process_in(part, sa, pkto);
+ CU_ASSERT_EQUAL(num_out, part->num_pkt);
+
+ verify_in(part, sa, pkto);
+}
+
+void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
+{
+ int num_out = part->num_pkt;
+ odp_packet_t pkto[num_out];
+ int i;
+
+ num_out = ipsec_check_out(part, sa, pkto);
+
+ for (i = 0; i < num_out; i++)
+ odp_packet_free(pkto[i]);
+}
+
/*
 * Common suite init: start the loop pktio when inline processing is in
 * use.
 *
 * NOTE(review): the handle is invalidated whenever rc == 0, so
 * pktio_start() presumably returns a positive value when the interface
 * was actually started and 0 when inline mode is unused/unsupported
 * (see also the comment in ipsec_term()) — confirm against pktio_start().
 */
static int ipsec_suite_init(void)
{
	int rc = 0;

	if (suite_context.pktio != ODP_PKTIO_INVALID)
		rc = pktio_start(suite_context.pktio,
				 suite_context.inbound_op_mode ==
				 ODP_IPSEC_OP_MODE_INLINE,
				 suite_context.outbound_op_mode ==
				 ODP_IPSEC_OP_MODE_INLINE);
	/* Forget the handle unless the interface was started */
	if (rc == 0)
		suite_context.pktio = ODP_PKTIO_INVALID;

	return rc < 0 ? -1 : 0;
}
+
+void ipsec_test_packet_from_pkt(ipsec_test_packet *test_pkt, odp_packet_t *pkt)
+{
+ CU_ASSERT_FATAL(odp_packet_len(*pkt) <= sizeof(test_pkt->data));
+
+ test_pkt->len = odp_packet_len(*pkt);
+ test_pkt->l2_offset = odp_packet_l2_offset(*pkt);
+ test_pkt->l3_offset = odp_packet_l3_offset(*pkt);
+ test_pkt->l4_offset = odp_packet_l4_offset(*pkt);
+ odp_packet_copy_to_mem(*pkt, 0, test_pkt->len, test_pkt->data);
+ odp_packet_free(*pkt);
+}
+
+int ipsec_suite_term(void)
+{
+ if (suite_context.pktio != ODP_PKTIO_INVALID)
+ pktio_stop(suite_context.pktio);
+
+ if (ODP_QUEUE_INVALID != suite_context.queue) {
+ if (odp_queue_destroy(suite_context.queue))
+ ODPH_ERR("IPsec destq destroy failed\n");
+ }
+
+ if (odp_cunit_print_inactive())
+ return -1;
+
+ return 0;
+}
+
+static odp_queue_t sched_queue_create(const char *name)
+{
+ odp_queue_param_t qparam;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = odp_schedule_default_prio();
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+
+ return odp_queue_create(name, &qparam);
+}
+
+static odp_queue_t plain_queue_create(const char *name)
+{
+ return odp_queue_create(name, NULL);
+}
+
+int ipsec_suite_sync_init(void)
+{
+ suite_context.queue = ODP_QUEUE_INVALID;
+
+ /* q_type doesn't matter when queue handle is invalid. */
+ suite_context.q_type = ODP_QUEUE_TYPE_PLAIN;
+
+ return ipsec_suite_init();
+}
+
+int ipsec_suite_plain_init(void)
+{
+ odp_queue_t dest_queue;
+
+ dest_queue = plain_queue_create("ipsec-out");
+ if (ODP_QUEUE_INVALID == dest_queue) {
+ ODPH_ERR("IPsec destq creation failed\n");
+ return -1;
+ }
+
+ suite_context.queue = dest_queue;
+ suite_context.q_type = ODP_QUEUE_TYPE_PLAIN;
+
+ return ipsec_suite_init();
+}
+
+int ipsec_suite_sched_init(void)
+{
+ odp_queue_t dest_queue;
+
+ dest_queue = sched_queue_create("ipsec-out");
+ if (ODP_QUEUE_INVALID == dest_queue) {
+ ODPH_ERR("IPsec destq creation failed\n");
+ return -1;
+ }
+
+ suite_context.queue = dest_queue;
+ suite_context.q_type = ODP_QUEUE_TYPE_SCHED;
+
+ return ipsec_suite_init();
+}
+
/*
 * Global init for the IPsec validation suites: bring up ODP, configure
 * the scheduler, create the packet pool and (for inline modes) a pktio
 * interface, then apply the global IPsec configuration.
 *
 * Returns 0 on success, -1 on failure.
 */
int ipsec_init(odp_instance_t *inst)
{
	odp_pool_param_t params;
	odp_pool_capability_t pool_capa;
	odp_init_t init_param;
	odph_helper_options_t helper_options;

	suite_context.reass_ipv4 = false;
	suite_context.reass_ipv6 = false;
	suite_context.pool = ODP_POOL_INVALID;
	suite_context.pktio = ODP_PKTIO_INVALID;
	suite_context.default_queue = ODP_QUEUE_INVALID;

	if (odph_options(&helper_options)) {
		ODPH_ERR("odph_options() failed\n");
		return -1;
	}

	odp_init_param_init(&init_param);
	init_param.mem_model = helper_options.mem_model;

	if (0 != odp_init_global(inst, &init_param, NULL)) {
		ODPH_ERR("odp_init_global() failed\n");
		return -1;
	}

	if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
		ODPH_ERR("odp_init_local() failed\n");
		return -1;
	}

	if (odp_schedule_config(NULL)) {
		ODPH_ERR("odp_schedule_config() failed\n");
		return -1;
	}

	if (odp_pool_capability(&pool_capa) < 0) {
		ODPH_ERR("odp_pool_capability() failed\n");
		return -1;
	}

	odp_pool_param_init(&params);
	params.pkt.seg_len = MAX_PKT_LEN;
	params.pkt.len = MAX_PKT_LEN;
	params.pkt.num = PKT_POOL_NUM;
	params.type = ODP_POOL_PACKET;

	/* Clamp segment length to the pool capability; tests then run
	 * with segmented packets */
	if (pool_capa.pkt.max_seg_len &&
	    MAX_PKT_LEN > pool_capa.pkt.max_seg_len) {
		ODPH_ERR("Warning: small packet segment length\n");
		params.pkt.seg_len = pool_capa.pkt.max_seg_len;
	}

	if (pool_capa.pkt.max_len &&
	    MAX_PKT_LEN > pool_capa.pkt.max_len) {
		ODPH_ERR("Pool max packet length too small\n");
		return -1;
	}

	suite_context.pool = odp_pool_create("packet_pool", &params);

	if (suite_context.pool == ODP_POOL_INVALID) {
		ODPH_ERR("Packet pool creation failed\n");
		return -1;
	}

	/* A pktio interface is needed only when packets pass through
	 * inline IPsec processing */
	if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE ||
	    suite_context.outbound_op_mode == ODP_IPSEC_OP_MODE_INLINE) {
		suite_context.pktio = pktio_create(suite_context.pool);
		if (suite_context.pktio == ODP_PKTIO_INVALID) {
			ODPH_ERR("IPsec pktio creation failed\n");
			return -1;
		}
	}

	return ipsec_config();
}
+
/*
 * Apply the global IPsec configuration for the selected operation modes.
 *
 * Returns 0 on success — including when the selected modes are not
 * supported at all, so that individual tests can report themselves
 * inactive — and -1 on hard failure.
 */
static int ipsec_config(void)
{
	odp_ipsec_config_t ipsec_config;

	if (odp_ipsec_capability(&capa) < 0)
		return -1;

	/* If we can not setup IPsec due to mode being unsupported, don't
	 * return an error here. It is easier (and more correct) to filter that
	 * in test checking function and just say that the test is inactive. */
	if ((ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_sync) ||
	    (ODP_IPSEC_OP_MODE_SYNC == suite_context.outbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_sync) ||
	    (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_async) ||
	    (ODP_IPSEC_OP_MODE_ASYNC == suite_context.outbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_async) ||
	    (ODP_IPSEC_OP_MODE_INLINE == suite_context.inbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_inline_in) ||
	    (ODP_IPSEC_OP_MODE_INLINE == suite_context.outbound_op_mode &&
	     ODP_SUPPORT_NO == capa.op_mode_inline_out))
		return 0;

	/* Non-sync inbound modes deliver result events to a queue; prefer
	 * a plain queue when the implementation supports one */
	if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_ASYNC ||
	    suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE) {
		if (capa.queue_type_plain)
			suite_context.default_queue = plain_queue_create("ipsec-default");
		else if (capa.queue_type_sched)
			suite_context.default_queue = sched_queue_create("ipsec-default");

		if (ODP_QUEUE_INVALID == suite_context.default_queue) {
			ODPH_ERR("IPsec defaultq creation failed\n");
			return -1;
		}
	}

	reass_test_vectors_init();

	odp_ipsec_config_init(&ipsec_config);
	ipsec_config.max_num_sa = capa.max_num_sa;
	ipsec_config.inbound_mode = suite_context.inbound_op_mode;
	ipsec_config.outbound_mode = suite_context.outbound_op_mode;
	ipsec_config.outbound.all_chksum = ~0;
	ipsec_config.inbound.default_queue = suite_context.default_queue;
	ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_ALL;
	ipsec_config.inbound.chksums.all_chksum = ~0;
	ipsec_config.stats_en = true;

	/* Request a 100 ms reassembly wait time, capped by capability */
	ipsec_config.inbound.reassembly.max_wait_time = 100 * ODP_TIME_MSEC_IN_NS;
	if (ipsec_config.inbound.reassembly.max_wait_time > capa.reassembly.max_wait_time)
		ipsec_config.inbound.reassembly.max_wait_time = capa.reassembly.max_wait_time;

	ipsec_config.inbound.reassembly.max_num_frags = MAX_FRAGS;

	if (capa.reassembly.ip) {
		ipsec_config.inbound.reassembly.en_ipv4 = true;
		ipsec_config.inbound.reassembly.en_ipv6 = true;
	}

	if (capa.reassembly.ipv4)
		ipsec_config.inbound.reassembly.en_ipv4 = true;

	if (capa.reassembly.ipv6)
		ipsec_config.inbound.reassembly.en_ipv6 = true;

	/* Disable reassembly when the selected inbound mode cannot use it */
	if (ODP_IPSEC_OP_MODE_INLINE == suite_context.inbound_op_mode &&
	    !capa.reass_inline) {
		ipsec_config.inbound.reassembly.en_ipv4 = false;
		ipsec_config.inbound.reassembly.en_ipv6 = false;
	}

	if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode &&
	    !capa.reass_async) {
		ipsec_config.inbound.reassembly.en_ipv4 = false;
		ipsec_config.inbound.reassembly.en_ipv6 = false;
	}

	if (ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode) {
		ipsec_config.inbound.reassembly.en_ipv4 = false;
		ipsec_config.inbound.reassembly.en_ipv6 = false;
	}

	if (capa.reassembly.max_num_frags < MAX_FRAGS) {
		ipsec_config.inbound.reassembly.en_ipv4 = false;
		ipsec_config.inbound.reassembly.en_ipv6 = false;
	}

	/* Record the final reassembly capability for the test filters */
	if (ipsec_config.inbound.reassembly.en_ipv4)
		suite_context.reass_ipv4 = true;
	else
		suite_context.reass_ipv4 = false;

	if (ipsec_config.inbound.reassembly.en_ipv6)
		suite_context.reass_ipv6 = true;
	else
		suite_context.reass_ipv6 = false;

	if (suite_context.reass_ipv4 || suite_context.reass_ipv6) {
		if (ODP_IPSEC_OP_MODE_INLINE == suite_context.inbound_op_mode)
			ipsec_config.inbound.reass_inline = true;

		if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode) {
			ipsec_config.inbound.reass_async = true;

			/* Reassembly with ASYNC not supported */
			suite_context.reass_ipv4 = false;
			suite_context.reass_ipv6 = false;
		}
	}

	if (ODP_IPSEC_OK != odp_ipsec_config(&ipsec_config))
		return -1;

	return 0;
}
+
+int ipsec_term(odp_instance_t inst)
+{
+ odp_pool_t pool = suite_context.pool;
+ odp_queue_t default_queue = suite_context.default_queue;
+ /* suite_context.pktio is set to ODP_PKTIO_INVALID by ipsec_suite_init()
+ if inline processing is not supported. */
+ odp_pktio_t pktio = odp_pktio_lookup("loop");
+
+ if (ODP_PKTIO_INVALID != pktio) {
+ if (odp_pktio_close(pktio))
+ ODPH_ERR("IPsec pktio close failed\n");
+ }
+
+ if (ODP_QUEUE_INVALID != default_queue) {
+ if (odp_queue_destroy(default_queue))
+ ODPH_ERR("IPsec defaultq destroy failed\n");
+ }
+
+ if (ODP_POOL_INVALID != pool) {
+ if (odp_pool_destroy(pool))
+ ODPH_ERR("Packet pool destroy failed\n");
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
new file mode 100644
index 000000000..47612e3b3
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
+ * Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
#ifndef _ODP_TEST_IPSEC_H_
#define _ODP_TEST_IPSEC_H_

#include <odp_cunit_common.h>

/* Build an IPv4 address in network byte order from its four octets. */
#define IPV4ADDR(a, b, c, d) odp_cpu_to_be_32(((a) << 24) | \
				      ((b) << 16) | \
				      ((c) << 8) | \
				      ((d) << 0))

/* test arrays: */
extern odp_testinfo_t ipsec_in_suite[];
extern odp_testinfo_t ipsec_out_suite[];

/* Global init/term registered with the test framework. */
int ipsec_init(odp_instance_t *inst);
int ipsec_term(odp_instance_t inst);

int ipsec_in_inline_init(void);
int ipsec_out_inline_init(void);

/* Per-suite init/term; the variants differ in the type of the IPsec
 * destination queue (none for sync, plain or scheduled otherwise). */
int ipsec_suite_sync_init(void);
int ipsec_suite_plain_init(void);
int ipsec_suite_sched_init(void);
int ipsec_suite_term(void);

/* Shared state of the currently running test suite. */
struct suite_context_s {
	odp_bool_t reass_ipv4;	/* IPv4 reassembly enabled and usable */
	odp_bool_t reass_ipv6;	/* IPv6 reassembly enabled and usable */
	odp_ipsec_op_mode_t inbound_op_mode;
	odp_ipsec_op_mode_t outbound_op_mode;
	odp_pool_t pool;	/* packet pool used by all tests */
	odp_queue_t default_queue;	/* inbound default event queue */
	odp_queue_t queue;	/* IPsec destination queue */
	odp_pktio_t pktio;	/* interface used for inline modes */
	odp_queue_type_t q_type;	/* type of 'queue' */
};

extern struct suite_context_s suite_context;

#define MAX_FRAG_LEN 1500
#define MAX_FRAGS 4
#define MAX_PKT_LEN (MAX_FRAG_LEN * MAX_FRAGS)

/* Reference packet: raw data plus the expected header offsets. */
typedef struct {
	uint32_t len;
	uint32_t l2_offset;
	uint32_t l3_offset;
	uint32_t l4_offset;
	uint8_t data[MAX_PKT_LEN];
} ipsec_test_packet;

/* Sentinels meaning "no expectation" for L3/L4 type checks. */
#define _ODP_PROTO_L3_TYPE_UNDEF ((odp_proto_l3_type_t)-1)
#define _ODP_PROTO_L4_TYPE_UNDEF ((odp_proto_l4_type_t)-1)

/* Which SA statistics outcome a test expects. */
enum ipsec_test_stats {
	IPSEC_TEST_STATS_NONE = 0,
	IPSEC_TEST_STATS_SUCCESS,
	IPSEC_TEST_STATS_PROTO_ERR,
	IPSEC_TEST_STATS_AUTH_ERR,
};

/* Which SA lifetime expiry a test expects to be reported. */
enum ipsec_test_sa_expiry {
	IPSEC_TEST_EXPIRY_NONE = 0,
	IPSEC_TEST_EXPIRY_IGNORED,
	IPSEC_TEST_EXPIRY_SOFT_BYTE,
	IPSEC_TEST_EXPIRY_SOFT_PKT,
	IPSEC_TEST_EXPIRY_HARD_BYTE,
	IPSEC_TEST_EXPIRY_HARD_PKT,
};

typedef struct {
	odp_bool_t lookup;	/* use SA lookup instead of an explicit SA */
	odp_bool_t inline_hdr_in_packet;
	odp_bool_t test_sa_seq_num;	/* verify sequence numbers */
} ipsec_test_part_flags_t;

/* One IPsec operation of a test case: the input packet and the
 * expected result packet(s). */
typedef struct {
	ipsec_test_part_flags_t flags;

	/* Input for the inbound or outbound IPsec operation */
	const ipsec_test_packet *pkt_in;
	int num_opt;
	odp_ipsec_out_opt_t opt;

	/* Expected output */
	int num_pkt;
	struct {
		odp_ipsec_op_status_t status;
		odp_packet_reass_status_t reass_status;
		uint16_t num_frags;
		const ipsec_test_packet *pkt_res;
		odp_proto_l3_type_t l3_type;
		odp_proto_l4_type_t l4_type;
		uint32_t seq_num;
		/*
		 * Expected original IP length. Non zero only when expected len
		 * differs from that of input test packet (pkt_in).
		 */
		uint32_t orig_ip_len;
		enum ipsec_test_sa_expiry sa_expiry;
	} out[MAX_FRAGS];
} ipsec_test_part;

extern odp_bool_t sa_expiry_notified;

/* Fill an SA parameter structure for the given direction, protocol,
 * SPI, optional tunnel and crypto algorithms/keys. */
void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
			 odp_ipsec_dir_t dir,
			 odp_ipsec_protocol_t proto,
			 uint32_t spi,
			 odp_ipsec_tunnel_param_t *tun,
			 odp_cipher_alg_t cipher_alg,
			 const odp_crypto_key_t *cipher_key,
			 odp_auth_alg_t auth_alg,
			 const odp_crypto_key_t *auth_key,
			 const odp_crypto_key_t *cipher_key_extra,
			 const odp_crypto_key_t *auth_key_extra);

void ipsec_sa_destroy(odp_ipsec_sa_t sa);
odp_packet_t ipsec_packet(const ipsec_test_packet *itp);
void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa);
int ipsec_check_out(const ipsec_test_part *part,
		    odp_ipsec_sa_t sa,
		    odp_packet_t *pkto);
void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa);
int ipsec_test_sa_update_seq_num(odp_ipsec_sa_t sa, uint32_t seq_num);
void ipsec_test_packet_from_pkt(ipsec_test_packet *test_pkt, odp_packet_t *pkt);
/* Capability filters: return non-zero when the algorithm combination is
 * supported in the current mode, so the test may run. */
int ipsec_check(odp_bool_t ah,
		odp_cipher_alg_t cipher,
		uint32_t cipher_bits,
		odp_auth_alg_t auth,
		uint32_t auth_bits);
#define ipsec_check_ah(auth, auth_bits) \
	ipsec_check(true, ODP_CIPHER_ALG_NULL, 0, auth, auth_bits)
#define ipsec_check_esp(cipher, cipher_bits, auth, auth_bits) \
	ipsec_check(false, cipher, cipher_bits, auth, auth_bits)
int ipsec_check_ah_sha256(void);
int ipsec_check_esp_null_sha256(void);
int ipsec_check_esp_aes_cbc_128_null(void);
int ipsec_check_esp_aes_cbc_128_sha1(void);
int ipsec_check_esp_aes_cbc_128_sha256(void);
int ipsec_check_esp_aes_cbc_128_sha384(void);
int ipsec_check_esp_aes_cbc_128_sha512(void);
int ipsec_check_esp_aes_ctr_128_null(void);
int ipsec_check_esp_aes_gcm_128(void);
int ipsec_check_esp_aes_gcm_256(void);
int ipsec_check_ah_aes_gmac_128(void);
int ipsec_check_esp_null_aes_gmac_128(void);
int ipsec_check_esp_chacha20_poly1305(void);
int ipsec_check_test_sa_update_seq_num(void);
int ipsec_check_esp_aes_gcm_128_reass_ipv4(void);
int ipsec_check_esp_aes_gcm_128_reass_ipv6(void);
int ipsec_check_esp_null_aes_xcbc(void);
void ipsec_status_event_get(odp_ipsec_sa_t sa,
			    enum ipsec_test_sa_expiry sa_expiry);

#endif
diff --git a/test/validation/api/ipsec/ipsec_async.sh b/test/validation/api/ipsec/ipsec_async.sh
new file mode 100755
index 000000000..c12f3e70a
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_async.sh
@@ -0,0 +1,3 @@
#!/bin/sh
# Run the IPsec validation tests in asynchronous mode.
# Quote $0 and the binary path so paths containing spaces work (SC2086).
TEST_DIR="${TEST_DIR:-$(dirname "$0")/..}/ipsec"
"$TEST_DIR/ipsec_main$EXEEXT" async
diff --git a/test/validation/api/ipsec/ipsec_inline_in.sh b/test/validation/api/ipsec/ipsec_inline_in.sh
new file mode 100755
index 000000000..c1d1fed36
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_inline_in.sh
@@ -0,0 +1,3 @@
#!/bin/sh
# Run the IPsec validation tests with inline inbound processing.
# Quote $0 and the binary path so paths containing spaces work (SC2086).
TEST_DIR="${TEST_DIR:-$(dirname "$0")/..}/ipsec"
"$TEST_DIR/ipsec_main$EXEEXT" inline-in
diff --git a/test/validation/api/ipsec/ipsec_inline_out.sh b/test/validation/api/ipsec/ipsec_inline_out.sh
new file mode 100755
index 000000000..91ebccac7
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_inline_out.sh
@@ -0,0 +1,3 @@
#!/bin/sh
# Run the IPsec validation tests with inline outbound processing.
# Quote $0 and the binary path so paths containing spaces work (SC2086).
TEST_DIR="${TEST_DIR:-$(dirname "$0")/..}/ipsec"
"$TEST_DIR/ipsec_main$EXEEXT" inline-out
diff --git a/test/validation/api/ipsec/ipsec_main.c b/test/validation/api/ipsec/ipsec_main.c
new file mode 100644
index 000000000..2c343f063
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_main.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2024 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "ipsec.h"
+
/* Synchronous mode: no completion queues are involved. */
odp_suiteinfo_t ipsec_sync_suites[] = {
	{"IPsec-in", ipsec_suite_sync_init, ipsec_suite_term, ipsec_in_suite},
	{"IPsec-out", ipsec_suite_sync_init, ipsec_suite_term, ipsec_out_suite},
	ODP_SUITE_INFO_NULL,
};

/* Asynchronous mode: each direction is run once with a plain and once
 * with a scheduled destination queue. */
odp_suiteinfo_t ipsec_async_suites[] = {
	{"IPsec-plain-in", ipsec_suite_plain_init, ipsec_suite_term, ipsec_in_suite},
	{"IPsec-sched-in", ipsec_suite_sched_init, ipsec_suite_term, ipsec_in_suite},
	{"IPsec-plain-out", ipsec_suite_plain_init, ipsec_suite_term, ipsec_out_suite},
	{"IPsec-sched-out", ipsec_suite_sched_init, ipsec_suite_term, ipsec_out_suite},
	ODP_SUITE_INFO_NULL,
};

/* Inline inbound mode: only the inbound suites, with both queue types. */
odp_suiteinfo_t ipsec_inline_in_suites[] = {
	{"IPsec-plain-in", ipsec_suite_plain_init, ipsec_suite_term, ipsec_in_suite},
	{"IPsec-sched-in", ipsec_suite_sched_init, ipsec_suite_term, ipsec_in_suite},
	ODP_SUITE_INFO_NULL,
};

/* Inline outbound mode: only the outbound suites, with both queue types. */
odp_suiteinfo_t ipsec_inline_out_suites[] = {
	{"IPsec-plain-out", ipsec_suite_plain_init, ipsec_suite_term, ipsec_out_suite},
	{"IPsec-sched-out", ipsec_suite_sched_init, ipsec_suite_term, ipsec_out_suite},
	ODP_SUITE_INFO_NULL,
};
+
/* Print the command-line usage and terminate with failure status. */
static void usage_exit(void)
{
	fprintf(stderr,
		"Usage: ipsec_main {sync|async|inline-in|inline-out}\n");
	exit(EXIT_FAILURE);
}
+
+int main(int argc, char *argv[])
+{
+ char *test_mode;
+ odp_suiteinfo_t *suites = NULL;
+ int ret;
+
+ if (odp_cunit_parse_options(&argc, argv))
+ return EXIT_FAILURE;
+
+ if (argc < 2)
+ usage_exit();
+ test_mode = argv[1];
+
+ if ((!strcmp(test_mode, "sync"))) {
+ suite_context.inbound_op_mode = ODP_IPSEC_OP_MODE_SYNC;
+ suite_context.outbound_op_mode = ODP_IPSEC_OP_MODE_SYNC;
+ suites = ipsec_sync_suites;
+ } else if ((!strcmp(test_mode, "async"))) {
+ suite_context.inbound_op_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ suite_context.outbound_op_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ suites = ipsec_async_suites;
+ } else if ((!strcmp(test_mode, "inline-in"))) {
+ suite_context.inbound_op_mode = ODP_IPSEC_OP_MODE_INLINE;
+ suite_context.outbound_op_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ suites = ipsec_inline_in_suites;
+ } else if ((!strcmp(test_mode, "inline-out"))) {
+ suite_context.inbound_op_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ suite_context.outbound_op_mode = ODP_IPSEC_OP_MODE_INLINE;
+ suites = ipsec_inline_out_suites;
+ } else {
+ usage_exit();
+ }
+
+ odp_cunit_register_global_init(ipsec_init);
+ odp_cunit_register_global_term(ipsec_term);
+ ret = odp_cunit_register(suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/ipsec/ipsec_sync.sh b/test/validation/api/ipsec/ipsec_sync.sh
new file mode 100755
index 000000000..2653ddc78
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_sync.sh
@@ -0,0 +1,3 @@
#!/bin/sh
# Run the IPsec validation tests in synchronous mode.
# Quote $0 and the binary path so paths containing spaces work (SC2086).
TEST_DIR="${TEST_DIR:-$(dirname "$0")/..}/ipsec"
"$TEST_DIR/ipsec_main$EXEEXT" sync
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
new file mode 100644
index 000000000..b5251544e
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -0,0 +1,2369 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020-2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/helper/odph_api.h>
+
+#include "ipsec.h"
+
+#include "reass_test_vectors.h"
+#include "test_vectors.h"
+
+static void part_prep_esp(ipsec_test_part part[], int num_part, bool v6_tunnel)
+{
+ int i;
+
+ memset(part, 0, sizeof(ipsec_test_part) * num_part);
+
+ for (i = 0; i < num_part; i++) {
+ part[i].num_pkt = 1;
+
+ if (v6_tunnel)
+ part[i].out[0].l3_type = ODP_PROTO_L3_TYPE_IPV6;
+ else
+ part[i].out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+
+ part[i].out[0].l4_type = ODP_PROTO_L4_TYPE_ESP;
+ }
+}
+
+static void part_prep_plain(ipsec_test_part *part, int num_pkt, bool v6, bool udp)
+{
+ int i;
+
+ part->num_pkt = num_pkt;
+ for (i = 0; i < num_pkt; i++) {
+ part->out[i].l4_type = _ODP_PROTO_L4_TYPE_UNDEF;
+
+ if (v6)
+ part->out[i].l3_type = ODP_PROTO_L3_TYPE_IPV6;
+ else
+ part->out[i].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+
+ if (udp)
+ part->out[i].l4_type = ODP_PROTO_L4_TYPE_UDP;
+ }
+}
+
+static void test_in_ipv4_ah_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_ah_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_ah_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_ah_sha256_tun_ipv4_notun(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ /* It is L4_TYPE_IPV4 */
+ .l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
+ .pkt_res = &pkt_ipv4_icmp_0_ipip },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_null(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_NULL, NULL,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_null_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha1(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha1_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha256_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha384(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA384_HMAC, &key_5a_384,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha512(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA512_HMAC, &key_5a_512,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha512_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_ctr_null(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CTR, &key_a5_128,
+ ODP_AUTH_ALG_NULL, NULL,
+ &key_mcgrew_gcm_salt_3, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_ctr_null_1,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv4 AH with SHA-256 HMAC, SA resolved via SPI lookup (flags.lookup = 1)
+ * rather than being passed explicitly to the operation. */
+static void test_in_ipv4_ah_sha256_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
+		.flags = {
+			.lookup = 1,
+		},
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv4 ESP (null cipher, SHA-256 HMAC) with SA resolved via SPI lookup. */
+static void test_in_ipv4_esp_null_sha256_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
+		.flags = {
+			.lookup = 1,
+		},
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode ESP (IPv4-in-IPv4): decapsulation strips the outer IPv4
+ * header and yields the inner ICMP packet. Tunnel params are zeroed; addresses
+ * are irrelevant for the inbound direction. */
+static void test_in_ipv4_esp_null_sha256_tun_ipv4(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode ESP (IPv4-in-IPv6): outer IPv6 header is stripped,
+ * inner IPv4 ICMP packet is recovered. */
+static void test_in_ipv4_esp_null_sha256_tun_ipv6(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound UDP-encapsulated ESP (NAT-T, RFC 3948 style): udp_encap option set
+ * on the SA; the UDP header is removed along with the ESP encapsulation. */
+static void test_in_ipv4_esp_udp_null_sha256(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.opt.udp_encap = 1;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* UDP-encapsulated ESP combined with SPI-based SA lookup. */
+static void test_in_ipv4_esp_udp_null_sha256_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.opt.udp_encap = 1;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
+		.flags = {
+			.lookup = 1,
+		},
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* AH with anti-replay DISABLED (window size 0): the same sequence number
+ * (seq 1) must be accepted repeatedly, even after a later seq (1235) passed. */
+static void test_in_ipv4_ah_sha256_noreplay(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.inbound.antireplay_ws = 0;	/* replay check off */
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	/* Same payload carried with sequence number 1235 */
+	ipsec_test_part test_1235 = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1235,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	/* seq 1 twice, then 1235, then 1 again: all must succeed with ws=0 */
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test_1235, sa);
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* AH with anti-replay ENABLED (window 32): a repeated sequence number must be
+ * rejected with status.error.antireplay, both inside the window and after the
+ * window has advanced past it (seq 1235). */
+static void test_in_ipv4_ah_sha256_replay(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test_repl;
+
+	memset(&test_repl, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.inbound.antireplay_ws = 32;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	/* Replayed seq 1: expected to fail with antireplay error */
+	test_repl.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
+	test_repl.num_pkt = 1;
+	test_repl.out[0].status.error.antireplay = 1;
+
+	ipsec_test_part test_1235 = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1235,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	/* seq 1 ok, replayed seq 1 fails, seq 1235 ok, seq 1 fails again
+	 * (now also outside the 32-entry window) */
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test_repl, sa);
+	ipsec_check_in_one(&test_1235, sa);
+	ipsec_check_in_one(&test_repl, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* ESP counterpart of the AH no-replay test: window size 0 disables the check,
+ * so repeating sequence number 1 always succeeds. */
+static void test_in_ipv4_esp_null_sha256_noreplay(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.inbound.antireplay_ws = 0;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_test_part test_1235 = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1235,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test_1235, sa);
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* ESP counterpart of the AH replay test: with a 32-entry window, replayed
+ * sequence numbers must be flagged with status.error.antireplay. */
+static void test_in_ipv4_esp_null_sha256_replay(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test_repl;
+
+	memset(&test_repl, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.inbound.antireplay_ws = 32;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	test_repl.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
+	test_repl.num_pkt = 1;
+	test_repl.out[0].status.error.antireplay = 1;
+
+	ipsec_test_part test_1235 = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1235,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+	ipsec_check_in_one(&test_repl, sa);
+	ipsec_check_in_one(&test_1235, sa);
+	ipsec_check_in_one(&test_repl, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Protocol mismatch: an ESP packet handed to an AH SA must be rejected
+ * with status.error.proto. Skipped in inline inbound mode. */
+static void test_in_ipv4_ah_esp_pkt(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	/* This test will not work properly inbound inline mode.
+	 * test_in_ipv4_ah_esp_pkt_lookup will be used instead. */
+	if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE)
+		return;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
+	test.num_pkt = 1;
+	test.out[0].status.error.proto = 1;
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Protocol mismatch in the other direction: an AH packet handed to an ESP SA
+ * must fail with status.error.proto. Skipped in inline inbound mode. */
+static void test_in_ipv4_esp_ah_pkt(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	/* This test will not work properly inbound inline mode.
+	 * test_in_ipv4_esp_ah_pkt_lookup will be used instead. */
+	if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE)
+		return;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
+	test.num_pkt = 1;
+	test.out[0].status.error.proto = 1;
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* SPI lookup variant of the AH/ESP mismatch test: only an AH SA exists, so
+ * lookup for the incoming ESP packet must fail with status.error.sa_lookup.
+ * The check runs with ODP_IPSEC_SA_INVALID since no SA should match. */
+static void test_in_ipv4_ah_esp_pkt_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
+	test.flags.lookup = 1;
+	test.num_pkt = 1;
+	test.out[0].status.error.sa_lookup = 1;
+
+	ipsec_check_in_one(&test, ODP_IPSEC_SA_INVALID);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* SPI lookup variant of the ESP/AH mismatch test: only an ESP SA exists, so
+ * lookup for the incoming AH packet must fail with status.error.sa_lookup. */
+static void test_in_ipv4_esp_ah_pkt_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
+	test.flags.lookup = 1;
+	test.num_pkt = 1;
+	test.out[0].status.error.sa_lookup = 1;
+
+	ipsec_check_in_one(&test, ODP_IPSEC_SA_INVALID);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Corrupted AH packet (variant bad1): ICV verification must fail with
+ * status.error.auth. */
+static void test_in_ipv4_ah_sha256_bad1(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad1;
+	test.num_pkt = 1;
+	test.out[0].status.error.auth = 1;
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Corrupted AH packet (variant bad2): ICV verification must fail with
+ * status.error.auth. */
+static void test_in_ipv4_ah_sha256_bad2(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad2;
+	test.num_pkt = 1;
+	test.out[0].status.error.auth = 1;
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Corrupted ESP packet: integrity check must fail with status.error.auth. */
+static void test_in_ipv4_esp_null_sha256_bad1(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+	ipsec_test_part test;
+
+	memset(&test, 0, sizeof(ipsec_test_part));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1_bad1;
+	test.num_pkt = 1;
+	test.out[0].status.error.auth = 1;
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* RFC 3602 AES-CBC ESP known-answer test, case 5 (transport mode,
+ * SPI 0x4321): decrypt the published vector and compare to the plaintext. */
+static void test_in_ipv4_rfc3602_5_esp(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 0x4321, NULL,
+			    ODP_CIPHER_ALG_AES_CBC, &key_rfc3602,
+			    ODP_AUTH_ALG_NULL, NULL,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_rfc3602_5_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_rfc3602_5 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* RFC 3602 AES-CBC ESP known-answer test, case 6 (transport mode). */
+static void test_in_ipv4_rfc3602_6_esp(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 0x4321, NULL,
+			    ODP_CIPHER_ALG_AES_CBC, &key_rfc3602,
+			    ODP_AUTH_ALG_NULL, NULL,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_rfc3602_6_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_rfc3602_6 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* RFC 3602 AES-CBC ESP known-answer test, case 7 (tunnel mode, SPI 0x8765). */
+static void test_in_ipv4_rfc3602_7_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x8765, &tunnel,
+			    ODP_CIPHER_ALG_AES_CBC, &key_rfc3602_2,
+			    ODP_AUTH_ALG_NULL, NULL,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_rfc3602_7_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_rfc3602_7 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* RFC 3602 AES-CBC ESP known-answer test, case 8 (tunnel mode, SPI 0x8765). */
+static void test_in_ipv4_rfc3602_8_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x8765, &tunnel,
+			    ODP_CIPHER_ALG_AES_CBC, &key_rfc3602_2,
+			    ODP_AUTH_ALG_NULL, NULL,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_rfc3602_8_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_rfc3602_8 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* McGrew-Viega AES-GCM ESP test vector 2 (tunnel mode): combined-mode cipher,
+ * so the auth alg is also AES_GCM and the salt goes in as cipher extra data. */
+static void test_in_ipv4_mcgrew_gcm_2_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0xa5f8, &tunnel,
+			    ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_2,
+			    ODP_AUTH_ALG_AES_GCM, NULL,
+			    &key_mcgrew_gcm_salt_2, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_2_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_UDP,
+			  .pkt_res = &pkt_mcgrew_gcm_test_2},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* McGrew-Viega AES-GCM ESP test vector 3 (tunnel mode). The decapsulated
+ * payload's L4 protocol is not classifiable, hence _ODP_PROTO_L4_TYPE_UNDEF. */
+static void test_in_ipv4_mcgrew_gcm_3_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x4a2cbfe3, &tunnel,
+			    ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_3,
+			    ODP_AUTH_ALG_AES_GCM, NULL,
+			    &key_mcgrew_gcm_salt_3, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_3_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
+			  .pkt_res = &pkt_mcgrew_gcm_test_3},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* McGrew-Viega AES-GCM ESP test vector 4 (tunnel mode, SPI 0). */
+static void test_in_ipv4_mcgrew_gcm_4_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x00000000, &tunnel,
+			    ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+			    ODP_AUTH_ALG_AES_GCM, NULL,
+			    &key_mcgrew_gcm_salt_4, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_4_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_mcgrew_gcm_test_4},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* McGrew-Viega AES-GCM ESP test vector 12 (tunnel mode): the decapsulated
+ * payload has no IP header at all, so the expected L3 type is NONE.
+ * Skipped in inline inbound mode. */
+static void test_in_ipv4_mcgrew_gcm_12_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	/* This test will not work properly inbound inline mode.
+	 * Packet might be dropped and we will not check for that. */
+	if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE)
+		return;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x335467ae, &tunnel,
+			    ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_12,
+			    ODP_AUTH_ALG_AES_GCM, NULL,
+			    &key_mcgrew_gcm_salt_12, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_NONE,
+			  .l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
+			  .pkt_res = &pkt_mcgrew_gcm_test_12},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Same vector 12 packet processed through a transport-mode SA: the ESP next
+ * header resolves to NO_NEXT, and the result keeps the outer IPv4 header. */
+static void test_in_ipv4_mcgrew_gcm_12_esp_notun(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x335467ae, NULL,
+			    ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_12,
+			    ODP_AUTH_ALG_AES_GCM, NULL,
+			    &key_mcgrew_gcm_salt_12, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_NO_NEXT,
+			  .pkt_res = &pkt_mcgrew_gcm_test_12_notun },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* McGrew-Viega test vector 15: ESP with NULL cipher and AES-GMAC auth
+ * (auth-only GCM); the salt is supplied as auth extra key data. */
+static void test_in_ipv4_mcgrew_gcm_15_esp(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x00004321, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_AES_GMAC, &key_mcgrew_gcm_15,
+			    NULL, &key_mcgrew_gcm_salt_15);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_mcgrew_gcm_test_15_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_mcgrew_gcm_test_15},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* RFC 7634 ChaCha20-Poly1305 ESP known-answer test (tunnel mode): AEAD
+ * cipher, so the auth alg mirrors the cipher and the salt is cipher extra
+ * key data. */
+static void test_in_ipv4_rfc7634_chacha(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+			    0x01020304, &tunnel,
+			    ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634,
+			    ODP_AUTH_ALG_CHACHA20_POLY1305, NULL,
+			    &key_rfc7634_salt, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_rfc7634_esp,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_rfc7634},
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv4 AH authenticated with AES-GMAC (128-bit key + salt). */
+static void test_in_ipv4_ah_aes_gmac_128(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
+			    NULL, &key_mcgrew_gcm_salt_2);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_ah_aes_gmac_128_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv4 ESP with NULL cipher and AES-GMAC (128-bit) authentication. */
+static void test_in_ipv4_esp_null_aes_gmac_128(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
+			    NULL, &key_mcgrew_gcm_salt_2);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv4_icmp_0_esp_null_aes_gmac_128_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+			  .pkt_res = &pkt_ipv4_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv6 AH with SHA-256 HMAC, transport mode. */
+static void test_in_ipv6_ah_sha256(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_ah_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode AH (IPv6-in-IPv4): outer IPv4 header stripped,
+ * inner IPv6 ICMPv6 packet recovered. */
+static void test_in_ipv6_ah_sha256_tun_ipv4(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode AH (IPv6-in-IPv6). */
+static void test_in_ipv6_ah_sha256_tun_ipv6(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_AH, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound IPv6 ESP (null cipher, SHA-256 HMAC), transport mode. */
+static void test_in_ipv6_esp_null_sha256(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_esp_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode ESP (IPv6-in-IPv4). */
+static void test_in_ipv6_esp_null_sha256_tun_ipv4(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound tunnel-mode ESP (IPv6-in-IPv6). */
+static void test_in_ipv6_esp_null_sha256_tun_ipv6(void)
+{
+	odp_ipsec_tunnel_param_t tunnel;
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Inbound UDP-encapsulated ESP over IPv6 (NAT-T). */
+static void test_in_ipv6_esp_udp_null_sha256(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.opt.udp_encap = 1;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* UDP-encapsulated ESP over IPv6 combined with SPI-based SA lookup. */
+static void test_in_ipv6_esp_udp_null_sha256_lookup(void)
+{
+	odp_ipsec_sa_param_t param;
+	odp_ipsec_sa_t sa;
+
+	ipsec_sa_param_fill(&param,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_NULL, NULL,
+			    ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+			    NULL, NULL);
+	param.opt.udp_encap = 1;
+
+	sa = odp_ipsec_sa_create(&param);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+	ipsec_test_part test = {
+		.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
+		.flags = {
+			.lookup = 1,
+		},
+		.num_pkt = 1,
+		.out = {
+			{ .status.warn.all = 0,
+			  .status.error.all = 0,
+			  .l3_type = ODP_PROTO_L3_TYPE_IPV6,
+			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
+			  .pkt_res = &pkt_ipv6_icmp_0 },
+		},
+	};
+
+	ipsec_check_in_one(&test, sa);
+
+	ipsec_sa_destroy(sa);
+}
+
+/* Smoke test: odp_ipsec_print() must run without crashing (output unchecked). */
+static void test_ipsec_print(void)
+{
+	odp_ipsec_print();
+}
+
+/* Smoke test: create an inbound SA and call odp_ipsec_sa_print() on it
+ * (output itself is not verified). */
+static void test_ipsec_sa_print(void)
+{
+	odp_ipsec_sa_param_t param_in;
+	odp_ipsec_sa_t in_sa;
+
+	ipsec_sa_param_fill(&param_in,
+			    ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+			    ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+			    ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+			    NULL, NULL);
+
+	in_sa = odp_ipsec_sa_create(&param_in);
+
+	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+	odp_ipsec_sa_print(in_sa);
+
+	ipsec_sa_destroy(in_sa);
+}
+
+/*
+ * Round-trip helper for reassembly tests: encapsulate each input fragment
+ * through the outbound SA, then feed the resulting ESP packets to the inbound
+ * SA one by one. Only the last inbound operation is expected to produce a
+ * result packet, carrying the given reassembly status, fragment count and
+ * accumulated original IP length.
+ *
+ * out_sa/in_sa       - outbound and inbound SAs (must be a matching pair)
+ * tunnel_ip_ver      - outer IP version used by the outbound tunnel SA
+ * input_packets      - num_input_packets plain-text fragments to send
+ * result_packet      - expected final (reassembled) packet
+ * reass_status       - expected reassembly status on the last packet
+ */
+static void test_multi_out_in(odp_ipsec_sa_t out_sa,
+			      odp_ipsec_sa_t in_sa,
+			      uint8_t tunnel_ip_ver,
+			      int num_input_packets,
+			      ipsec_test_packet *input_packets[],
+			      ipsec_test_packet *result_packet,
+			      odp_packet_reass_status_t reass_status)
+{
+	/* Peek at the result packet's IP version nibble to know whether the
+	 * reassembled packet is IPv6 */
+	uint8_t ver_ihl = result_packet->data[result_packet->l3_offset];
+	odp_bool_t is_result_ipv6 = (ODPH_IPV4HDR_VER(ver_ihl) == ODPH_IPV6);
+	uint32_t orig_ip_len = 0;
+	int i;
+
+	for (i = 0; i < num_input_packets; i++) {
+		ipsec_test_part test_out;
+		ipsec_test_part test_in;
+		ipsec_test_packet test_pkt;
+		odp_packet_t pkt = ODP_PACKET_INVALID;
+		uint32_t l3_off, pkt_len;
+
+		/*
+		 * Convert plain text packet to IPsec packet through
+		 * outbound IPsec processing.
+		 */
+		part_prep_esp(&test_out, 1, tunnel_ip_ver == ODPH_IPV6);
+		test_out.pkt_in = input_packets[i];
+		CU_ASSERT_EQUAL(ipsec_check_out(&test_out, out_sa, &pkt), 1);
+
+		/*
+		 * Perform inbound IPsec processing for the IPsec packet.
+		 * Expect result packet only for the last packet.
+		 */
+		memset(&test_in, 0, sizeof(test_in));
+
+		/*
+		 * In case of complete reassembly, the original IP length is the
+		 * sum of IP lengths of the ESP packets that contained the
+		 * individual fragments.
+		 */
+		if (reass_status == ODP_PACKET_REASS_COMPLETE) {
+			pkt_len = odp_packet_len(pkt);
+			l3_off = odp_packet_l3_offset(pkt);
+			/* NOTE(review): no trailing semicolon after CU_ASSERT;
+			 * relies on the macro expanding to a full statement —
+			 * consider adding one for style consistency. */
+			CU_ASSERT(ODP_PACKET_OFFSET_INVALID != l3_off)
+
+			orig_ip_len += pkt_len - l3_off;
+		}
+
+		/* Expectations are filled in only for the final fragment */
+		if (i == num_input_packets - 1) {
+			part_prep_plain(&test_in, 1, is_result_ipv6, true);
+			test_in.out[0].pkt_res = result_packet;
+			test_in.out[0].reass_status = reass_status;
+			test_in.out[0].num_frags = num_input_packets;
+			test_in.out[0].orig_ip_len = orig_ip_len;
+		}
+		/* Re-wrap the outbound result as a test input packet */
+		ipsec_test_packet_from_pkt(&test_pkt, &pkt);
+		test_in.pkt_in = &test_pkt;
+
+		ipsec_check_in_one(&test_in, in_sa);
+	}
+}
+
+/* Two in-order IPv4 fragments must reassemble into the full datagram. */
+static void test_in_ipv4_esp_reass_success_two_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f1,
+ &pkt_ipv4_udp_p1_f2,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Four in-order IPv4 fragments must reassemble into the full datagram. */
+static void test_in_ipv4_esp_reass_success_four_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p2_f1,
+ &pkt_ipv4_udp_p2_f2,
+ &pkt_ipv4_udp_p2_f3,
+ &pkt_ipv4_udp_p2_f4,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p2;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Two out-of-order IPv4 fragments must still reassemble successfully. */
+static void test_in_ipv4_esp_reass_success_two_frags_ooo(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f2,
+ &pkt_ipv4_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Four out-of-order IPv4 fragments must still reassemble successfully. */
+static void test_in_ipv4_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p2_f4,
+ &pkt_ipv4_udp_p2_f1,
+ &pkt_ipv4_udp_p2_f2,
+ &pkt_ipv4_udp_p2_f3,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p2;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/*
+ * With one fragment missing, reassembly must report INCOMPLETE and the
+ * lone fragment itself is expected back as the result.
+ */
+static void test_in_ipv4_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1_f1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_INCOMPLETE);
+}
+
+/*
+ * Successful IPv4 fragment reassembly cases: set up an AES-GCM ESP tunnel
+ * SA pair with inbound reassembly enabled and run the in-order and
+ * out-of-order sub-cases against it.
+ */
+static void test_in_ipv4_esp_reass_success(void)
+{
+ odp_ipsec_tunnel_param_t in_tunnel, out_tunnel;
+ odp_ipsec_sa_param_t param_in, param_out;
+ uint32_t src = IPV4ADDR(10, 0, 11, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 22, 2);
+ odp_ipsec_sa_t out_sa, in_sa;
+
+ memset(&in_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ out_tunnel.ipv4.src_addr = &src;
+ out_tunnel.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &in_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ /* Enable post-decryption fragment reassembly on the inbound SA */
+ param_in.inbound.reassembly_en = 1;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ in_sa = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+ printf("\n IPv4 two frags");
+ test_in_ipv4_esp_reass_success_two_frags(out_sa, in_sa);
+
+ printf("\n IPv4 four frags");
+ test_in_ipv4_esp_reass_success_four_frags(out_sa, in_sa);
+
+ printf("\n IPv4 two frags out of order");
+ test_in_ipv4_esp_reass_success_two_frags_ooo(out_sa, in_sa);
+
+ printf("\n IPv4 four frags out of order");
+ test_in_ipv4_esp_reass_success_four_frags_ooo(out_sa, in_sa);
+
+ printf("\n");
+
+ ipsec_sa_destroy(in_sa);
+ ipsec_sa_destroy(out_sa);
+}
+
+/*
+ * Incomplete IPv4 reassembly case: same AES-GCM tunnel SA setup as the
+ * success test, but only one fragment is sent so reassembly cannot finish.
+ */
+static void test_in_ipv4_esp_reass_incomp(void)
+{
+ odp_ipsec_tunnel_param_t in_tunnel, out_tunnel;
+ odp_ipsec_sa_param_t param_in, param_out;
+ uint32_t src = IPV4ADDR(10, 0, 11, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 22, 2);
+ odp_ipsec_sa_t out_sa, in_sa;
+
+ memset(&in_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ out_tunnel.ipv4.src_addr = &src;
+ out_tunnel.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &in_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ /* Enable post-decryption fragment reassembly on the inbound SA */
+ param_in.inbound.reassembly_en = 1;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ in_sa = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+ printf("\n IPv4 missing frag");
+ test_in_ipv4_esp_reass_incomp_missing(out_sa, in_sa);
+
+ printf("\n");
+
+ ipsec_sa_destroy(in_sa);
+ ipsec_sa_destroy(out_sa);
+}
+
+/* Two in-order IPv6 fragments must reassemble into the full datagram. */
+static void test_in_ipv6_esp_reass_success_two_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f1,
+ &pkt_ipv6_udp_p1_f2,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Four in-order IPv6 fragments must reassemble into the full datagram. */
+static void test_in_ipv6_esp_reass_success_four_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p2_f1,
+ &pkt_ipv6_udp_p2_f2,
+ &pkt_ipv6_udp_p2_f3,
+ &pkt_ipv6_udp_p2_f4,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p2;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Two out-of-order IPv6 fragments must still reassemble successfully. */
+static void test_in_ipv6_esp_reass_success_two_frags_ooo(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f2,
+ &pkt_ipv6_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/* Four out-of-order IPv6 fragments must still reassemble successfully. */
+static void test_in_ipv6_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p2_f2,
+ &pkt_ipv6_udp_p2_f3,
+ &pkt_ipv6_udp_p2_f4,
+ &pkt_ipv6_udp_p2_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p2;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_COMPLETE);
+}
+
+/*
+ * With one fragment missing, reassembly must report INCOMPLETE and the
+ * lone fragment itself is expected back as the result.
+ */
+static void test_in_ipv6_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1_f1;
+
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ODPH_ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet,
+ ODP_PACKET_REASS_INCOMPLETE);
+}
+
+/*
+ * Successful IPv6 fragment reassembly cases: set up an AES-GCM ESP tunnel
+ * SA pair with inbound reassembly enabled and run the in-order and
+ * out-of-order sub-cases against it.
+ */
+static void test_in_ipv6_esp_reass_success(void)
+{
+ odp_ipsec_tunnel_param_t in_tunnel, out_tunnel;
+ odp_ipsec_sa_param_t param_in, param_out;
+ odp_ipsec_sa_t out_sa, in_sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&in_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ /* Pass the decayed arrays, consistent with the other IPv6 tunnel tests */
+ out_tunnel.ipv6.src_addr = src;
+ out_tunnel.ipv6.dst_addr = dst;
+ out_tunnel.ipv6.hlimit = 64;
+
+ ipsec_sa_param_fill(&param_out,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &in_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+ /* Enable post-decryption fragment reassembly on the inbound SA */
+ param_in.inbound.reassembly_en = 1;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ in_sa = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+ printf("\n IPv6 two frags");
+ test_in_ipv6_esp_reass_success_two_frags(out_sa, in_sa);
+
+ printf("\n IPv6 four frags");
+ test_in_ipv6_esp_reass_success_four_frags(out_sa, in_sa);
+
+ printf("\n IPv6 two frags out of order");
+ test_in_ipv6_esp_reass_success_two_frags_ooo(out_sa, in_sa);
+
+ printf("\n IPv6 four frags out of order");
+ test_in_ipv6_esp_reass_success_four_frags_ooo(out_sa, in_sa);
+
+ printf("\n");
+
+ ipsec_sa_destroy(in_sa);
+ ipsec_sa_destroy(out_sa);
+}
+
+/*
+ * Incomplete IPv6 reassembly case: same AES-GCM tunnel SA setup as the
+ * success test, but only one fragment is sent so reassembly cannot finish.
+ */
+static void test_in_ipv6_esp_reass_incomp(void)
+{
+ odp_ipsec_tunnel_param_t in_tunnel, out_tunnel;
+ odp_ipsec_sa_param_t param_in, param_out;
+ odp_ipsec_sa_t out_sa, in_sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&in_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ /* Pass the decayed arrays, consistent with the other IPv6 tunnel tests */
+ out_tunnel.ipv6.src_addr = src;
+ out_tunnel.ipv6.dst_addr = dst;
+
+ ipsec_sa_param_fill(&param_out,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &in_tunnel,
+ ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
+ ODP_AUTH_ALG_AES_GCM, NULL,
+ &key_mcgrew_gcm_salt_4, NULL);
+ /* Enable post-decryption fragment reassembly on the inbound SA */
+ param_in.inbound.reassembly_en = 1;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ in_sa = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+ printf("\n IPv6 missing frag");
+ test_in_ipv6_esp_reass_incomp_missing(out_sa, in_sa);
+
+ printf("\n");
+
+ ipsec_sa_destroy(in_sa);
+ ipsec_sa_destroy(out_sa);
+}
+
+/*
+ * Inbound ESP with NULL cipher and AES-XCBC-MAC authentication in tunnel
+ * mode: decapsulation of a known test vector must yield the plain packet.
+ */
+static void test_in_ipv4_null_aes_xcbc_esp(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 0x100, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_AES_XCBC_MAC, &key_auth_aes_xcbc_128,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_null_aes_xcbc_esp,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_UDP,
+ .pkt_res = &pkt_ipv4_null_aes_xcbc_plain,
+ },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * Inbound IPsec test suite registration. Conditional entries run only when
+ * the named capability-check function reports the algorithm combination as
+ * supported by the implementation under test.
+ */
+odp_testinfo_t ipsec_in_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_rfc3602_5_esp,
+ ipsec_check_esp_aes_cbc_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_rfc3602_6_esp,
+ ipsec_check_esp_aes_cbc_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_rfc3602_7_esp,
+ ipsec_check_esp_aes_cbc_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_rfc3602_8_esp,
+ ipsec_check_esp_aes_cbc_128_null),
+ /* test 1, 5, 6, 8 -- 11 -- ESN */
+ /* test 7 -- invalid, plaintext packet includes trl into IP length */
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_2_esp,
+ ipsec_check_esp_aes_gcm_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_3_esp,
+ ipsec_check_esp_aes_gcm_256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_4_esp,
+ ipsec_check_esp_aes_gcm_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_12_esp,
+ ipsec_check_esp_aes_gcm_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_12_esp_notun,
+ ipsec_check_esp_aes_gcm_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_mcgrew_gcm_15_esp,
+ ipsec_check_esp_null_aes_gmac_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_rfc7634_chacha,
+ ipsec_check_esp_chacha20_poly1305),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_tun_ipv4,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_tun_ipv6,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_tun_ipv4_notun,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_null,
+ ipsec_check_esp_aes_cbc_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha1,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha256,
+ ipsec_check_esp_aes_cbc_128_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha384,
+ ipsec_check_esp_aes_cbc_128_sha384),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha512,
+ ipsec_check_esp_aes_cbc_128_sha512),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_ctr_null,
+ ipsec_check_esp_aes_ctr_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_lookup,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_lookup,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_tun_ipv4,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_tun_ipv6,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_udp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_udp_null_sha256_lookup,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_noreplay,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_replay,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_noreplay,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_replay,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_esp_pkt,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_ah_pkt,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_esp_pkt_lookup,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_ah_pkt_lookup,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_bad1,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_bad2,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_sha256_bad1,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_aes_gmac_128,
+ ipsec_check_ah_aes_gmac_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_null_aes_gmac_128,
+ ipsec_check_esp_null_aes_gmac_128),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_ah_sha256,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_ah_sha256_tun_ipv4,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_ah_sha256_tun_ipv6,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_null_sha256_tun_ipv4,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_null_sha256_tun_ipv6,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_udp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_udp_null_sha256_lookup,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO(test_ipsec_print),
+ ODP_TEST_INFO_CONDITIONAL(test_ipsec_sa_print,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_reass_success,
+ ipsec_check_esp_aes_gcm_128_reass_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_reass_incomp,
+ ipsec_check_esp_aes_gcm_128_reass_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_reass_success,
+ ipsec_check_esp_aes_gcm_128_reass_ipv6),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_reass_incomp,
+ ipsec_check_esp_aes_gcm_128_reass_ipv6),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_null_aes_xcbc_esp,
+ ipsec_check_esp_null_aes_xcbc),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
new file mode 100644
index 000000000..ca8bf97a5
--- /dev/null
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -0,0 +1,2077 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
+ * Copyright (c) 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/helper/odph_api.h>
+
+#include "ipsec.h"
+#include "test_vectors.h"
+
+/*
+ * Miscellaneous parameters for combined out+in tests
+ */
+typedef struct {
+ ipsec_test_part_flags_t part_flags;
+ odp_bool_t display_algo;
+ odp_bool_t ah;
+ odp_bool_t v6;
+ odp_bool_t tunnel;
+ odp_bool_t tunnel_is_v6;
+ odp_bool_t udp_encap;
+ enum ipsec_test_stats stats;
+} ipsec_test_flags;
+
+static void test_out_in_all(const ipsec_test_flags *flags);
+
+/* One cipher algorithm with its key and optional extra key material (salt) */
+struct cipher_param {
+ const char *name;
+ odp_cipher_alg_t algo;
+ const odp_crypto_key_t *key;
+ const odp_crypto_key_t *key_extra;
+};
+
+/* One auth algorithm with its key and optional extra key material (salt) */
+struct auth_param {
+ const char *name;
+ odp_auth_alg_t algo;
+ const odp_crypto_key_t *key;
+ const odp_crypto_key_t *key_extra;
+};
+
+/* Builds an initializer with the enumerator's own name as display name */
+#define ALG(alg, key, key_extra) { #alg, alg, key, key_extra }
+
+/*
+ * Ciphers that can be used in ESP and combined with any integrity
+ * algorithm. This excludes combined mode algorithms such as AES-GCM.
+ */
+static struct cipher_param ciphers[] = {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_CIPHER_ALG_DES, &key_des_64, NULL),
+ ALG(ODP_CIPHER_ALG_3DES_CBC, &key_des_192, NULL),
+ ALG(ODP_CIPHER_ALG_AES_CBC, &key_a5_128, NULL),
+ ALG(ODP_CIPHER_ALG_AES_CBC, &key_a5_192, NULL),
+ ALG(ODP_CIPHER_ALG_AES_CBC, &key_a5_256, NULL),
+ ALG(ODP_CIPHER_ALG_AES_CTR, &key_a5_128, &key_mcgrew_gcm_salt_3),
+ ALG(ODP_CIPHER_ALG_AES_CTR, &key_a5_192, &key_mcgrew_gcm_salt_3),
+ ALG(ODP_CIPHER_ALG_AES_CTR, &key_a5_256, &key_mcgrew_gcm_salt_3)
+};
+
+/*
+ * Integrity algorithms that can be used in ESP and AH. This excludes
+ * AES-GMAC which is defined for ESP as a combined-mode algorithm.
+ */
+static struct auth_param auths[] = {
+ ALG(ODP_AUTH_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_MD5_HMAC, &key_5a_128, NULL),
+ ALG(ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160, NULL),
+ ALG(ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256, NULL),
+ ALG(ODP_AUTH_ALG_SHA384_HMAC, &key_5a_384, NULL),
+ ALG(ODP_AUTH_ALG_SHA512_HMAC, &key_5a_512, NULL),
+ ALG(ODP_AUTH_ALG_AES_CMAC, &key_5a_128, NULL),
+ ALG(ODP_AUTH_ALG_AES_XCBC_MAC, &key_5a_128, NULL)
+};
+
+/*
+ * Integrity algorithms that can be used in AH but not in ESP as
+ * individual algorithms (combined with a cipher).
+ */
+static struct auth_param ah_auths[] = {
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_128, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_192, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_256, &key_mcgrew_gcm_salt_2),
+};
+
+/* Combined-mode cipher/auth pair that must be used together */
+struct cipher_auth_comb_param {
+ struct cipher_param cipher;
+ struct auth_param auth;
+};
+
+/* Combined-mode algorithm pairs (AEAD) tested as fixed combinations */
+static struct cipher_auth_comb_param cipher_auth_comb[] = {
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_128, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_192, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_256, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_128, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_192, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_256, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_128, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_192, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_256, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634, &key_rfc7634_salt),
+ ALG(ODP_AUTH_ALG_CHACHA20_POLY1305, NULL, NULL),
+ },
+};
+
+/* Outbound AH/SHA256 transport mode: output must match the known vector. */
+static void test_out_ipv4_ah_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound AH/SHA256 in IPv4-in-IPv4 tunnel mode against a known vector. */
+static void test_out_ipv4_ah_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound AH/SHA256 in IPv4-in-IPv6 tunnel mode against a known vector. */
+static void test_out_ipv4_ah_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = src;
+ tunnel.ipv6.dst_addr = dst;
+ tunnel.ipv6.hlimit = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound ESP NULL-cipher/SHA256 transport mode against a known vector. */
+static void test_out_ipv4_esp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound ESP NULL/SHA256 in IPv4-in-IPv4 tunnel mode, known vector. */
+static void test_out_ipv4_esp_null_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res =
+ &pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound ESP NULL/SHA256 in IPv4-in-IPv6 tunnel mode, known vector. */
+static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = src;
+ tunnel.ipv6.dst_addr = dst;
+ tunnel.ipv6.hlimit = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res =
+ &pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Assert that every counter of a freshly-read SA statistics block is zero. */
+static void test_ipsec_stats_zero_assert(odp_ipsec_stats_t *stats)
+{
+ CU_ASSERT_EQUAL(stats->success, 0);
+ CU_ASSERT_EQUAL(stats->proto_err, 0);
+ CU_ASSERT_EQUAL(stats->auth_err, 0);
+ CU_ASSERT_EQUAL(stats->antireplay_err, 0);
+ CU_ASSERT_EQUAL(stats->alg_err, 0);
+ CU_ASSERT_EQUAL(stats->mtu_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+ CU_ASSERT_EQUAL(stats->success_bytes, 0);
+}
+
+/*
+ * Assert that after one test packet the statistics reflect the expected
+ * outcome: exactly one success (with at least succ_bytes counted), one
+ * proto error, or one auth error depending on the test scenario, with
+ * all remaining counters untouched.
+ */
+static void test_ipsec_stats_test_assert(odp_ipsec_stats_t *stats,
+ enum ipsec_test_stats test,
+ uint64_t succ_bytes)
+{
+ if (test == IPSEC_TEST_STATS_SUCCESS) {
+ CU_ASSERT_EQUAL(stats->success, 1);
+ CU_ASSERT(stats->success_bytes >= succ_bytes);
+ } else {
+ CU_ASSERT_EQUAL(stats->success, 0);
+ CU_ASSERT_EQUAL(stats->success_bytes, 0);
+ }
+
+ if (test == IPSEC_TEST_STATS_PROTO_ERR) {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->proto_err, 1);
+ } else {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->proto_err, 0);
+ }
+
+ if (test == IPSEC_TEST_STATS_AUTH_ERR) {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->auth_err, 1);
+ } else {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->auth_err, 0);
+ }
+
+ CU_ASSERT_EQUAL(stats->antireplay_err, 0);
+ CU_ASSERT_EQUAL(stats->alg_err, 0);
+ CU_ASSERT_EQUAL(stats->mtu_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+}
+
+/* Corrupt the IPv4 protocol field (ESP<->AH swap) to provoke a proto error. */
+static void ipsec_pkt_proto_err_set(odp_packet_t pkt)
+{
+ uint32_t l3_off = odp_packet_l3_offset(pkt);
+ odph_ipv4hdr_t ip;
+
+ memset(&ip, 0, sizeof(ip));
+
+ /* Simulate proto error by corrupting protocol field */
+
+ odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip), &ip);
+
+ if (ip.proto == ODPH_IPPROTO_ESP)
+ ip.proto = ODPH_IPPROTO_AH;
+ else
+ ip.proto = ODPH_IPPROTO_ESP;
+
+ odp_packet_copy_from_mem(pkt, l3_off, sizeof(ip), &ip);
+}
+
+/* Flip the last 32 bits of the packet (inside the ICV) to provoke an auth error. */
+static void ipsec_pkt_auth_err_set(odp_packet_t pkt)
+{
+ uint32_t data, len;
+
+ /* Simulate auth error by corrupting ICV */
+
+ len = odp_packet_len(pkt);
+ odp_packet_copy_to_mem(pkt, len - sizeof(data), sizeof(data), &data);
+ data = ~data;
+ odp_packet_copy_from_mem(pkt, len - sizeof(data), sizeof(data), &data);
+}
+
+/* Apply the packet corruption requested by the test flags, if any. */
+static void ipsec_pkt_update(odp_packet_t pkt, const ipsec_test_flags *flags)
+{
+ if (flags && flags->stats == IPSEC_TEST_STATS_PROTO_ERR)
+ ipsec_pkt_proto_err_set(pkt);
+
+ if (flags && flags->stats == IPSEC_TEST_STATS_AUTH_ERR)
+ ipsec_pkt_auth_err_set(pkt);
+}
+
+/*
+ * Run the outbound test part on sa, optionally corrupt each produced
+ * packet per flags, then run each result through the inbound test part
+ * on sa_in and verify the inbound expectations.
+ */
+static void ipsec_check_out_in_one(const ipsec_test_part *part_outbound,
+ const ipsec_test_part *part_inbound,
+ odp_ipsec_sa_t sa,
+ odp_ipsec_sa_t sa_in,
+ const ipsec_test_flags *flags)
+{
+ int num_out = part_outbound->num_pkt;
+ odp_packet_t pkto[num_out];
+ int i;
+
+ num_out = ipsec_check_out(part_outbound, sa, pkto);
+
+ for (i = 0; i < num_out; i++) {
+ ipsec_test_part part_in = *part_inbound;
+ ipsec_test_packet pkt_in;
+
+ ipsec_pkt_update(pkto[i], flags);
+
+ ipsec_test_packet_from_pkt(&pkt_in, &pkto[i]);
+ part_in.pkt_in = &pkt_in;
+
+ ipsec_check_in_one(&part_in, sa_in);
+ }
+}
+
+/*
+ * Decide whether an SA creation failure is acceptable for this algorithm
+ * combination. Returns 1 (and prints a skip notice) for optional cipher+auth
+ * combinations; returns 0 for combinations that must always be supported
+ * (single algorithms, combined-mode AEADs and the mandatory AES-CBC/SHA1).
+ */
+static int sa_creation_failure_ok(const odp_ipsec_sa_param_t *param)
+{
+ odp_cipher_alg_t cipher = param->crypto.cipher_alg;
+ odp_auth_alg_t auth = param->crypto.auth_alg;
+
+ /* Single algorithm must not fail */
+ if (cipher == ODP_CIPHER_ALG_NULL || auth == ODP_AUTH_ALG_NULL)
+ return 0;
+
+ /* Combined mode algorithms must not fail */
+ if (cipher == ODP_CIPHER_ALG_AES_GCM ||
+ cipher == ODP_CIPHER_ALG_AES_CCM ||
+ cipher == ODP_CIPHER_ALG_CHACHA20_POLY1305)
+ return 0;
+
+ /* Combination of mandatory algorithms must not fail */
+ if (cipher == ODP_CIPHER_ALG_AES_CBC && auth == ODP_AUTH_ALG_SHA1_HMAC)
+ return 0;
+
+ printf("\n Algorithm combination (%d, %d) maybe not supported.\n", cipher, auth);
+ printf(" SA creation failed, skipping test.\n");
+ return 1;
+}
+
+/*
+ * Core out-then-in round-trip test: create an outbound and a matching
+ * inbound SA (ESP or AH, transport or tunnel, per 'flags'), send one
+ * ICMP packet through ipsec_check_out_in_one(), and optionally verify
+ * per-SA statistics before and after.
+ */
+static void test_out_in_common(const ipsec_test_flags *flags,
+ odp_cipher_alg_t cipher,
+ const odp_crypto_key_t *cipher_key,
+ odp_auth_alg_t auth,
+ const odp_crypto_key_t *auth_key,
+ const odp_crypto_key_t *cipher_key_extra,
+ const odp_crypto_key_t *auth_key_extra)
+{
+ odp_ipsec_tunnel_param_t *tun_ptr = NULL;
+ odp_ipsec_tunnel_param_t tunnel;
+ uint32_t src_v4 = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst_v4 = IPV4ADDR(10, 0, 222, 2);
+ uint8_t src_v6[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst_v6[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_stats_t stats;
+ odp_ipsec_protocol_t proto = flags->ah ? ODP_IPSEC_AH : ODP_IPSEC_ESP;
+ odp_ipsec_sa_t sa_out;
+ odp_ipsec_sa_t sa_in;
+ odp_proto_l3_type_t out_l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ odp_proto_l4_type_t out_l4_type = ODP_PROTO_L4_TYPE_ESP;
+
+ CU_ASSERT_NOT_EQUAL_FATAL(flags, NULL);
+
+ /* ICV won't be generated for NULL AUTH */
+ if ((flags->stats == IPSEC_TEST_STATS_AUTH_ERR) &&
+ (auth == ODP_AUTH_ALG_NULL))
+ return;
+
+ /* Build IPv4 or IPv6 tunnel parameters when tunnel mode is requested;
+ * tun_ptr stays NULL for transport mode. */
+ if (flags->tunnel) {
+ if (flags->tunnel_is_v6) {
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = &src_v6;
+ tunnel.ipv6.dst_addr = &dst_v6;
+ tunnel.ipv6.hlimit = 64;
+ tun_ptr = &tunnel;
+ } else {
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src_v4;
+ tunnel.ipv4.dst_addr = &dst_v4;
+ tunnel.ipv4.ttl = 64;
+ tun_ptr = &tunnel;
+ }
+ }
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, proto, 123, tun_ptr,
+ cipher, cipher_key,
+ auth, auth_key,
+ cipher_key_extra, auth_key_extra);
+
+ if (flags->udp_encap)
+ param.opt.udp_encap = 1;
+
+ sa_out = odp_ipsec_sa_create(&param);
+
+ /* Optional algorithm combinations may legitimately fail SA creation;
+ * sa_creation_failure_ok() decides whether to skip or to assert. */
+ if (sa_out == ODP_IPSEC_SA_INVALID && sa_creation_failure_ok(&param))
+ return;
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, proto, 123, tun_ptr,
+ cipher, cipher_key,
+ auth, auth_key,
+ cipher_key_extra, auth_key_extra);
+
+ if (flags->udp_encap)
+ param.opt.udp_encap = 1;
+
+ sa_in = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_in);
+
+ /* Expected L3/L4 types of the encapsulated (outbound) packet */
+ if ((flags->tunnel && flags->tunnel_is_v6) ||
+ (!flags->tunnel && flags->v6))
+ out_l3_type = ODP_PROTO_L3_TYPE_IPV6;
+ if (flags->ah)
+ out_l4_type = ODP_PROTO_L4_TYPE_AH;
+ if (flags->udp_encap)
+ out_l4_type = ODP_PROTO_L4_TYPE_UDP;
+
+ ipsec_test_part test_out = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = out_l3_type,
+ .l4_type = out_l4_type,
+ },
+ },
+ };
+ ipsec_test_part test_in = {
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ if (flags->v6) {
+ test_out.pkt_in = &pkt_ipv6_icmp_0;
+ test_in.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV6;
+ test_in.out[0].l4_type = ODP_PROTO_L4_TYPE_ICMPV6;
+ test_in.out[0].pkt_res = &pkt_ipv6_icmp_0;
+ }
+
+ test_out.flags = flags->part_flags;
+ test_in.flags = flags->part_flags;
+
+ /* Error-injection variants expect the inbound side to report the
+ * corresponding status error. */
+ if (flags->stats == IPSEC_TEST_STATS_PROTO_ERR)
+ test_in.out[0].status.error.proto = 1;
+ if (flags->stats == IPSEC_TEST_STATS_AUTH_ERR)
+ test_in.out[0].status.error.auth = 1;
+
+ /* Stats must be zero before any traffic */
+ if (flags->stats != IPSEC_TEST_STATS_NONE) {
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ test_ipsec_stats_zero_assert(&stats);
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ test_ipsec_stats_zero_assert(&stats);
+ }
+
+ if (flags->part_flags.test_sa_seq_num) {
+ int rc;
+
+ test_out.out[0].seq_num = 0x1235;
+ rc = ipsec_test_sa_update_seq_num(sa_out,
+ test_out.out[0].seq_num);
+
+ /* Skip further checks related to this specific test if the
+ * SA update call was not successful.
+ */
+ if (rc < 0) {
+ printf("\t >> skipped");
+ test_out.flags.test_sa_seq_num = false;
+ }
+ }
+
+ ipsec_check_out_in_one(&test_out, &test_in, sa_out, sa_in, flags);
+
+ if (flags->stats != IPSEC_TEST_STATS_NONE) {
+ uint64_t succ_bytes = 0;
+
+ /* Minimum bytes to be counted for stats.success_bytes */
+ if (!flags->ah) {
+ succ_bytes = test_out.pkt_in[0].len -
+ test_out.pkt_in[0].l4_offset;
+
+ if (flags->tunnel)
+ succ_bytes += test_out.pkt_in[0].l4_offset -
+ test_out.pkt_in[0].l3_offset;
+ } else {
+ succ_bytes = test_out.pkt_in[0].len -
+ test_out.pkt_in[0].l3_offset;
+
+ if (flags->tunnel)
+ succ_bytes += (flags->tunnel_is_v6 ?
+ ODPH_IPV6HDR_LEN :
+ ODPH_IPV4HDR_LEN);
+ }
+
+ /* All stats tests have outbound operation success and inbound
+ * varying.
+ */
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS,
+ succ_bytes);
+
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ test_ipsec_stats_test_assert(&stats, flags->stats, succ_bytes);
+ }
+
+ ipsec_sa_destroy(sa_out);
+ ipsec_sa_destroy(sa_in);
+}
+
+/*
+ * Run the common out/in test for one ESP cipher/auth combination,
+ * skipping it when the implementation does not support the pair.
+ */
+static void test_esp_out_in(struct cipher_param *cipher,
+ struct auth_param *auth,
+ const ipsec_test_flags *flags)
+{
+ /* Key lengths in bits (key->length is in bytes); 0 when no key */
+ int cipher_keylen = cipher->key ? 8 * cipher->key->length : 0;
+ int auth_keylen = auth->key ? 8 * auth->key->length : 0;
+
+ if (ipsec_check_esp(cipher->algo, cipher_keylen,
+ auth->algo, auth_keylen) != ODP_TEST_ACTIVE)
+ return;
+
+ if (flags->display_algo)
+ printf("\n %s (keylen %d) %s (keylen %d) ",
+ cipher->name, cipher_keylen, auth->name, auth_keylen);
+
+ test_out_in_common(flags, cipher->algo, cipher->key,
+ auth->algo, auth->key,
+ cipher->key_extra, auth->key_extra);
+}
+
+/*
+ * Iterate the ESP out/in test over every cipher x auth pairing plus the
+ * dedicated combined-mode (AEAD) entries in cipher_auth_comb[].
+ */
+static void test_esp_out_in_all(const ipsec_test_flags *flags_in)
+{
+ uint32_t c;
+ uint32_t a;
+ /* Local copy so the AH flag can be forced off for ESP */
+ ipsec_test_flags flags = *flags_in;
+
+ flags.ah = false;
+
+ for (c = 0; c < ODPH_ARRAY_SIZE(ciphers); c++)
+ for (a = 0; a < ODPH_ARRAY_SIZE(auths); a++)
+ test_esp_out_in(&ciphers[c], &auths[a], &flags);
+
+ for (c = 0; c < ODPH_ARRAY_SIZE(cipher_auth_comb); c++)
+ test_esp_out_in(&cipher_auth_comb[c].cipher,
+ &cipher_auth_comb[c].auth,
+ &flags);
+}
+
+/*
+ * Test ESP output followed by input with all combinations of ciphers and
+ * integrity algorithms.
+ */
+static void test_esp_out_in_all_basic(void)
+{
+ ipsec_test_flags flags;
+
+ /* Default flags; only print each algorithm pair as it runs */
+ memset(&flags, 0, sizeof(flags));
+ flags.display_algo = true;
+
+ test_esp_out_in_all(&flags);
+
+ printf("\n ");
+}
+
+/* Return nonzero when the suite runs outbound IPsec in inline mode */
+static int is_out_mode_inline(void)
+{
+ return suite_context.outbound_op_mode == ODP_IPSEC_OP_MODE_INLINE;
+}
+
+/*
+ * Exercise the inline_hdr_in_packet part flag across all algorithms.
+ * NOTE(review): calls test_out_in_all(), which is defined later in this
+ * file — presumably forward-declared above this chunk; verify.
+ */
+static void test_inline_hdr_in_packet(void)
+{
+ ipsec_test_flags flags = {
+ .part_flags.inline_hdr_in_packet = true,
+ };
+ test_out_in_all(&flags);
+}
+
+/*
+ * Run the common out/in test for one AH authentication algorithm
+ * (cipher is always NULL for AH), skipping unsupported algorithms.
+ */
+static void test_ah_out_in(struct auth_param *auth,
+ const ipsec_test_flags *flags_in)
+{
+ /* Key length in bits; 0 when the algorithm takes no key */
+ int auth_keylen = auth->key ? 8 * auth->key->length : 0;
+ ipsec_test_flags flags = *flags_in;
+
+ if (ipsec_check_ah(auth->algo, auth_keylen) != ODP_TEST_ACTIVE)
+ return;
+
+ if (flags.display_algo)
+ printf("\n %s (keylen %d) ", auth->name, auth_keylen);
+
+ flags.ah = true;
+
+ test_out_in_common(&flags, ODP_CIPHER_ALG_NULL, NULL,
+ auth->algo, auth->key,
+ NULL, auth->key_extra);
+}
+
+/* Run the AH out/in test over the generic and AH-specific auth lists */
+static void test_ah_out_in_all(const ipsec_test_flags *flags)
+{
+ uint32_t a;
+
+ for (a = 0; a < ODPH_ARRAY_SIZE(auths); a++)
+ test_ah_out_in(&auths[a], flags);
+ for (a = 0; a < ODPH_ARRAY_SIZE(ah_auths); a++)
+ test_ah_out_in(&ah_auths[a], flags);
+}
+
+static void test_ah_out_in_all_basic(void)
+{
+ ipsec_test_flags flags;
+
+ /* Default flags; only print each algorithm as it runs */
+ memset(&flags, 0, sizeof(flags));
+ flags.display_algo = true;
+
+ test_ah_out_in_all(&flags);
+
+ printf("\n ");
+}
+
+/* Run both the ESP and the AH out/in sweeps with the given flags */
+static void test_out_in_all(const ipsec_test_flags *flags)
+{
+ test_esp_out_in_all(flags);
+ test_ah_out_in_all(flags);
+}
+
+/*
+ * Outbound IPv4 ESP with UDP encapsulation, NULL cipher and
+ * HMAC-SHA256 auth; output compared against a known-good packet.
+ */
+static void test_out_ipv4_esp_udp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+ param.opt.udp_encap = 1;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * AH outbound with frag_mode = ODP_IPSEC_FRAG_CHECK and a tiny MTU:
+ * first packet must fail with an MTU error; a second packet that
+ * overrides frag_mode per-operation to DISABLED must succeed.
+ */
+static void test_out_ipv4_ah_sha256_frag_check(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ ipsec_test_part test;
+ ipsec_test_part test2;
+
+ memset(&test, 0, sizeof(ipsec_test_part));
+ memset(&test2, 0, sizeof(ipsec_test_part));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+ param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
+ param.outbound.mtu = 100;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ /* Packet exceeds the 100-byte MTU: expect status.error.mtu */
+ test.pkt_in = &pkt_ipv4_icmp_0;
+ test.num_pkt = 1;
+ test.out[0].status.error.mtu = 1;
+ test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ test.out[0].l4_type = ODP_PROTO_L4_TYPE_ICMPV4;
+
+ /* Per-operation option disables the MTU check */
+ test2.pkt_in = &pkt_ipv4_icmp_0;
+ test2.num_opt = 1;
+ test2.opt.flag.frag_mode = 1;
+ test2.opt.frag_mode = ODP_IPSEC_FRAG_DISABLED;
+ test2.num_pkt = 1;
+ test2.out[0].pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1;
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_check_out_one(&test2, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * Like test_out_ipv4_ah_sha256_frag_check, but the second packet is
+ * made to pass by raising the SA MTU via odp_ipsec_sa_mtu_update()
+ * instead of a per-operation option.
+ */
+static void test_out_ipv4_ah_sha256_frag_check_2(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ ipsec_test_part test;
+
+ memset(&test, 0, sizeof(ipsec_test_part));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+ param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
+ param.outbound.mtu = 100;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ /* Packet exceeds the 100-byte MTU: expect status.error.mtu */
+ test.pkt_in = &pkt_ipv4_icmp_0;
+ test.num_pkt = 1;
+ test.out[0].status.error.mtu = 1;
+ test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ test.out[0].l4_type = ODP_PROTO_L4_TYPE_ICMPV4;
+
+ ipsec_test_part test2 = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ /* Raise MTU so the same packet now fits */
+ odp_ipsec_sa_mtu_update(sa, 256);
+
+ ipsec_check_out_one(&test2, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * ESP counterpart of the AH frag-check test: MTU error on the first
+ * packet, success when the per-operation option disables the check.
+ */
+static void test_out_ipv4_esp_null_sha256_frag_check(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ ipsec_test_part test;
+ ipsec_test_part test2;
+
+ memset(&test, 0, sizeof(ipsec_test_part));
+ memset(&test2, 0, sizeof(ipsec_test_part));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
+ param.outbound.mtu = 100;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ /* Packet exceeds the 100-byte MTU: expect status.error.mtu */
+ test.pkt_in = &pkt_ipv4_icmp_0;
+ test.num_pkt = 1;
+ test.out[0].status.error.mtu = 1;
+ test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ test.out[0].l4_type = ODP_PROTO_L4_TYPE_ICMPV4;
+
+ /* Per-operation option disables the MTU check */
+ test2.pkt_in = &pkt_ipv4_icmp_0;
+ test2.num_opt = 1;
+ test2.opt.flag.frag_mode = 1;
+ test2.opt.frag_mode = ODP_IPSEC_FRAG_DISABLED;
+ test2.num_pkt = 1;
+ test2.out[0].pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1;
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_check_out_one(&test2, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * ESP counterpart of frag_check_2: MTU error first, then success after
+ * raising the SA MTU with odp_ipsec_sa_mtu_update().
+ */
+static void test_out_ipv4_esp_null_sha256_frag_check_2(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ ipsec_test_part test;
+
+ memset(&test, 0, sizeof(ipsec_test_part));
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
+ param.outbound.mtu = 100;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ /* Packet exceeds the 100-byte MTU: expect status.error.mtu */
+ test.pkt_in = &pkt_ipv4_icmp_0;
+ test.num_pkt = 1;
+ test.out[0].status.error.mtu = 1;
+ test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ test.out[0].l4_type = ODP_PROTO_L4_TYPE_ICMPV4;
+
+ ipsec_test_part test2 = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ /* Raise MTU so the same packet now fits */
+ odp_ipsec_sa_mtu_update(sa, 256);
+
+ ipsec_check_out_one(&test2, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 AH transport mode with HMAC-SHA256, known-good output */
+static void test_out_ipv6_ah_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv6_icmp_0_ah_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 AH in an IPv4 tunnel with HMAC-SHA256 */
+static void test_out_ipv6_ah_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 AH in an IPv6 tunnel with HMAC-SHA256 */
+static void test_out_ipv6_ah_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = src;
+ tunnel.ipv6.dst_addr = dst;
+ tunnel.ipv6.hlimit = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_AH, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 ESP transport mode, NULL cipher + HMAC-SHA256 */
+static void test_out_ipv6_esp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv6_icmp_0_esp_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 ESP in an IPv4 tunnel, NULL cipher + HMAC-SHA256 */
+static void test_out_ipv6_esp_null_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res =
+ &pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * Outbound IPv6 ESP in an IPv6 tunnel, NULL cipher + HMAC-SHA256.
+ * NOTE(review): src/dst are assigned as &src/&dst (pointer to array)
+ * while the AH variant above uses plain src/dst (decayed pointer).
+ * Both yield the same address; consider unifying the style.
+ */
+static void test_out_ipv6_esp_null_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = &src;
+ tunnel.ipv6.dst_addr = &dst;
+ tunnel.ipv6.hlimit = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res =
+ &pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/* Outbound IPv6 ESP with UDP encapsulation, NULL cipher + HMAC-SHA256 */
+static void test_out_ipv6_esp_udp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+ param.opt.udp_encap = 1;
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv6_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * TFC dummy-packet test: encapsulate a dummy (no-data) and an empty
+ * packet with tfc_pad_len set, then decapsulate and expect
+ * L4 type NO_NEXT on the inbound result. Tunnel may be IPv4 or IPv6.
+ */
+static void test_out_dummy_esp_null_sha256_tun(odp_ipsec_tunnel_param_t tunnel)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+ odp_ipsec_sa_t sa2;
+ ipsec_test_part test;
+ ipsec_test_part test_in;
+ ipsec_test_part test_empty;
+ odp_proto_l3_type_t out_l3_type = ODP_PROTO_L3_TYPE_IPV4;
+
+ if (tunnel.type == ODP_IPSEC_TUNNEL_IPV6)
+ out_l3_type = ODP_PROTO_L3_TYPE_IPV6;
+
+ memset(&test, 0, sizeof(ipsec_test_part));
+ memset(&test_in, 0, sizeof(ipsec_test_part));
+ memset(&test_empty, 0, sizeof(ipsec_test_part));
+
+ /* This test will not work properly in inbound inline mode.
+ * Packet might be dropped and we will not check for that. */
+ if (suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE)
+ return;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa2 = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa2);
+
+ /* Dummy packet with 16 bytes of TFC padding */
+ test.pkt_in = &pkt_test_nodata;
+ test.num_opt = 1;
+ test.opt.flag.tfc_dummy = 1;
+ test.opt.tfc_pad_len = 16;
+ test.num_pkt = 1;
+ test.out[0].l3_type = out_l3_type;
+ test.out[0].l4_type = ODP_PROTO_L4_TYPE_ESP;
+
+ /* Decapsulated dummy must carry next header "no next" */
+ test_in.num_pkt = 1;
+ test_in.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
+ test_in.out[0].l4_type = ODP_PROTO_L4_TYPE_NO_NEXT;
+
+ test_empty.pkt_in = &pkt_test_empty;
+ test_empty.num_opt = 1;
+ test_empty.opt.flag.tfc_dummy = 1;
+ test_empty.opt.tfc_pad_len = 16;
+ test_empty.num_pkt = 1;
+ test_empty.out[0].l3_type = out_l3_type;
+ test_empty.out[0].l4_type = ODP_PROTO_L4_TYPE_ESP;
+
+ ipsec_check_out_in_one(&test, &test_in, sa, sa2, NULL);
+ ipsec_check_out_in_one(&test_empty, &test_in, sa, sa2, NULL);
+
+ ipsec_sa_destroy(sa2);
+ ipsec_sa_destroy(sa);
+}
+
+/* TFC dummy-packet test in an IPv4 tunnel */
+static void test_out_dummy_esp_null_sha256_tun_ipv4(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ test_out_dummy_esp_null_sha256_tun(tunnel);
+}
+
+/* TFC dummy-packet test in an IPv6 tunnel */
+static void test_out_dummy_esp_null_sha256_tun_ipv6(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ uint8_t src[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ };
+ uint8_t dst[16] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ };
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV6;
+ tunnel.ipv6.src_addr = src;
+ tunnel.ipv6.dst_addr = dst;
+ tunnel.ipv6.hlimit = 64;
+
+ test_out_dummy_esp_null_sha256_tun(tunnel);
+}
+
+/* Outbound ESP over an IPv4 UDP payload packet (no UDP encapsulation) */
+static void test_out_ipv4_udp_esp_null_sha256(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_udp,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .pkt_res = &pkt_ipv4_udp_esp_null_sha256 },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * Outbound IPv4 ESP tunnel with NULL cipher and AES-XCBC-MAC auth.
+ * NOTE(review): the tunnel struct is memset twice (before and after
+ * the src/dst declarations); the first memset is redundant.
+ */
+static void test_out_ipv4_null_aes_xcbc(void)
+{
+ odp_ipsec_tunnel_param_t tunnel;
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+
+ memset(&tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+ tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel.ipv4.src_addr = &src;
+ tunnel.ipv4.dst_addr = &dst;
+ tunnel.ipv4.ttl = 64;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x100, &tunnel,
+ ODP_CIPHER_ALG_NULL, NULL,
+ ODP_AUTH_ALG_AES_XCBC_MAC, &key_auth_aes_xcbc_128,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_null_aes_xcbc_plain,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
+ .pkt_res = &pkt_ipv4_null_aes_xcbc_esp,
+ },
+ },
+ };
+
+ ipsec_check_out_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+/*
+ * Verify odp_ipsec_sa_info(): parameters read back from a tunnel-mode
+ * outbound SA and an inbound SA must match what was configured,
+ * outbound seq_num and inbound antireplay_window_top must advance after
+ * one round trip, and lookup parameters must be reported for an SA in
+ * ODP_IPSEC_LOOKUP_DSTADDR_SPI mode.
+ */
+static void test_sa_info(void)
+{
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+ odp_ipsec_tunnel_param_t tunnel_out;
+ odp_ipsec_tunnel_param_t tunnel_in;
+ odp_ipsec_sa_param_t param_out;
+ odp_ipsec_sa_param_t param_in;
+ odp_ipsec_sa_info_t info_out;
+ odp_ipsec_sa_info_t info_in;
+ odp_ipsec_sa_t sa_out;
+ odp_ipsec_sa_t sa_in;
+
+ memset(&tunnel_out, 0, sizeof(tunnel_out));
+ memset(&tunnel_in, 0, sizeof(tunnel_in));
+
+ tunnel_out.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel_out.ipv4.src_addr = &src;
+ tunnel_out.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 123, &tunnel_out,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ sa_out = odp_ipsec_sa_create(&param_out);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
+
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ 123, &tunnel_in,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ /* Enable anti-replay so window_top becomes observable */
+ param_in.inbound.antireplay_ws = 32;
+ sa_in = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_FATAL(sa_in != ODP_IPSEC_SA_INVALID);
+
+ memset(&info_out, 0, sizeof(info_out));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
+
+ /* Reported parameters must match the configured ones */
+ CU_ASSERT_EQUAL(info_out.param.dir, param_out.dir);
+ CU_ASSERT_EQUAL(info_out.param.proto, param_out.proto);
+ CU_ASSERT_EQUAL(info_out.param.mode, param_out.mode);
+
+ CU_ASSERT_EQUAL(info_out.param.crypto.cipher_alg,
+ param_out.crypto.cipher_alg);
+ CU_ASSERT_EQUAL(info_out.param.crypto.auth_alg,
+ param_out.crypto.auth_alg);
+ CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
+ CU_ASSERT_EQUAL(info_out.param.spi, param_out.spi);
+ CU_ASSERT_EQUAL(info_out.param.opt.esn, param_out.opt.esn);
+ CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_dscp, param_out.opt.copy_dscp);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_flabel, param_out.opt.copy_flabel);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_df, param_out.opt.copy_df);
+
+ CU_ASSERT_EQUAL(ODP_IPSEC_MODE_TUNNEL, info_out.param.mode);
+
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.type,
+ param_out.outbound.tunnel.type);
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.dscp,
+ param_out.outbound.tunnel.ipv4.dscp);
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.df,
+ param_out.outbound.tunnel.ipv4.df);
+ CU_ASSERT_NOT_EQUAL_FATAL(NULL,
+ info_out.param.outbound.tunnel.ipv4.src_addr);
+ CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.src_addr,
+ param_out.outbound.tunnel.ipv4.src_addr,
+ ODP_IPV4_ADDR_SIZE));
+ CU_ASSERT_NOT_EQUAL_FATAL(NULL,
+ info_out.param.outbound.tunnel.ipv4.dst_addr);
+ CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.dst_addr,
+ param_out.outbound.tunnel.ipv4.dst_addr,
+ ODP_IPV4_ADDR_SIZE));
+
+ CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.bytes,
+ param_out.lifetime.soft_limit.bytes);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.bytes,
+ param_out.lifetime.hard_limit.bytes);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.packets,
+ param_out.lifetime.soft_limit.packets);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.packets,
+ param_out.lifetime.hard_limit.packets);
+
+ /* No traffic yet: sequence number must be zero */
+ CU_ASSERT_EQUAL(0, info_out.outbound.seq_num);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT_EQUAL(0, info_in.inbound.antireplay_window_top);
+
+ ipsec_test_part test_out = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ESP,
+ },
+ },
+ };
+ ipsec_test_part test_in = {
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ },
+ },
+ };
+
+ ipsec_check_out_in_one(&test_out, &test_in, sa_out, sa_in, NULL);
+
+ /* One packet sent: counters must have advanced to 1 */
+ memset(&info_out, 0, sizeof(info_out));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
+ CU_ASSERT_EQUAL(1, info_out.outbound.seq_num);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT_EQUAL(1, info_in.inbound.antireplay_window_top);
+
+ ipsec_sa_destroy(sa_out);
+ ipsec_sa_destroy(sa_in);
+
+ /*
+ * Additional check for SA lookup parameters. Let's use transport
+ * mode SA and ODP_IPSEC_DSTADD_SPI lookup mode.
+ */
+ ipsec_sa_param_fill(&param_in,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+ param_in.inbound.lookup_mode = ODP_IPSEC_LOOKUP_DSTADDR_SPI;
+ param_in.inbound.lookup_param.ip_version = ODP_IPSEC_IPV4;
+ param_in.inbound.lookup_param.dst_addr = &dst;
+ sa_in = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_FATAL(sa_in != ODP_IPSEC_SA_INVALID);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_FATAL(odp_ipsec_sa_info(sa_in, &info_in) == 0);
+
+ CU_ASSERT(info_in.param.inbound.lookup_mode ==
+ ODP_IPSEC_LOOKUP_DSTADDR_SPI);
+ /* Reported dst_addr pointer must reference info's own storage */
+ CU_ASSERT_FATAL(info_in.param.inbound.lookup_param.dst_addr ==
+ &info_in.inbound.lookup_param.dst_addr);
+ CU_ASSERT(!memcmp(info_in.param.inbound.lookup_param.dst_addr,
+ &dst,
+ ODP_IPV4_ADDR_SIZE));
+ ipsec_sa_destroy(sa_in);
+}
+
+/* Run the full out/in sweep with SA sequence-number update testing on */
+static void test_test_sa_update_seq_num(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+ flags.display_algo = true;
+ flags.part_flags.test_sa_seq_num = true;
+
+ test_out_in_all(&flags);
+
+ printf("\n ");
+}
+
+#define SOFT_LIMIT_PKT_CNT 1024
+#define HARD_LIMIT_PKT_CNT 2048
+#define DELTA_PKT_CNT 320
+
+/*
+ * Drive packets through an outbound SA configured with soft/hard
+ * lifetime limits (packet- or byte-based per 'expiry') and verify the
+ * expiry notification fires within one 'inc' step of the limit.
+ */
+static void test_out_ipv4_esp_sa_expiry(enum ipsec_test_sa_expiry expiry)
+{
+ int byte_count_per_packet = pkt_ipv4_icmp_0.len - pkt_ipv4_icmp_0.l3_offset;
+ uint32_t src = IPV4ADDR(10, 0, 11, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 22, 2);
+ odp_ipsec_tunnel_param_t out_tunnel;
+ odp_ipsec_sa_param_t param_out;
+ int i, inc, limit, delta;
+ uint64_t soft_limit_byte;
+ uint64_t hard_limit_byte;
+ uint64_t soft_limit_pkt;
+ uint64_t hard_limit_pkt;
+ odp_ipsec_sa_t out_sa;
+
+ /* Pick limits, the loop counter target and step size for the
+ * requested expiry type; 'limit' is the threshold under test. */
+ switch (expiry) {
+ case IPSEC_TEST_EXPIRY_SOFT_PKT:
+ soft_limit_pkt = SOFT_LIMIT_PKT_CNT;
+ hard_limit_pkt = HARD_LIMIT_PKT_CNT;
+ soft_limit_byte = 0;
+ hard_limit_byte = 0;
+ delta = DELTA_PKT_CNT;
+ limit = soft_limit_pkt;
+ inc = 1;
+ break;
+ case IPSEC_TEST_EXPIRY_HARD_PKT:
+ soft_limit_pkt = SOFT_LIMIT_PKT_CNT;
+ hard_limit_pkt = HARD_LIMIT_PKT_CNT;
+ soft_limit_byte = 0;
+ hard_limit_byte = 0;
+ delta = DELTA_PKT_CNT;
+ limit = hard_limit_pkt;
+ inc = 1;
+ break;
+ case IPSEC_TEST_EXPIRY_SOFT_BYTE:
+ soft_limit_pkt = 0;
+ hard_limit_pkt = 0;
+ soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT;
+ hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT;
+ delta = byte_count_per_packet * DELTA_PKT_CNT;
+ limit = soft_limit_byte;
+ inc = byte_count_per_packet;
+ break;
+ case IPSEC_TEST_EXPIRY_HARD_BYTE:
+ soft_limit_pkt = 0;
+ hard_limit_pkt = 0;
+ soft_limit_byte = byte_count_per_packet * SOFT_LIMIT_PKT_CNT;
+ hard_limit_byte = byte_count_per_packet * HARD_LIMIT_PKT_CNT;
+ delta = byte_count_per_packet * DELTA_PKT_CNT;
+ limit = hard_limit_byte;
+ inc = byte_count_per_packet;
+ break;
+ default:
+ return;
+ }
+
+ memset(&out_tunnel, 0, sizeof(odp_ipsec_tunnel_param_t));
+
+ out_tunnel.type = ODP_IPSEC_TUNNEL_IPV4;
+ out_tunnel.ipv4.src_addr = &src;
+ out_tunnel.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out, ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ 0x4a2cbfe7, &out_tunnel,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ param_out.lifetime.soft_limit.bytes = soft_limit_byte;
+ param_out.lifetime.hard_limit.bytes = hard_limit_byte;
+ param_out.lifetime.soft_limit.packets = soft_limit_pkt;
+ param_out.lifetime.hard_limit.packets = hard_limit_pkt;
+
+ out_sa = odp_ipsec_sa_create(&param_out);
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+
+ ipsec_test_part test_out = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ESP,
+ },
+ },
+ };
+
+ /* Well below the limit: expiry status is not checked */
+ test_out.out[0].sa_expiry = IPSEC_TEST_EXPIRY_IGNORED;
+
+ for (i = 0; i < limit - delta; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ /* Approach the limit and expect the notification to arrive */
+ sa_expiry_notified = false;
+ test_out.out[0].sa_expiry = expiry;
+
+ for (; i <= limit && !sa_expiry_notified; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ CU_ASSERT(sa_expiry_notified);
+
+ /* Keep sending past the limit; processing must still proceed */
+ for (; i <= limit + delta; i += inc)
+ ipsec_check_out_one(&test_out, out_sa);
+
+ ipsec_sa_destroy(out_sa);
+}
+
+/* Packet-count based soft and hard SA lifetime expiry */
+static void test_out_ipv4_esp_sa_pkt_expiry(void)
+{
+ printf("\n IPv4 IPsec SA packet soft expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_PKT);
+
+ printf("\n IPv4 IPsec SA packet hard expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_PKT);
+
+ printf("\n");
+}
+
+/* Byte-count based soft and hard SA lifetime expiry */
+static void test_out_ipv4_esp_sa_byte_expiry(void)
+{
+ printf("\n IPv4 IPsec SA byte soft expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_SOFT_BYTE);
+
+ printf("\n IPv4 IPsec SA byte hard expiry");
+ test_out_ipv4_esp_sa_expiry(IPSEC_TEST_EXPIRY_HARD_BYTE);
+
+ printf("\n");
+}
+
+/* odp_ipsec_capability() must succeed */
+static void ipsec_test_capability(void)
+{
+ odp_ipsec_capability_t capa;
+
+ CU_ASSERT(odp_ipsec_capability(&capa) == 0);
+}
+
+/* Verify that odp_ipsec_config_init() and odp_ipsec_sa_param_init() set the
+ * documented default values regardless of prior memory contents. The structs
+ * are first filled with the given byte pattern so that both an all-zero and
+ * an all-ones pattern (see the caller) can be used to catch fields the init
+ * functions fail to write. */
+static void test_defaults(uint8_t fill)
+{
+ odp_ipsec_config_t config;
+ odp_ipsec_sa_param_t sa_param;
+
+ /* Global IPsec config defaults */
+ memset(&config, fill, sizeof(config));
+ odp_ipsec_config_init(&config);
+ CU_ASSERT(config.inbound.lookup.min_spi == 0);
+ CU_ASSERT(config.inbound.lookup.max_spi == UINT32_MAX);
+ CU_ASSERT(config.inbound.lookup.spi_overlap == 0);
+ CU_ASSERT(config.inbound.retain_outer == ODP_PROTO_LAYER_NONE);
+ CU_ASSERT(config.inbound.parse_level == ODP_PROTO_LAYER_NONE);
+ CU_ASSERT(config.inbound.chksums.all_chksum == 0);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv4);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv6);
+ CU_ASSERT(config.inbound.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.inbound.reassembly.max_num_frags == 2);
+ CU_ASSERT(!config.inbound.reass_async);
+ CU_ASSERT(!config.inbound.reass_inline);
+ CU_ASSERT(config.outbound.all_chksum == 0);
+ CU_ASSERT(!config.stats_en);
+ CU_ASSERT(!config.vector.enable);
+
+ /* Per-SA parameter defaults */
+ memset(&sa_param, fill, sizeof(sa_param));
+ odp_ipsec_sa_param_init(&sa_param);
+ CU_ASSERT(sa_param.proto == ODP_IPSEC_ESP);
+ CU_ASSERT(sa_param.crypto.cipher_alg == ODP_CIPHER_ALG_NULL);
+ CU_ASSERT(sa_param.crypto.auth_alg == ODP_AUTH_ALG_NULL);
+ CU_ASSERT(sa_param.crypto.icv_len == 0);
+ CU_ASSERT(sa_param.opt.esn == 0);
+ CU_ASSERT(sa_param.opt.udp_encap == 0);
+ CU_ASSERT(sa_param.opt.copy_dscp == 0);
+ CU_ASSERT(sa_param.opt.copy_flabel == 0);
+ CU_ASSERT(sa_param.opt.copy_df == 0);
+ CU_ASSERT(sa_param.opt.dec_ttl == 0);
+ CU_ASSERT(sa_param.lifetime.soft_limit.bytes == 0);
+ CU_ASSERT(sa_param.lifetime.soft_limit.packets == 0);
+ CU_ASSERT(sa_param.lifetime.hard_limit.bytes == 0);
+ CU_ASSERT(sa_param.lifetime.hard_limit.packets == 0);
+ CU_ASSERT(sa_param.context == NULL);
+ CU_ASSERT(sa_param.context_len == 0);
+ CU_ASSERT(sa_param.inbound.lookup_mode == ODP_IPSEC_LOOKUP_DISABLED);
+ CU_ASSERT(sa_param.inbound.antireplay_ws == 0);
+ CU_ASSERT(sa_param.inbound.pipeline == ODP_IPSEC_PIPELINE_NONE);
+ CU_ASSERT(!sa_param.inbound.reassembly_en);
+ CU_ASSERT(sa_param.outbound.tunnel.type == ODP_IPSEC_TUNNEL_IPV4);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.dscp == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.df == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.ttl == 255);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.flabel == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.dscp == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.hlimit == 255);
+ CU_ASSERT(sa_param.outbound.frag_mode == ODP_IPSEC_FRAG_DISABLED);
+}
+
+/* Run the default-value checks with two complementary fill patterns so that
+ * both stuck-at-zero and stuck-at-one fields would be detected. */
+static void ipsec_test_default_values(void)
+{
+ test_defaults(0);
+ test_defaults(0xff);
+}
+
+/* Verify IPsec statistics for three scenarios: successful traffic,
+ * protocol error, and authentication error. The actual stats checking is
+ * done inside test_out_in_all() based on the flags.stats selector. */
+static void test_ipsec_stats(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ printf("\n Stats : success");
+ flags.stats = IPSEC_TEST_STATS_SUCCESS;
+ test_out_in_all(&flags);
+
+ printf("\n Stats : proto err");
+ flags.stats = IPSEC_TEST_STATS_PROTO_ERR;
+ test_out_in_all(&flags);
+
+ printf("\n Stats : auth err");
+ flags.stats = IPSEC_TEST_STATS_AUTH_ERR;
+ test_out_in_all(&flags);
+
+ printf("\n ");
+}
+
+/* Test UDP encapsulation (NAT-T style ESP-in-UDP) in all supported
+ * combinations: transport mode for IPv4/IPv6 payloads, then tunnel mode for
+ * every inner/outer IP version pairing. */
+static void test_udp_encap(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+ flags.udp_encap = 1;
+ flags.tunnel = 0;
+
+ /* Transport mode: v6 flag selects the payload IP version */
+ printf("\n IPv4 Transport");
+ flags.v6 = 0;
+ test_esp_out_in_all(&flags);
+
+ printf("\n IPv6 Transport");
+ flags.v6 = 1;
+ test_esp_out_in_all(&flags);
+
+ flags.tunnel = 1;
+
+ /* Tunnel mode: v6 selects inner version, tunnel_is_v6 the outer one */
+ printf("\n IPv4-in-IPv4 Tunnel");
+ flags.v6 = 0;
+ flags.tunnel_is_v6 = 0;
+ test_esp_out_in_all(&flags);
+
+ printf("\n IPv4-in-IPv6 Tunnel");
+ flags.v6 = 0;
+ flags.tunnel_is_v6 = 1;
+ test_esp_out_in_all(&flags);
+
+ printf("\n IPv6-in-IPv4 Tunnel");
+ flags.v6 = 1;
+ flags.tunnel_is_v6 = 0;
+ test_esp_out_in_all(&flags);
+
+ printf("\n IPv6-in-IPv6 Tunnel");
+ flags.v6 = 1;
+ flags.tunnel_is_v6 = 1;
+ test_esp_out_in_all(&flags);
+
+ printf("\n ");
+}
+
+/* Create the maximum number of SAs reported by odp_ipsec_capability() as
+ * outbound/inbound pairs (each pair sharing SPI, keys and tunnel endpoints),
+ * then run an out->in packet test through every pair. Inbound processing
+ * uses SPI lookup (test_in.flags.lookup) so the lookup machinery is
+ * exercised while many SAs exist. If max_num_sa is odd, one extra outbound
+ * SA is created and tested against the last inbound SA so that the full
+ * capability count is actually allocated. */
+static void test_max_num_sa(void)
+{
+ odp_ipsec_capability_t capa;
+ uint32_t sa_pairs;
+ odp_bool_t odd = false;
+ uint32_t n;
+ uint8_t cipher_key_data[128 / 8]; /* 128 bit key for AES */
+ uint8_t auth_key_data[160 / 8]; /* 160 bit key for SHA-1 */
+ odp_crypto_key_t cipher_key;
+ odp_crypto_key_t auth_key;
+ uint32_t tun_src;
+ uint32_t tun_dst;
+ odp_ipsec_tunnel_param_t tun = {
+ .type = ODP_IPSEC_TUNNEL_IPV4,
+ .ipv4.src_addr = &tun_src,
+ .ipv4.dst_addr = &tun_dst,
+ .ipv4.ttl = 64,
+ };
+ odp_ipsec_sa_param_t param;
+ const uint32_t spi_start = 256;
+ odp_ipsec_sa_t sa_odd = ODP_IPSEC_SA_INVALID;
+ ipsec_test_part test_out = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ESP,
+ },
+ },
+ };
+ ipsec_test_part test_in = {
+ .flags = {
+ /* Test lookup now that we have lots of SAs */
+ .lookup = 1,
+ },
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ CU_ASSERT_FATAL(odp_ipsec_capability(&capa) == 0);
+ sa_pairs = capa.max_num_sa / 2;
+ if (capa.max_num_sa > 2 && capa.max_num_sa % 2)
+ odd = true;
+
+ /* VLAs sized by the runtime capability value */
+ odp_ipsec_sa_t sa_out[sa_pairs];
+ odp_ipsec_sa_t sa_in[sa_pairs];
+
+ memset(cipher_key_data, 0xa5, sizeof(cipher_key_data));
+ cipher_key.data = cipher_key_data;
+ cipher_key.length = sizeof(cipher_key_data);
+
+ memset(auth_key_data, 0x5a, sizeof(auth_key_data));
+ auth_key.data = auth_key_data;
+ auth_key.length = sizeof(auth_key_data);
+
+ for (n = 0; n < sa_pairs; n++) {
+ /* Make keys unique */
+ if (cipher_key.length > sizeof(n))
+ memcpy(cipher_key.data, &n, sizeof(n));
+ if (auth_key.length > sizeof(n))
+ memcpy(auth_key.data, &n, sizeof(n));
+
+ /* These are for outbound SAs only */
+ tun_src = 0x0a000000 + n;
+ tun_dst = 0x0a800000 + n;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ spi_start + n, &tun,
+ ODP_CIPHER_ALG_AES_CBC, &cipher_key,
+ ODP_AUTH_ALG_SHA1_HMAC, &auth_key,
+ NULL, NULL);
+ sa_out[n] = odp_ipsec_sa_create(&param);
+ CU_ASSERT_FATAL(sa_out[n] != ODP_IPSEC_SA_INVALID);
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
+ spi_start + n, &tun,
+ ODP_CIPHER_ALG_AES_CBC, &cipher_key,
+ ODP_AUTH_ALG_SHA1_HMAC, &auth_key,
+ NULL, NULL);
+ sa_in[n] = odp_ipsec_sa_create(&param);
+ CU_ASSERT_FATAL(sa_in[n] != ODP_IPSEC_SA_INVALID);
+ }
+
+ /* Note: key data still holds the values of the last loop round,
+ * matching the last created SA pair. */
+ n = sa_pairs - 1;
+ if (odd) {
+ /*
+ * We have an odd number of max SAs. Let's create a similar
+ * SA as the last created outbound SA and test it against
+ * the last created inbound SA.
+ */
+ tun_src = 0x0a000000 + n;
+ tun_dst = 0x0a800000 + n;
+
+ ipsec_sa_param_fill(&param,
+ ODP_IPSEC_DIR_OUTBOUND, ODP_IPSEC_ESP,
+ spi_start + n, &tun,
+ ODP_CIPHER_ALG_AES_CBC, &cipher_key,
+ ODP_AUTH_ALG_SHA1_HMAC, &auth_key,
+ NULL, NULL);
+ sa_odd = odp_ipsec_sa_create(&param);
+ CU_ASSERT_FATAL(sa_odd != ODP_IPSEC_SA_INVALID);
+
+ ipsec_check_out_in_one(&test_out, &test_in,
+ sa_odd, sa_in[n], NULL);
+ }
+
+ /* Round-trip one packet through every SA pair */
+ for (n = 0; n < sa_pairs; n++)
+ ipsec_check_out_in_one(&test_out, &test_in,
+ sa_out[n], sa_in[n], NULL);
+
+ for (n = 0; n < sa_pairs; n++) {
+ ipsec_sa_destroy(sa_out[n]);
+ ipsec_sa_destroy(sa_in[n]);
+ }
+ if (odd)
+ ipsec_sa_destroy(sa_odd);
+}
+
+/* CUnit registration table for the outbound IPsec test suite. Conditional
+ * entries are skipped when their ipsec_check_* predicate reports that the
+ * implementation lacks the required algorithm/capability. The table is
+ * terminated by ODP_TEST_INFO_NULL. */
+odp_testinfo_t ipsec_out_suite[] = {
+ ODP_TEST_INFO(ipsec_test_capability),
+ ODP_TEST_INFO(ipsec_test_default_values),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_tun_ipv4,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_tun_ipv6,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_sha256_tun_ipv4,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_sha256_tun_ipv6,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_udp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_frag_check,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_frag_check_2,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_sha256_frag_check,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_sha256_frag_check_2,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_ah_sha256,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_ah_sha256_tun_ipv4,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_ah_sha256_tun_ipv6,
+ ipsec_check_ah_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_esp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_esp_null_sha256_tun_ipv4,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_esp_null_sha256_tun_ipv6,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv6_esp_udp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_dummy_esp_null_sha256_tun_ipv4,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_dummy_esp_null_sha256_tun_ipv6,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_udp_esp_null_sha256,
+ ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_null_aes_xcbc,
+ ipsec_check_esp_null_aes_xcbc),
+ ODP_TEST_INFO_CONDITIONAL(test_sa_info,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_pkt_expiry,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_sa_byte_expiry,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_test_sa_update_seq_num,
+ ipsec_check_test_sa_update_seq_num),
+ ODP_TEST_INFO(test_esp_out_in_all_basic),
+ ODP_TEST_INFO_CONDITIONAL(test_inline_hdr_in_packet,
+ is_out_mode_inline),
+ ODP_TEST_INFO(test_ah_out_in_all_basic),
+ ODP_TEST_INFO(test_ipsec_stats),
+ ODP_TEST_INFO(test_udp_encap),
+ ODP_TEST_INFO_CONDITIONAL(test_max_num_sa,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/validation/api/ipsec/reass_test_vectors.c b/test/validation/api/ipsec/reass_test_vectors.c
new file mode 100644
index 000000000..c3bb2bfd4
--- /dev/null
+++ b/test/validation/api/ipsec/reass_test_vectors.c
@@ -0,0 +1,353 @@
+/* Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "ipsec.h"
+
+/* The source file includes below test vectors */
+
+/* IPv6:
+ *
+ * 1) pkt_ipv6_udp_p1
+ * pkt_ipv6_udp_p1_f1
+ * pkt_ipv6_udp_p1_f2
+ *
+ * 2) pkt_ipv6_udp_p2
+ * pkt_ipv6_udp_p2_f1
+ * pkt_ipv6_udp_p2_f2
+ * pkt_ipv6_udp_p2_f3
+ * pkt_ipv6_udp_p2_f4
+ */
+
+/* IPv4:
+ *
+ * 1) pkt_ipv4_udp_p1
+ * pkt_ipv4_udp_p1_f1
+ * pkt_ipv4_udp_p1_f2
+ *
+ * 2) pkt_ipv4_udp_p2
+ * pkt_ipv4_udp_p2_f1
+ * pkt_ipv4_udp_p2_f2
+ * pkt_ipv4_udp_p2_f3
+ * pkt_ipv4_udp_p2_f4
+ */
+
+/* Full (unfragmented) IPv6/UDP datagram p1; its fragments follow below.
+ * Payload bytes beyond the headers are filled at runtime by
+ * reass_test_vectors_init(). */
+ipsec_test_packet pkt_ipv6_udp_p1 = {
+ .len = 1514,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x05, 0xb4, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x05, 0xb4, 0x2b, 0xe8,
+ },
+};
+
+/* First fragment of p1: carries a fragment extension header and the UDP
+ * header (l4_offset 62 = after the 8-byte fragment header). */
+ipsec_test_packet pkt_ipv6_udp_p1_f1 = {
+ .len = 1398,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x05, 0x40, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x00, 0x01, 0x5c, 0x92, 0xac, 0xf1,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x05, 0xb4, 0x2b, 0xe8,
+ },
+};
+
+/* Second (last) fragment of p1: fragment header only, no UDP header. */
+ipsec_test_packet pkt_ipv6_udp_p1_f2 = {
+ .len = 186,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x84, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x05, 0x38, 0x5c, 0x92, 0xac, 0xf1,
+ },
+};
+
+/* Full (unfragmented) IPv6/UDP datagram p2; reassembled from four
+ * fragments below. */
+ipsec_test_packet pkt_ipv6_udp_p2 = {
+ .len = 4496,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x11, 0x5a, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x11, 0x5a, 0x8a, 0x11,
+ },
+};
+
+/* Fragment 1 of 4 for p2 (includes the UDP header). */
+ipsec_test_packet pkt_ipv6_udp_p2_f1 = {
+ .len = 1398,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x05, 0x40, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x00, 0x01, 0x64, 0x6c, 0x68, 0x9f,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x11, 0x5a, 0x8a, 0x11,
+ },
+};
+
+/* Fragment 2 of 4 for p2. */
+ipsec_test_packet pkt_ipv6_udp_p2_f2 = {
+ .len = 1398,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x05, 0x40, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x05, 0x39, 0x64, 0x6c, 0x68, 0x9f,
+ },
+};
+
+/* Fragment 3 of 4 for p2. */
+ipsec_test_packet pkt_ipv6_udp_p2_f3 = {
+ .len = 1398,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x05, 0x40, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x0a, 0x71, 0x64, 0x6c, 0x68, 0x9f,
+ },
+};
+
+/* Fragment 4 of 4 (last) for p2. */
+ipsec_test_packet pkt_ipv6_udp_p2_f4 = {
+ .len = 496,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x01, 0xba, 0x2c, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0x00, 0x02,
+ 0x11, 0x00, 0x0f, 0xa8, 0x64, 0x6c, 0x68, 0x9f,
+ },
+};
+
+/* Full (unfragmented) IPv4/UDP datagram p1; its fragments follow below.
+ * Payload bytes beyond the headers are filled at runtime by
+ * reass_test_vectors_init(). */
+ipsec_test_packet pkt_ipv4_udp_p1 = {
+ .len = 1514,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x05, 0xdc, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x66, 0x0d, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x05, 0xc8, 0xb8, 0x4c,
+ },
+};
+
+/* First fragment of p1 (includes the UDP header). */
+ipsec_test_packet pkt_ipv4_udp_p1_f1 = {
+ .len = 1434,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x05, 0x8c, 0x00, 0x01, 0x20, 0x00,
+ 0x40, 0x11, 0x46, 0x5d, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x05, 0xc8, 0xb8, 0x4c,
+ },
+};
+
+/* Second (last) fragment of p1; no UDP header. */
+ipsec_test_packet pkt_ipv4_udp_p1_f2 = {
+ .len = 114,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x64, 0x00, 0x01, 0x00, 0xaf,
+ 0x40, 0x11, 0x6a, 0xd6, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+ },
+};
+
+/* Full (unfragmented) IPv4/UDP datagram p2; reassembled from four
+ * fragments below. */
+ipsec_test_packet pkt_ipv4_udp_p2 = {
+ .len = 4496,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x11, 0x82, 0x00, 0x02, 0x00, 0x00,
+ 0x40, 0x11, 0x5a, 0x66, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x11, 0x6e, 0x16, 0x76,
+ },
+};
+
+/* Fragment 1 of 4 for p2 (includes the UDP header). */
+ipsec_test_packet pkt_ipv4_udp_p2_f1 = {
+ .len = 1434,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x05, 0x8c, 0x00, 0x02, 0x20, 0x00,
+ 0x40, 0x11, 0x46, 0x5c, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+
+ /* UDP */
+ 0x08, 0x00, 0x27, 0x10, 0x11, 0x6e, 0x16, 0x76,
+ },
+};
+
+/* Fragment 2 of 4 for p2. */
+ipsec_test_packet pkt_ipv4_udp_p2_f2 = {
+ .len = 1434,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x05, 0x8c, 0x00, 0x02, 0x20, 0xaf,
+ 0x40, 0x11, 0x45, 0xad, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+ },
+};
+
+/* Fragment 3 of 4 for p2. */
+ipsec_test_packet pkt_ipv4_udp_p2_f3 = {
+ .len = 1434,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x05, 0x8c, 0x00, 0x02, 0x21, 0x5e,
+ 0x40, 0x11, 0x44, 0xfe, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+ },
+};
+
+/* Fragment 4 of 4 (last) for p2. */
+ipsec_test_packet pkt_ipv4_udp_p2_f4 = {
+ .len = 296,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x01, 0x1a, 0x00, 0x02, 0x02, 0x0d,
+ 0x40, 0x11, 0x68, 0xc1, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02,
+ },
+};
diff --git a/test/validation/api/ipsec/reass_test_vectors.h b/test/validation/api/ipsec/reass_test_vectors.h
new file mode 100644
index 000000000..02b41c573
--- /dev/null
+++ b/test/validation/api/ipsec/reass_test_vectors.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_REASS_TEST_VECTORS_H_
+#define _ODP_REASS_TEST_VECTORS_H_
+
+/* Reassembly test vectors defined in reass_test_vectors.c: each full
+ * datagram (pkt_*_udp_pN) is followed by its fragments (pkt_*_udp_pN_fM). */
+extern ipsec_test_packet pkt_ipv6_udp_p1;
+extern ipsec_test_packet pkt_ipv6_udp_p1_f1;
+extern ipsec_test_packet pkt_ipv6_udp_p1_f2;
+extern ipsec_test_packet pkt_ipv6_udp_p2;
+extern ipsec_test_packet pkt_ipv6_udp_p2_f1;
+extern ipsec_test_packet pkt_ipv6_udp_p2_f2;
+extern ipsec_test_packet pkt_ipv6_udp_p2_f3;
+extern ipsec_test_packet pkt_ipv6_udp_p2_f4;
+
+extern ipsec_test_packet pkt_ipv4_udp_p1;
+extern ipsec_test_packet pkt_ipv4_udp_p1_f1;
+extern ipsec_test_packet pkt_ipv4_udp_p1_f2;
+extern ipsec_test_packet pkt_ipv4_udp_p2;
+extern ipsec_test_packet pkt_ipv4_udp_p2_f1;
+extern ipsec_test_packet pkt_ipv4_udp_p2_f2;
+extern ipsec_test_packet pkt_ipv4_udp_p2_f3;
+extern ipsec_test_packet pkt_ipv4_udp_p2_f4;
+
+/* Fill the payload region of a test vector (everything from l4_offset to
+ * len) with the fixed byte 0x58, so that reassembled output can be compared
+ * against the full-datagram vector. For the full datagram and the first
+ * fragment the 8-byte UDP header at l4_offset is left untouched. */
+static inline void
+test_vector_payload_populate(ipsec_test_packet *pkt, odp_bool_t first_frag)
+{
+ uint32_t i = pkt->l4_offset;
+
+ /* For non-fragmented packets and first frag, skip 8 bytes from
+ * l4_offset for UDP header */
+
+ if (first_frag)
+ i += 8;
+
+ for (; i < pkt->len; i++)
+ pkt->data[i] = 0x58;
+}
+
+/* One-time initialization of all reassembly test vectors: populate the
+ * payload bytes of every full datagram and fragment. 'true' marks vectors
+ * that contain a UDP header (full datagrams and first fragments). */
+static inline void
+reass_test_vectors_init(void)
+{
+ test_vector_payload_populate(&pkt_ipv6_udp_p1, true);
+ test_vector_payload_populate(&pkt_ipv6_udp_p1_f1, true);
+ test_vector_payload_populate(&pkt_ipv6_udp_p1_f2, false);
+
+ test_vector_payload_populate(&pkt_ipv6_udp_p2, true);
+ test_vector_payload_populate(&pkt_ipv6_udp_p2_f1, true);
+ test_vector_payload_populate(&pkt_ipv6_udp_p2_f2, false);
+ test_vector_payload_populate(&pkt_ipv6_udp_p2_f3, false);
+ test_vector_payload_populate(&pkt_ipv6_udp_p2_f4, false);
+
+ test_vector_payload_populate(&pkt_ipv4_udp_p1, true);
+ test_vector_payload_populate(&pkt_ipv4_udp_p1_f1, true);
+ test_vector_payload_populate(&pkt_ipv4_udp_p1_f2, false);
+
+ test_vector_payload_populate(&pkt_ipv4_udp_p2, true);
+ test_vector_payload_populate(&pkt_ipv4_udp_p2_f1, true);
+ test_vector_payload_populate(&pkt_ipv4_udp_p2_f2, false);
+ test_vector_payload_populate(&pkt_ipv4_udp_p2_f3, false);
+ test_vector_payload_populate(&pkt_ipv4_udp_p2_f4, false);
+}
+
+#endif
diff --git a/test/validation/api/ipsec/test_vectors.h b/test/validation/api/ipsec/test_vectors.h
new file mode 100644
index 000000000..b032ef973
--- /dev/null
+++ b/test/validation/api/ipsec/test_vectors.h
@@ -0,0 +1,2168 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_IPSEC_VECTORS_H_
+#define _ODP_TEST_IPSEC_VECTORS_H_
+
+/* Define a crypto key: emits a mutable byte array <name>_data holding the
+ * key material and a const odp_crypto_key_t <name> pointing at it with the
+ * length derived from the initializer. ODP_UNUSED silences warnings for
+ * keys not referenced by every translation unit including this header. */
+#define KEY(name, ...) \
+ static uint8_t name ## _data[] = { __VA_ARGS__ }; \
+ static const ODP_UNUSED odp_crypto_key_t name = { \
+ .data = name ## _data, \
+ .length = sizeof(name ## _data), \
+ }
+
+/* Fixed-pattern keys; the name suffix is the key length in bits. */
+KEY(key_a5_128, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5);
+KEY(key_5a_128, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a);
+KEY(key_5a_160, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a);
+/* NOTE(review): key_a5_192 uses 0xa6 at offsets 0 and 8 despite its name —
+ * presumably deliberate so its prefix differs from key_a5_128; confirm. */
+KEY(key_a5_192, 0xa6, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa6, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5);
+KEY(key_a5_256, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5,
+ 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5);
+KEY(key_5a_256, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a);
+KEY(key_5a_384, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a);
+KEY(key_5a_512, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,
+ 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a);
+
+/* Keys taken from published test vectors (RFC 3602, McGrew GCM draft,
+ * RFC 7634) and ancillary salts. */
+KEY(key_rfc3602, 0x90, 0xd3, 0x82, 0xb4, 0x10, 0xee, 0xba, 0x7a,
+ 0xd9, 0x38, 0xc4, 0x6c, 0xec, 0x1a, 0x82, 0xbf);
+KEY(key_rfc3602_2, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);
+KEY(key_mcgrew_gcm_2, 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08);
+KEY(key_mcgrew_gcm_salt_2, 0xca, 0xfe, 0xba, 0xbe);
+KEY(key_mcgrew_gcm_3, 0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+ 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab,
+ 0xab, 0xbc, 0xcd, 0xde, 0xf0, 0x01, 0x12, 0x23,
+ 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0x9a, 0xab);
+KEY(key_mcgrew_gcm_salt_3, 0x11, 0x22, 0x33, 0x44);
+KEY(key_mcgrew_gcm_4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+KEY(key_mcgrew_gcm_salt_4, 0x00, 0x00, 0x00, 0x00);
+KEY(key_mcgrew_gcm_12, 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25,
+ 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47);
+KEY(key_mcgrew_gcm_salt_12, 0xd9, 0x66, 0x42, 0x67);
+KEY(key_mcgrew_gcm_15, 0x4c, 0x80, 0xcd, 0xef, 0xbb, 0x5d, 0x10, 0xda,
+ 0x90, 0x6a, 0xc7, 0x3c, 0x36, 0x13, 0xa6, 0x34);
+KEY(key_mcgrew_gcm_salt_15, 0x22, 0x43, 0x3c, 0x64);
+KEY(key_rfc7634, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f);
+KEY(key_rfc7634_salt, 0xa0, 0xa1, 0xa2, 0xa3);
+KEY(key_3byte_salt, 0x01, 0x02, 0x03);
+KEY(key_auth_aes_xcbc_128, 0x61, 0x31, 0x62, 0x32, 0x63, 0x33, 0x64, 0x34,
+ 0x65, 0x35, 0x66, 0x36, 0x67, 0x37, 0x68, 0x38);
+
+/* DES keys have parity bits so that each byte has odd parity */
+KEY(key_des_64, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4);
+KEY(key_des_192, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
+ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4);
+
+/* Plaintext reference packet: IPv4/ICMP echo request, used as the input
+ * for the outbound tests and as the expected result for inbound ones. */
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0 = {
+ .len = 142,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+/* pkt_ipv4_icmp_0 encapsulated in an outer IPv4 header (IP-in-IP). */
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ipip = {
+ .len = 162,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x94, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x04, 0x19, 0x62, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* Inner IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b
+ },
+};
+
+/* Expected result: pkt_ipv4_icmp_0 protected by transport-mode AH with
+ * HMAC-SHA-256 ICV, SPI 0x7b, sequence number 1. */
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ah_sha256_1 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xd9, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x6c, 0x2e, 0xf7, 0x1f, 0x7c, 0x70, 0x39, 0xa3,
+ 0x4a, 0x77, 0x01, 0x47, 0x9e, 0x45, 0x73, 0x51,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+/* Expected result: pkt_ipv4_icmp_0 in IPv4 tunnel mode with AH/SHA-256. */
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1 = {
+ .len = 190,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0x19, 0x17, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* AH */
+ 0x04, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0xd5, 0x35, 0x9b, 0x21, 0xe6, 0x14, 0x9b, 0x42,
+ 0x1f, 0x00, 0xfa, 0x36, 0x73, 0x4c, 0x53, 0xcf,
+
+ /* Inner IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+/* Expected result: pkt_ipv4_icmp_0 in IPv6 tunnel mode with AH/SHA-256. */
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1 = {
+ .len = 214,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x33, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* AH */
+ 0x04, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x2b, 0x45, 0xbe, 0xd2, 0x9c, 0x9c, 0x3e, 0x0d,
+ 0xe0, 0x32, 0xaf, 0xa0, 0x2d, 0x26, 0xe1, 0x91,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* Inner IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+/* Negative vector: like pkt_ipv4_icmp_0_ah_sha256_1 but truncated by two
+ * payload bytes (len 168 vs 170) with IP total length/checksum adjusted,
+ * so the AH ICV no longer matches. */
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ah_sha256_1_bad1 = {
+ .len = 168,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xdb, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x6c, 0x2e, 0xf7, 0x1f, 0x7c, 0x70, 0x39, 0xa3,
+ 0x4a, 0x77, 0x01, 0x47, 0x9e, 0x45, 0x73, 0x51,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ah_sha256_1_bad2 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xd9, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x6c, 0x2e, 0xf7, 0x1f, 0x7c, 0x70, 0x39, 0xa3,
+ 0x4a, 0x77, 0x01, 0x47, 0x9e, 0x45, 0x73, 0x51,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5d,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ah_sha256_1235 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xd9, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x12, 0x35,
+ 0x04, 0xef, 0x71, 0x73, 0xa1, 0xd4, 0x71, 0x3f,
+ 0xd6, 0x78, 0xfe, 0xa2, 0x59, 0xe9, 0x93, 0x70,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37,
+
+ /* ICMP echo */
+ 0x12, 0x34, 0x00, 0x00,
+
+ /* data */
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_esp_null_sha256_1 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xda, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x01,
+
+ /* ICV */
+ 0xe9, 0x81, 0xcd, 0x65, 0x9b, 0x25, 0x0b, 0x33,
+ 0xe2, 0xf3, 0x83, 0xf1, 0x6d, 0x14, 0xb4, 0x1f,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1 = {
+ .len = 190,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0x19, 0x18, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* Inner IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x04,
+
+ /* ICV */
+ 0x73, 0x8d, 0xf6, 0x9a, 0x26, 0x06, 0x4d, 0xa1,
+ 0x88, 0x37, 0x65, 0xab, 0x0d, 0xe9, 0x95, 0x3b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1 = {
+ .len = 210,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x32, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* Inner IP */
+ 0x45, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x01, 0xac, 0x27, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x04,
+
+ /* ICV */
+ 0x73, 0x8d, 0xf6, 0x9a, 0x26, 0x06, 0x4d, 0xa1,
+ 0x88, 0x37, 0x65, 0xab, 0x0d, 0xe9, 0x95, 0x3b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_udp_null_sha256_1 = {
+ .len = 178,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x11, 0xab, 0xf3, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* UDP encap */
+ 0x11, 0x94, 0x11, 0x94, 0x00, 0x90, 0x00, 0x00,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x01,
+
+ /* ICV */
+ 0xe9, 0x81, 0xcd, 0x65, 0x9b, 0x25, 0x0b, 0x33,
+ 0xe2, 0xf3, 0x83, 0xf1, 0x6d, 0x14, 0xb4, 0x1f,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_null_sha256_1_bad1 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xda, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x01,
+
+ /* ICV */
+ 0x18, 0x00, 0x14, 0x3a, 0x54, 0x72, 0x98, 0xe8,
+ 0xc7, 0x2d, 0xfa, 0xeb, 0x70, 0xe0, 0x24, 0xdf,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_null_sha256_1235 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xda, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x12, 0x35,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x01,
+
+ /* ICV */
+ 0x2f, 0xfb, 0xdd, 0x9d, 0xc0, 0xca, 0xb8, 0x0a,
+ 0xaa, 0xf1, 0x59, 0x31, 0x4e, 0xef, 0x62, 0x50,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_esp_aes_cbc_null_1 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xda, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x96, 0xfa, 0x74, 0x56, 0x78, 0xe4, 0xbb, 0x0c,
+ 0x9e, 0x6e, 0x4a, 0xeb, 0x44, 0xd9, 0xf2, 0xe6,
+
+ /* data */
+ 0x2f, 0xb3, 0xa6, 0xfe, 0x2c, 0x2e, 0xce, 0x65,
+ 0x3a, 0x57, 0xe3, 0x09, 0x5d, 0x66, 0x36, 0x32,
+ 0xb1, 0xc2, 0x59, 0x58, 0xb6, 0xe5, 0x9e, 0xa2,
+ 0x07, 0xf8, 0x26, 0x4a, 0x64, 0xf5, 0x16, 0x01,
+ 0x51, 0x8e, 0xe5, 0x4b, 0x07, 0x2c, 0x4b, 0x23,
+ 0xfa, 0x4e, 0x6e, 0xdb, 0x35, 0xc7, 0x1d, 0x30,
+ 0x42, 0xd9, 0x0f, 0xba, 0x8a, 0x69, 0x7e, 0x29,
+ 0xe7, 0xbd, 0x15, 0xe9, 0x35, 0x9e, 0x81, 0xe7,
+ 0x9e, 0xc9, 0x7d, 0x66, 0x99, 0x58, 0xec, 0x45,
+ 0x29, 0xd0, 0xa4, 0xfd, 0xf1, 0xe7, 0x5b, 0x3e,
+ 0x2a, 0x77, 0x1d, 0x8f, 0x2b, 0x73, 0xba, 0xf8,
+ 0x72, 0xd2, 0xa0, 0x0b, 0x90, 0xb9, 0x73, 0x9c,
+ 0xde, 0x3c, 0xc3, 0xb8, 0x91, 0x97, 0xc4, 0x28,
+ 0xfa, 0x6d, 0xa8, 0x41, 0xb6, 0x83, 0xc8, 0xaa,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha1_1 = {
+ .len = 182,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xa8, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xce, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x17, 0xc3, 0xfa, 0xaf, 0x1d, 0xeb, 0x94, 0x06,
+ 0x4e, 0xf8, 0x62, 0xb4, 0x1f, 0xa0, 0x17, 0x62,
+
+ /* data */
+ 0xba, 0xf3, 0xfb, 0x10, 0x86, 0xee, 0x80, 0x6f,
+ 0x44, 0xff, 0x94, 0x7f, 0xee, 0xd8, 0x50, 0x62,
+ 0x40, 0x3f, 0x7c, 0x76, 0xb4, 0x65, 0xca, 0x32,
+ 0x91, 0x0e, 0xba, 0xf2, 0xc1, 0x9d, 0x3b, 0xcb,
+ 0x0f, 0xc9, 0xc9, 0xae, 0x33, 0x42, 0x16, 0x36,
+ 0xd3, 0xc8, 0x6c, 0x23, 0xac, 0xbf, 0x98, 0xf2,
+ 0xda, 0x10, 0x95, 0xbc, 0xe8, 0x38, 0xbf, 0x4b,
+ 0x19, 0xd0, 0x58, 0x67, 0xd9, 0xab, 0xd0, 0xf5,
+ 0x59, 0xc9, 0xdc, 0xbb, 0x46, 0xcc, 0x34, 0x26,
+ 0xe6, 0xd6, 0xee, 0x5c, 0xc8, 0xe2, 0x46, 0xc9,
+ 0x14, 0xe9, 0x98, 0xe4, 0xb9, 0xec, 0xf0, 0xa7,
+ 0x12, 0x94, 0x54, 0x4e, 0x56, 0xfd, 0xe8, 0x07,
+ 0xd8, 0x83, 0xf9, 0x78, 0x5f, 0xa6, 0x1a, 0xce,
+ 0xbb, 0xda, 0xbc, 0x7c, 0xd8, 0xb6, 0x7b, 0x4f,
+
+ /* ICV */
+ 0x78, 0x4e, 0xfe, 0xbd, 0x42, 0x7f, 0x42, 0x96,
+ 0x65, 0xe7, 0x60, 0x2f,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha256_1 = {
+ .len = 186,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xac, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xca, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x96, 0xfa, 0x74, 0x56, 0x78, 0xe4, 0xbb, 0x0c,
+ 0x9e, 0x6e, 0x4a, 0xeb, 0x44, 0xd9, 0xf2, 0xe6,
+
+ /* data */
+ 0x2f, 0xb3, 0xa6, 0xfe, 0x2c, 0x2e, 0xce, 0x65,
+ 0x3a, 0x57, 0xe3, 0x09, 0x5d, 0x66, 0x36, 0x32,
+ 0xb1, 0xc2, 0x59, 0x58, 0xb6, 0xe5, 0x9e, 0xa2,
+ 0x07, 0xf8, 0x26, 0x4a, 0x64, 0xf5, 0x16, 0x01,
+ 0x51, 0x8e, 0xe5, 0x4b, 0x07, 0x2c, 0x4b, 0x23,
+ 0xfa, 0x4e, 0x6e, 0xdb, 0x35, 0xc7, 0x1d, 0x30,
+ 0x42, 0xd9, 0x0f, 0xba, 0x8a, 0x69, 0x7e, 0x29,
+ 0xe7, 0xbd, 0x15, 0xe9, 0x35, 0x9e, 0x81, 0xe7,
+ 0x9e, 0xc9, 0x7d, 0x66, 0x99, 0x58, 0xec, 0x45,
+ 0x29, 0xd0, 0xa4, 0xfd, 0xf1, 0xe7, 0x5b, 0x3e,
+ 0x2a, 0x77, 0x1d, 0x8f, 0x2b, 0x73, 0xba, 0xf8,
+ 0x72, 0xd2, 0xa0, 0x0b, 0x90, 0xb9, 0x73, 0x9c,
+ 0xde, 0x3c, 0xc3, 0xb8, 0x91, 0x97, 0xc4, 0x28,
+ 0xfa, 0x6d, 0xa8, 0x41, 0xb6, 0x83, 0xc8, 0xaa,
+
+ /* IV */
+ 0x8a, 0x39, 0x10, 0x07, 0x02, 0x97, 0xbb, 0x1c,
+ 0x59, 0xb7, 0x70, 0x33, 0xa4, 0x26, 0xa2, 0xb8
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1 = {
+ .len = 194,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xc2, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0xcd, 0xc7, 0x8d, 0x99, 0xf7, 0x65, 0x21, 0xf6,
+ 0x40, 0xe3, 0x4c, 0x5e, 0x90, 0x84, 0x4c, 0xf3,
+
+ /* data */
+ 0xb5, 0x9c, 0xa2, 0x3d, 0xb6, 0x09, 0x4f, 0x40,
+ 0x73, 0x4a, 0x33, 0x12, 0x90, 0xb2, 0xf1, 0x24,
+ 0x1f, 0xd3, 0xa3, 0x89, 0x53, 0x12, 0xb0, 0x98,
+ 0x6e, 0xec, 0xde, 0xb8, 0xf2, 0xbb, 0xe0, 0x03,
+ 0xee, 0x86, 0x1c, 0x2c, 0xe2, 0x12, 0x26, 0x89,
+ 0x4d, 0x8a, 0x6a, 0x89, 0xd0, 0x31, 0x68, 0x66,
+ 0xe8, 0x14, 0xe7, 0xd7, 0xaa, 0xd8, 0x2a, 0x61,
+ 0x03, 0x62, 0xb7, 0x46, 0x8e, 0x98, 0xa7, 0xfd,
+ 0x96, 0xe7, 0xbb, 0x5d, 0xf0, 0xc7, 0x42, 0xe1,
+ 0xef, 0x96, 0x1c, 0x79, 0xc0, 0xa4, 0x60, 0x69,
+ 0x2c, 0xc8, 0x02, 0x1f, 0xf4, 0xbf, 0x8f, 0xa4,
+ 0x0e, 0xb5, 0x35, 0xca, 0x51, 0x23, 0xc5, 0x62,
+ 0x13, 0x54, 0xbb, 0xcb, 0x2a, 0x4a, 0xdd, 0x79,
+ 0x32, 0x9f, 0x72, 0xa6, 0xeb, 0xe9, 0x04, 0x61,
+
+ /* ICV */
+ 0x79, 0xbc, 0xb6, 0x2d, 0xcc, 0x14, 0xc8, 0xea,
+ 0xfa, 0x5b, 0x57, 0x8d, 0x0a, 0xec, 0x56, 0xb7,
+ 0xca, 0xb2, 0x38, 0x9b, 0x05, 0x79, 0xf8, 0xdd,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha512_1 = {
+ .len = 202,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xba, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0xf9, 0x6b, 0x50, 0xa9, 0x7b, 0x4e, 0xc9, 0xdf,
+ 0x70, 0x29, 0xe2, 0x76, 0xdc, 0x12, 0x1e, 0x8f,
+
+ /* data */
+ 0x9a, 0x7c, 0x5b, 0x96, 0xcd, 0xcb, 0x76, 0x07,
+ 0xf3, 0xb0, 0x86, 0x31, 0xa4, 0xf0, 0xa3, 0xdb,
+ 0xb6, 0x08, 0x46, 0xd4, 0xb2, 0x2c, 0x15, 0x86,
+ 0xdf, 0x4e, 0xb9, 0xd2, 0x75, 0xb5, 0x18, 0x30,
+ 0x25, 0x15, 0x38, 0xbb, 0xbd, 0x17, 0x8b, 0x01,
+ 0xc6, 0xc4, 0x14, 0xe8, 0xe7, 0xc2, 0xc7, 0x63,
+ 0x70, 0x4d, 0xcb, 0x02, 0x95, 0x68, 0x36, 0x85,
+ 0x11, 0x66, 0x76, 0xa0, 0x73, 0xd4, 0xa9, 0x1c,
+ 0x33, 0xff, 0xe6, 0x04, 0x80, 0x47, 0x6d, 0xa4,
+ 0x63, 0x1a, 0x15, 0x89, 0x57, 0xb7, 0x39, 0x4f,
+ 0x61, 0x71, 0x8f, 0x4b, 0xaf, 0x3c, 0x31, 0x0d,
+ 0x9b, 0x1a, 0xea, 0x21, 0x38, 0xb8, 0x64, 0x89,
+ 0x96, 0x76, 0xc7, 0xd2, 0xfc, 0x8e, 0x36, 0x02,
+ 0x35, 0xfe, 0xde, 0x40, 0xc7, 0xd8, 0x60, 0x8d,
+
+ /* ICV */
+ 0xe8, 0x66, 0x6b, 0xb7, 0x4f, 0xb2, 0xa5, 0x08,
+ 0xf1, 0x76, 0x82, 0xa9, 0x3e, 0xed, 0x39, 0xac,
+ 0x17, 0x8f, 0xa8, 0xfe, 0x58, 0x4d, 0x40, 0xed,
+ 0xfe, 0xd9, 0x35, 0x60, 0x13, 0xb5, 0x20, 0xf8,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_esp_aes_ctr_null_1 = {
+ .len = 162,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x94, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xe2, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* data */
+ 0x39, 0xab, 0xe5, 0xae, 0x74, 0x57, 0x76, 0x7f,
+ 0x1d, 0x1f, 0xce, 0xe8, 0xca, 0xf1, 0x87, 0xf5,
+ 0xfd, 0x9e, 0x1d, 0x20, 0x38, 0x30, 0x8a, 0xe5,
+ 0xb9, 0x55, 0x80, 0x7b, 0xfd, 0x9d, 0xb9, 0x99,
+ 0x85, 0xcd, 0xb5, 0x30, 0x86, 0xaa, 0xe1, 0x7a,
+ 0x69, 0xe5, 0xfa, 0x38, 0xf3, 0x0f, 0x91, 0x18,
+ 0x75, 0x7b, 0x5f, 0x4e, 0x69, 0x17, 0xaa, 0xe7,
+ 0x84, 0x6c, 0x40, 0x31, 0xec, 0x87, 0x4c, 0x8c,
+ 0xb3, 0xb4, 0x9f, 0x7e, 0xea, 0x83, 0x6f, 0xc6,
+ 0x11, 0xd5, 0xce, 0xbe, 0x65, 0x37, 0x1c, 0xb6,
+ 0xd3, 0xcb, 0x51, 0xa8, 0xa4, 0x0e, 0x3e, 0xe6,
+ 0x26, 0xd8, 0x17, 0xec, 0x8b, 0xca, 0x79, 0x96,
+ 0xa0, 0xcd, 0x6f, 0xdd, 0x9e, 0xe9, 0x6a, 0xc0,
+ 0xf2, 0x6c, 0xdb, 0xfd, 0x99, 0xa2, 0xb5, 0xbf,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_ah_aes_gmac_128_1 = {
+ .len = 178,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0xab, 0xd1, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* AH */
+ 0x01, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd6, 0x0e, 0xcc, 0x22, 0x31, 0x79, 0x59, 0x72,
+ 0x68, 0xc9, 0x58, 0xfb, 0x8b, 0xb0, 0xbb, 0xd5,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_null_aes_gmac_128_1 = {
+ .len = 178,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xd2, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x01,
+
+ /* ICV */
+ 0x16, 0x0e, 0xa6, 0x8f, 0xb3, 0xa6, 0x8c, 0x74,
+ 0x19, 0x59, 0x72, 0x80, 0x91, 0x98, 0x77, 0x5e,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv6_icmp_0 = {
+ .len = 170,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv6_icmp_0_ah_sha256_1 = {
+ .len = 202,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x94, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x33, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* AH */
+ 0x3a, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0xd9, 0x14, 0x87, 0x27, 0x20, 0x1a, 0xc2, 0x66,
+ 0xc1, 0xca, 0x99, 0x2b, 0x8a, 0xae, 0x2f, 0x27,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1 = {
+ .len = 218,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xcc, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x33, 0x18, 0xfb, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* AH */
+ 0x29, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x7f, 0xde, 0x8a, 0x48, 0xc5, 0xc5, 0xfa, 0x52,
+ 0xb8, 0xf6, 0xc2, 0xe3, 0x8f, 0x10, 0xb2, 0x47,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1 = {
+ .len = 242,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x33, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* AH */
+ 0x29, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b,
+ 0x00, 0x00, 0x00, 0x01,
+ 0x62, 0x96, 0x2b, 0x40, 0x3e, 0x53, 0x76, 0x4a,
+ 0x4d, 0x7f, 0xf6, 0x22, 0x35, 0x3c, 0x74, 0xe2,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv6_icmp_0_esp_null_sha256_1 = {
+ .len = 198,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x90, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x32, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x3a,
+
+ /* ICV */
+ 0x20, 0xa6, 0x89, 0x7b, 0x0a, 0x52, 0x5b, 0xca,
+ 0x98, 0x56, 0xd1, 0xfe, 0x56, 0xc7, 0xa4, 0x5b,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1 = {
+ .len = 218,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xcc, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0x18, 0xfc, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x29,
+
+ /* ICV */
+ 0xd0, 0x96, 0x6e, 0xda, 0xc5, 0x08, 0xcc, 0x0e,
+ 0xd1, 0x22, 0xa5, 0xed, 0x13, 0x07, 0xd9, 0xcd,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1 = {
+ .len = 238,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 54,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x32, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x74, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x3a, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x29,
+
+ /* ICV */
+ 0xd0, 0x96, 0x6e, 0xda, 0xc5, 0x08, 0xcc, 0x0e,
+ 0xd1, 0x22, 0xa5, 0xed, 0x13, 0x07, 0xd9, 0xcd,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_rfc3602_5 = {
+ .len = 98,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x54, 0x08, 0xf2, 0x00, 0x00,
+ 0x40, 0x01, 0xf9, 0xfe, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0x64,
+
+ /* ICMP */
+ 0x08, 0x00, 0x0e, 0xbd, 0xa7, 0x0a, 0x00, 0x00,
+ 0x8e, 0x9c, 0x08, 0x3d, 0xb9, 0x5b, 0x07, 0x00,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_rfc3602_5_esp = {
+ .len = 138,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x7c, 0x08, 0xf2, 0x00, 0x00,
+ 0x40, 0x32, 0xf9, 0xa5, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0x64,
+
+ /* ESP */
+ 0x00, 0x00, 0x43, 0x21, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0xe9, 0x6e, 0x8c, 0x08, 0xab, 0x46, 0x57, 0x63,
+ 0xfd, 0x09, 0x8d, 0x45, 0xdd, 0x3f, 0xf8, 0x93,
+
+ /* data */
+ 0xf6, 0x63, 0xc2, 0x5d, 0x32, 0x5c, 0x18, 0xc6,
+ 0xa9, 0x45, 0x3e, 0x19, 0x4e, 0x12, 0x08, 0x49,
+ 0xa4, 0x87, 0x0b, 0x66, 0xcc, 0x6b, 0x99, 0x65,
+ 0x33, 0x00, 0x13, 0xb4, 0x89, 0x8d, 0xc8, 0x56,
+ 0xa4, 0x69, 0x9e, 0x52, 0x3a, 0x55, 0xdb, 0x08,
+ 0x0b, 0x59, 0xec, 0x3a, 0x8e, 0x4b, 0x7e, 0x52,
+ 0x77, 0x5b, 0x07, 0xd1, 0xdb, 0x34, 0xed, 0x9c,
+ 0x53, 0x8a, 0xb5, 0x0c, 0x55, 0x1b, 0x87, 0x4a,
+ 0xa2, 0x69, 0xad, 0xd0, 0x47, 0xad, 0x2d, 0x59,
+ 0x13, 0xac, 0x19, 0xb7, 0xcf, 0xba, 0xd4, 0xa6,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_6 = {
+ .len = 62,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x30, 0x08, 0xfe, 0x00, 0x00,
+ 0x40, 0x01, 0xfa, 0x16, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0x64,
+
+ /* ICMP */
+ 0x08, 0x00, 0xb5, 0xe8, 0xa8, 0x0a, 0x05, 0x00,
+ 0xa6, 0x9c, 0x08, 0x3d, 0x0b, 0x66, 0x0e, 0x00,
+ 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77,
+ 0x77, 0x77, 0x77, 0x77,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_6_esp = {
+ .len = 90,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x4c, 0x08, 0xfe, 0x00, 0x00,
+ 0x40, 0x32, 0xf9, 0xc9, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0x64,
+
+ /* ESP */
+ 0x00, 0x00, 0x43, 0x21, 0x00, 0x00, 0x00, 0x08,
+
+ /* IV */
+ 0x69, 0xd0, 0x8d, 0xf7, 0xd2, 0x03, 0x32, 0x9d,
+ 0xb0, 0x93, 0xfc, 0x49, 0x24, 0xe5, 0xbd, 0x80,
+
+ /* data */
+ 0xf5, 0x19, 0x95, 0x88, 0x1e, 0xc4, 0xe0, 0xc4,
+ 0x48, 0x89, 0x87, 0xce, 0x74, 0x2e, 0x81, 0x09,
+ 0x68, 0x9b, 0xb3, 0x79, 0xd2, 0xd7, 0x50, 0xc0,
+ 0xd9, 0x15, 0xdc, 0xa3, 0x46, 0xa8, 0x9f, 0x75,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_7 = {
+ .len = 98,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x54, 0x09, 0x04, 0x00, 0x00,
+ 0x40, 0x01, 0xf9, 0x88, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0xc8,
+
+ /* ICMP */
+ 0x08, 0x00, 0x9f, 0x76, 0xa9, 0x0a, 0x01, 0x00,
+ 0xb4, 0x9c, 0x08, 0x3d, 0x02, 0xa2, 0x04, 0x00,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_7_esp = {
+ .len = 154,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x8c, 0x09, 0x05, 0x00, 0x00,
+ 0x40, 0x32, 0xf9, 0x1e, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0xc8,
+
+ /* ESP */
+ 0x00, 0x00, 0x87, 0x65, 0x00, 0x00, 0x00, 0x02,
+
+ /* IV */
+ 0xf4, 0xe7, 0x65, 0x24, 0x4f, 0x64, 0x07, 0xad,
+ 0xf1, 0x3d, 0xc1, 0x38, 0x0f, 0x67, 0x3f, 0x37,
+
+ /* data */
+ 0x77, 0x3b, 0x52, 0x41, 0xa4, 0xc4, 0x49, 0x22,
+ 0x5e, 0x4f, 0x3c, 0xe5, 0xed, 0x61, 0x1b, 0x0c,
+ 0x23, 0x7c, 0xa9, 0x6c, 0xf7, 0x4a, 0x93, 0x01,
+ 0x3c, 0x1b, 0x0e, 0xa1, 0xa0, 0xcf, 0x70, 0xf8,
+ 0xe4, 0xec, 0xae, 0xc7, 0x8a, 0xc5, 0x3a, 0xad,
+ 0x7a, 0x0f, 0x02, 0x2b, 0x85, 0x92, 0x43, 0xc6,
+ 0x47, 0x75, 0x2e, 0x94, 0xa8, 0x59, 0x35, 0x2b,
+ 0x8a, 0x4d, 0x4d, 0x2d, 0xec, 0xd1, 0x36, 0xe5,
+ 0xc1, 0x77, 0xf1, 0x32, 0xad, 0x3f, 0xbf, 0xb2,
+ 0x20, 0x1a, 0xc9, 0x90, 0x4c, 0x74, 0xee, 0x0a,
+ 0x10, 0x9e, 0x0c, 0xa1, 0xe4, 0xdf, 0xe9, 0xd5,
+ 0xa1, 0x00, 0xb8, 0x42, 0xf1, 0xc2, 0x2f, 0x0d,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_8 = {
+ .len = 82,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x44, 0x09, 0x0c, 0x00, 0x00,
+ 0x40, 0x01, 0xf9, 0x90, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0xc8,
+
+ /* ICMP */
+ 0x08, 0x00, 0xd6, 0x3c, 0xaa, 0x0a, 0x02, 0x00,
+ 0xc6, 0x9c, 0x08, 0x3d, 0xa3, 0xde, 0x03, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+};
+
+static const ipsec_test_packet pkt_rfc3602_8_esp = {
+ .len = 138,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x7c, 0x09, 0x0d, 0x00, 0x00,
+ 0x40, 0x32, 0xf9, 0x26, 0xc0, 0xa8, 0x7b, 0x03,
+ 0xc0, 0xa8, 0x7b, 0xc8,
+
+ /* ESP */
+ 0x00, 0x00, 0x87, 0x65, 0x00, 0x00, 0x00, 0x05,
+
+ /* IV */
+ 0x85, 0xd4, 0x72, 0x24, 0xb5, 0xf3, 0xdd, 0x5d,
+ 0x21, 0x01, 0xd4, 0xea, 0x8d, 0xff, 0xab, 0x22,
+
+ /* data */
+ 0x15, 0xb9, 0x26, 0x83, 0x81, 0x95, 0x96, 0xa8,
+ 0x04, 0x72, 0x32, 0xcc, 0x00, 0xf7, 0x04, 0x8f,
+ 0xe4, 0x53, 0x18, 0xe1, 0x1f, 0x8a, 0x0f, 0x62,
+ 0xed, 0xe3, 0xc3, 0xfc, 0x61, 0x20, 0x3b, 0xb5,
+ 0x0f, 0x98, 0x0a, 0x08, 0xc9, 0x84, 0x3f, 0xd3,
+ 0xa1, 0xb0, 0x6d, 0x5c, 0x07, 0xff, 0x96, 0x39,
+ 0xb7, 0xeb, 0x7d, 0xfb, 0x35, 0x12, 0xe5, 0xde,
+ 0x43, 0x5e, 0x72, 0x07, 0xed, 0x97, 0x1e, 0xf3,
+ 0xd2, 0x72, 0x6d, 0x9b, 0x5e, 0xf6, 0xaf, 0xfc,
+ 0x6d, 0x17, 0xa0, 0xde, 0xcb, 0xb1, 0x38, 0x92,
+ },
+};
+
+/*
+ * Several tests from draft-mcgrew-gcm-test-01. It was never completed as an
+ * RFC, but serves a good purpose anyway.
+ *
+ * Note: plaintext texts also contain ESP trailers, which we
+ * do not include here into plaintext packets.
+ */
+static const ipsec_test_packet pkt_mcgrew_gcm_test_2 = {
+ .len = 76,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x3e, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x11, 0x4d, 0xcc, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* UDP */
+ 0x0a, 0x98, 0x00, 0x35, 0x00, 0x2a, 0x23, 0x43,
+ 0xb2, 0xd0, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x73, 0x69, 0x70,
+ 0x09, 0x63, 0x79, 0x62, 0x65, 0x72, 0x63, 0x69,
+ 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00, 0x00, 0x01,
+ 0x00, 0x01,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_2_esp = {
+ .len = 130,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x74, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0x75, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
+
+ /* IV */
+ 0xfa, 0xce, 0xdb, 0xad, 0xde, 0xca, 0xf8, 0x88,
+
+ /* Data */
+ 0xde, 0xb2, 0x2c, 0xd9, 0xb0, 0x7c, 0x72, 0xc1,
+ 0x6e, 0x3a, 0x65, 0xbe, 0xeb, 0x8d, 0xf3, 0x04,
+ 0xa5, 0xa5, 0x89, 0x7d, 0x33, 0xae, 0x53, 0x0f,
+ 0x1b, 0xa7, 0x6d, 0x5d, 0x11, 0x4d, 0x2a, 0x5c,
+ 0x3d, 0xe8, 0x18, 0x27, 0xc1, 0x0e, 0x9a, 0x4f,
+ 0x51, 0x33, 0x0d, 0x0e, 0xec, 0x41, 0x66, 0x42,
+ 0xcf, 0xbb, 0x85, 0xa5, 0xb4, 0x7e, 0x48, 0xa4,
+ 0xec, 0x3b, 0x9b, 0xa9, 0x5d, 0x91, 0x8b, 0xd4,
+ 0x26, 0xf8, 0x39, 0x1b, 0x99, 0x27, 0xd0, 0xfc,
+ 0xc9, 0x84, 0x56, 0x1b, 0xbb, 0xce, 0x9f, 0xc0,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_3 = {
+ .len = 62,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x30, 0x69, 0xa6, 0x40, 0x00,
+ 0x80, 0x06, 0x26, 0x90, 0xc0, 0xa8, 0x01, 0x02,
+ 0x93, 0x89, 0x15, 0x5e,
+
+ /* TCP */
+ 0x0a, 0x9e, 0x00, 0x8b, 0x2d, 0xc5, 0x7e, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x02, 0x40, 0x00,
+ 0x20, 0xbf, 0x00, 0x00, 0x02, 0x04, 0x05, 0xb4,
+ 0x01, 0x01, 0x04, 0x02,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_3_esp = {
+ .len = 118,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x68, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0x81, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x4a, 0x2c, 0xbf, 0xe3, 0x00, 0x00, 0x00, 0x02,
+
+ /* IV */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+
+ /* Data */
+ 0xff, 0x42, 0x5c, 0x9b, 0x72, 0x45, 0x99, 0xdf,
+ 0x7a, 0x3b, 0xcd, 0x51, 0x01, 0x94, 0xe0, 0x0d,
+ 0x6a, 0x78, 0x10, 0x7f, 0x1b, 0x0b, 0x1c, 0xbf,
+ 0x06, 0xef, 0xae, 0x9d, 0x65, 0xa5, 0xd7, 0x63,
+ 0x74, 0x8a, 0x63, 0x79, 0x85, 0x77, 0x1d, 0x34,
+ 0x7f, 0x05, 0x45, 0x65, 0x9f, 0x14, 0xe9, 0x9d,
+ 0xef, 0x84, 0x2d, 0x8b, 0x42, 0xf5, 0x64, 0xf5,
+ 0x2d, 0xfd, 0xd6, 0xee, 0xf4, 0xf9, 0x2e, 0xad,
+ 0xba, 0xc2, 0x39, 0x90,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_4 = {
+ .len = 74,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x3c, 0x99, 0xc5, 0x00, 0x00,
+ 0x80, 0x01, 0xcb, 0x7a, 0x40, 0x67, 0x93, 0x18,
+ 0x01, 0x01, 0x01, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0x07, 0x5c, 0x02, 0x00, 0x44, 0x00,
+ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x61,
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_4_esp = {
+ .len = 130,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x74, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0x75, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* Data */
+ 0x46, 0x88, 0xda, 0xf2, 0xf9, 0x73, 0xa3, 0x92,
+ 0x73, 0x29, 0x09, 0xc3, 0x31, 0xd5, 0x6d, 0x60,
+ 0xf6, 0x94, 0xab, 0xaa, 0x41, 0x4b, 0x5e, 0x7f,
+ 0xf5, 0xfd, 0xcd, 0xff, 0xf5, 0xe9, 0xa2, 0x84,
+ 0x45, 0x64, 0x76, 0x49, 0x27, 0x19, 0xff, 0xb6,
+ 0x4d, 0xe7, 0xd9, 0xdc, 0xa1, 0xe1, 0xd8, 0x94,
+ 0xbc, 0x3b, 0xd5, 0x78, 0x73, 0xed, 0x4d, 0x18,
+ 0x1d, 0x19, 0xd4, 0xd5, 0xc8, 0xc1, 0x8a, 0xf6,
+ 0xfe, 0x1d, 0x73, 0x72, 0x22, 0x8a, 0x69, 0xf4,
+ 0x0d, 0xeb, 0x37, 0x3d, 0xdc, 0x01, 0x67, 0x6b,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_12 = {
+ .len = 14,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = ODP_PACKET_OFFSET_INVALID,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_12_notun = {
+ .len = 34,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x14, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x3b, 0x4d, 0xcc, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_12_esp = {
+ .len = 70,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x38, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0xb2, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x33, 0x54, 0x67, 0xae, 0xff, 0xff, 0xff, 0xff,
+
+ /* IV */
+ 0x43, 0x45, 0x7e, 0x91, 0x82, 0x44, 0x3b, 0xc6,
+
+ /* Data */
+ 0x43, 0x7f, 0x86, 0x51, 0x7e, 0xa5, 0x95, 0xd2,
+ 0xca, 0x00, 0x4c, 0x33, 0x38, 0x8c, 0x46, 0x77,
+ 0x0c, 0x59, 0x0a, 0xd6,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_15 = {
+ .len = 62,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x30, 0xda, 0x3a, 0x00, 0x00,
+ 0x80, 0x01, 0xdf, 0x3b, 0xc0, 0xa8, 0x00, 0x05,
+ 0xc0, 0xa8, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xc6, 0xcd, 0x02, 0x00, 0x07, 0x00,
+ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
+ 0x71, 0x72, 0x73, 0x74,
+ },
+};
+
+static const ipsec_test_packet pkt_mcgrew_gcm_test_15_esp = {
+ .len = 118,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP - not a part of RFC, added for simplicity */
+ 0x45, 0x00, 0x00, 0x68, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0x81, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x00, 0x00, 0x43, 0x21, 0x00, 0x00, 0x00, 0x07,
+
+ /* IV */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x30, 0xda, 0x3a, 0x00, 0x00,
+ 0x80, 0x01, 0xdf, 0x3b, 0xc0, 0xa8, 0x00, 0x05,
+ 0xc0, 0xa8, 0x00, 0x01, 0x08, 0x00, 0xc6, 0xcd,
+ 0x02, 0x00, 0x07, 0x00, 0x61, 0x62, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x01, 0x02, 0x02, 0x04, 0x59, 0x4f, 0x40, 0x55,
+ 0x42, 0x8d, 0x39, 0x9a, 0x9d, 0x66, 0xc1, 0x5e,
+ 0x77, 0x02, 0x3a, 0x98,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_rfc7634 = {
+ .len = 98,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x54, 0xa6, 0xf2, 0x00, 0x00,
+ 0x40, 0x01, 0xe7, 0x78, 0xc6, 0x33, 0x64, 0x05,
+ 0xc0, 0x00, 0x02, 0x05,
+
+ /* ICMP */
+ 0x08, 0x00, 0x5b, 0x7a, 0x3a, 0x08, 0x00, 0x00,
+ 0x55, 0x3b, 0xec, 0x10, 0x00, 0x07, 0x36, 0x27,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet pkt_ipv4_rfc7634_esp = {
+ .len = 154,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x8c, 0x23, 0x45, 0x00, 0x00,
+ 0x40, 0x32, 0xde, 0x5b, 0xcb, 0x00, 0x71, 0x99,
+ 0xcb, 0x00, 0x71, 0x05,
+
+ /* ESP */
+ 0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00, 0x05,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x24, 0x03, 0x94, 0x28, 0xb9, 0x7f, 0x41, 0x7e,
+ 0x3c, 0x13, 0x75, 0x3a, 0x4f, 0x05, 0x08, 0x7b,
+ 0x67, 0xc3, 0x52, 0xe6, 0xa7, 0xfa, 0xb1, 0xb9,
+ 0x82, 0xd4, 0x66, 0xef, 0x40, 0x7a, 0xe5, 0xc6,
+ 0x14, 0xee, 0x80, 0x99, 0xd5, 0x28, 0x44, 0xeb,
+ 0x61, 0xaa, 0x95, 0xdf, 0xab, 0x4c, 0x02, 0xf7,
+ 0x2a, 0xa7, 0x1e, 0x7c, 0x4c, 0x4f, 0x64, 0xc9,
+ 0xbe, 0xfe, 0x2f, 0xac, 0xc6, 0x38, 0xe8, 0xf3,
+ 0xcb, 0xec, 0x16, 0x3f, 0xac, 0x46, 0x9b, 0x50,
+ 0x27, 0x73, 0xf6, 0xfb, 0x94, 0xe6, 0x64, 0xda,
+ 0x91, 0x65, 0xb8, 0x28, 0x29, 0xf6, 0x41, 0xe0,
+ 0x76, 0xaa, 0xa8, 0x26, 0x6b, 0x7f, 0xb0, 0xf7,
+ 0xb1, 0x1b, 0x36, 0x99, 0x07, 0xe1, 0xad, 0x43,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv6_icmp_0_esp_udp_null_sha256_1 = {
+ .len = 206,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 62,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x86, 0xdd,
+
+ /* IP v6 */
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x98, 0x00, 0x40,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x11, 0x43, 0xff, 0xfe, 0x4a, 0xd7, 0x0a,
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+
+ /* Hop-by-Hop */
+ 0x11, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+
+ /* UDP encap */
+ 0x11, 0x94, 0x11, 0x94, 0x00, 0x90, 0x00, 0x00,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* ICMP */
+ 0x08, 0x00, 0xfb, 0x37, 0x12, 0x34, 0x00, 0x00,
+ 0xba, 0xbe, 0x01, 0x23, 0x45, 0x67, 0xca, 0xfe,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b,
+
+ /* ESP TRL */
+ 0x01, 0x02, 0x02, 0x3a,
+
+ /* ICV */
+ 0x20, 0xa6, 0x89, 0x7b, 0x0a, 0x52, 0x5b, 0xca,
+ 0x98, 0x56, 0xd1, 0xfe, 0x56, 0xc7, 0xa4, 0x5b,
+ },
+};
+
+static const ipsec_test_packet pkt_test_empty = {
+ .len = 1,
+ .l2_offset = ODP_PACKET_OFFSET_INVALID,
+ .l3_offset = ODP_PACKET_OFFSET_INVALID,
+ .l4_offset = ODP_PACKET_OFFSET_INVALID,
+ .data = { 0 },
+};
+
+static const ipsec_test_packet pkt_test_nodata = {
+ .len = 14,
+ .l2_offset = 0,
+ .l3_offset = ODP_PACKET_OFFSET_INVALID,
+ .l4_offset = ODP_PACKET_OFFSET_INVALID,
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0c, 0x0d,
+ },
+};
+
+static const ipsec_test_packet pkt_ipv4_udp = {
+ .len = 76,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x3e, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* UDP */
+ 0x0a, 0x98, 0x00, 0x35, 0x00, 0x2a, 0x00, 0x00,
+ 0xb2, 0xd0, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x73, 0x69, 0x70,
+ 0x09, 0x63, 0x79, 0x62, 0x65, 0x72, 0x63, 0x69,
+ 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00, 0x00, 0x01,
+ 0x00, 0x01,
+ },
+};
+
+static const ipsec_test_packet pkt_ipv4_udp_esp_null_sha256 = {
+ .len = 102,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH - not a part of RFC, added for simplicity */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x58, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x32, 0x4d, 0x91, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* UDP */
+ 0x0a, 0x98, 0x00, 0x35, 0x00, 0x2a, 0x23, 0x43,
+ 0xb2, 0xd0, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x73, 0x69, 0x70,
+ 0x09, 0x63, 0x79, 0x62, 0x65, 0x72, 0x63, 0x69,
+ 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00, 0x00, 0x01,
+ 0x00, 0x01,
+
+ /* ESP TRL */
+ 0x00, 0x11, 0x2d, 0x4a, 0x06, 0x9f, 0x97, 0xcf,
+ 0xa3, 0x05, 0xea, 0x90, 0x7a, 0xf6, 0x6b, 0x0a,
+ 0x3f, 0xc7,
+ },
+};
+
+static const ipsec_test_packet pkt_ipv4_null_aes_xcbc_esp = {
+ .len = 106,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x5c, 0x06, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0x13, 0x6c, 0x0a, 0x00, 0x6f, 0x02,
+ 0x0a, 0x00, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x2f, 0x49, 0x37, 0x00, 0x00,
+ 0x40, 0x11, 0x22, 0x84, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02, 0x08, 0x00, 0x08, 0x00,
+ 0x00, 0x1b, 0x6d, 0x99, 0x58, 0x58, 0x58, 0x58,
+ 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+ 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+
+ /* ESP trailer */
+ 0x01, 0x02, 0x03, 0x03, 0x04,
+
+ /* ICV */
+ 0xf1, 0x52, 0x64, 0xd1, 0x9b, 0x62, 0x24, 0xdd,
+ 0xcc, 0x14, 0xf5, 0xc1,
+ },
+};
+
+static const ipsec_test_packet pkt_ipv4_null_aes_xcbc_plain = {
+ .len = 61,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0x2f, 0x49, 0x37, 0x00, 0x00,
+ 0x40, 0x11, 0x22, 0x84, 0x0d, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x02, 0x08, 0x00, 0x08, 0x00,
+ 0x00, 0x1b, 0x6d, 0x99, 0x58, 0x58, 0x58, 0x58,
+ 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+ 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+ },
+};
+
+#endif
diff --git a/test/common_plat/validation/api/lock/.gitignore b/test/validation/api/lock/.gitignore
index ff16646f4..ff16646f4 100644
--- a/test/common_plat/validation/api/lock/.gitignore
+++ b/test/validation/api/lock/.gitignore
diff --git a/test/validation/api/lock/Makefile.am b/test/validation/api/lock/Makefile.am
new file mode 100644
index 000000000..ad75e1854
--- /dev/null
+++ b/test/validation/api/lock/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = lock_main
+lock_main_SOURCES = lock.c
diff --git a/test/common_plat/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index bd9a2aad2..a4e6932c4 100644
--- a/test/common_plat/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,13 +6,13 @@
#include <malloc.h>
#include <odp_api.h>
-#include <CUnit/Basic.h>
+#include <odp/helper/odph_api.h>
#include <odp_cunit_common.h>
#include <unistd.h>
-#include "lock.h"
#define VERBOSE 0
+#define MAX_WORKERS 32
#define MIN_ITERATIONS 1000
#define MAX_ITERATIONS 30000
#define ITER_MPLY_FACTOR 3
@@ -257,7 +257,7 @@ static int ticketlock_api_tests(void *arg UNUSED)
static void rwlock_api_test(odp_rwlock_t *rw_lock)
{
- int rc;
+ int rc = 0;
odp_rwlock_init(rw_lock);
/* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
@@ -265,23 +265,40 @@ static void rwlock_api_test(odp_rwlock_t *rw_lock)
odp_rwlock_read_lock(rw_lock);
rc = odp_rwlock_read_trylock(rw_lock);
- CU_ASSERT(rc == 0);
+ CU_ASSERT(rc != 0);
+ if (rc == 1)
+ odp_rwlock_read_unlock(rw_lock);
+
rc = odp_rwlock_write_trylock(rw_lock);
CU_ASSERT(rc == 0);
+ if (rc == 1)
+ odp_rwlock_write_unlock(rw_lock);
odp_rwlock_read_unlock(rw_lock);
rc = odp_rwlock_read_trylock(rw_lock);
+ CU_ASSERT(rc != 0);
if (rc == 1)
odp_rwlock_read_unlock(rw_lock);
odp_rwlock_write_lock(rw_lock);
/* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 1); */
+ rc = odp_rwlock_read_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+ if (rc == 1)
+ odp_rwlock_read_unlock(rw_lock);
+
+ rc = odp_rwlock_write_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+ if (rc == 1)
+ odp_rwlock_write_unlock(rw_lock);
+
odp_rwlock_write_unlock(rw_lock);
/* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
rc = odp_rwlock_write_trylock(rw_lock);
+ CU_ASSERT(rc != 0);
if (rc == 1)
odp_rwlock_write_unlock(rw_lock);
}
@@ -383,7 +400,7 @@ static int no_lock_functional_test(void *arg UNUSED)
lock_owner_delay = BASE_DELAY;
/*
- * Tunning the iteration number:
+ * Tuning the iteration number:
* Here, we search for an iteration number that guarantees to show
* race conditions between the odp threads.
* Iterations is set to ITER_MPLY_FACTOR * cnt where cnt is when
@@ -453,7 +470,7 @@ static int no_lock_functional_test(void *arg UNUSED)
CU_ASSERT(sync_failures != 0 || global_mem->g_num_threads == 1);
/*
- * set the iterration for the future tests to be far above the
+ * set the iteration for the future tests to be far above the
* contention level
*/
iterations *= ITER_MPLY_FACTOR;
@@ -552,12 +569,12 @@ static int spinlock_functional_test(void *arg UNUSED)
if ((global_mem->g_verbose) &&
((sync_failures != 0) || (is_locked_errs != 0)))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " sync_failures and %" PRIu32
- " is_locked_errs in %" PRIu32
- " iterations\n", thread_num,
- per_thread_mem->thread_id, per_thread_mem->thread_core,
- sync_failures, is_locked_errs, iterations);
+ ODPH_ERR("Thread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " sync_failures and %" PRIu32
+ " is_locked_errs in %" PRIu32
+ " iterations\n", thread_num,
+ per_thread_mem->thread_id, per_thread_mem->thread_core,
+ sync_failures, is_locked_errs, iterations);
CU_ASSERT(sync_failures == 0);
CU_ASSERT(is_locked_errs == 0);
@@ -658,14 +675,14 @@ static int spinlock_recursive_functional_test(void *arg UNUSED)
if ((global_mem->g_verbose) &&
(sync_failures != 0 || recursive_errs != 0 || is_locked_errs != 0))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " sync_failures and %" PRIu32
- " recursive_errs and %" PRIu32
- " is_locked_errs in %" PRIu32
- " iterations\n", thread_num,
- per_thread_mem->thread_id, per_thread_mem->thread_core,
- sync_failures, recursive_errs, is_locked_errs,
- iterations);
+ ODPH_ERR("Thread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " sync_failures and %" PRIu32
+ " recursive_errs and %" PRIu32
+ " is_locked_errs in %" PRIu32
+ " iterations\n", thread_num,
+ per_thread_mem->thread_id, per_thread_mem->thread_core,
+ sync_failures, recursive_errs, is_locked_errs,
+ iterations);
CU_ASSERT(sync_failures == 0);
CU_ASSERT(recursive_errs == 0);
@@ -748,12 +765,12 @@ static int ticketlock_functional_test(void *arg UNUSED)
if ((global_mem->g_verbose) &&
((sync_failures != 0) || (is_locked_errs != 0)))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " sync_failures and %" PRIu32
- " is_locked_errs in %" PRIu32 " iterations\n",
- thread_num,
- per_thread_mem->thread_id, per_thread_mem->thread_core,
- sync_failures, is_locked_errs, iterations);
+ ODPH_ERR("Thread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " sync_failures and %" PRIu32
+ " is_locked_errs in %" PRIu32 " iterations\n",
+ thread_num,
+ per_thread_mem->thread_id, per_thread_mem->thread_core,
+ sync_failures, is_locked_errs, iterations);
CU_ASSERT(sync_failures == 0);
CU_ASSERT(is_locked_errs == 0);
@@ -841,11 +858,11 @@ static int rwlock_functional_test(void *arg UNUSED)
}
if ((global_mem->g_verbose) && (sync_failures != 0))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " sync_failures in %" PRIu32 " iterations\n", thread_num,
- per_thread_mem->thread_id,
- per_thread_mem->thread_core,
- sync_failures, iterations);
+ ODPH_ERR("Thread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " sync_failures in %" PRIu32 " iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core,
+ sync_failures, iterations);
CU_ASSERT(sync_failures == 0);
@@ -968,13 +985,13 @@ static int rwlock_recursive_functional_test(void *arg UNUSED)
}
if ((global_mem->g_verbose) && (sync_failures != 0))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " sync_failures and %" PRIu32
- " recursive_errs in %" PRIu32
- " iterations\n", thread_num,
- per_thread_mem->thread_id,
- per_thread_mem->thread_core,
- sync_failures, recursive_errs, iterations);
+ ODPH_ERR("Thread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " sync_failures and %" PRIu32
+ " recursive_errs in %" PRIu32
+ " iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core,
+ sync_failures, recursive_errs, iterations);
CU_ASSERT(sync_failures == 0);
CU_ASSERT(recursive_errs == 0);
@@ -985,13 +1002,12 @@ static int rwlock_recursive_functional_test(void *arg UNUSED)
}
/* Thread-unsafe tests */
-void lock_test_no_lock_functional(void)
+static void lock_test_no_lock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(no_lock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, no_lock_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_no_locking[] = {
@@ -1000,42 +1016,38 @@ odp_testinfo_t lock_suite_no_locking[] = {
};
/* Spin lock tests */
-void lock_test_spinlock_api(void)
+static void lock_test_spinlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(spinlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_api_tests, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_spinlock_functional(void)
+static void lock_test_spinlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_spinlock_init(&global_mem->global_spinlock);
- odp_cunit_thread_create(spinlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_spinlock_recursive_api(void)
+static void lock_test_spinlock_recursive_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(spinlock_recursive_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_recursive_api_tests, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_spinlock_recursive_functional(void)
+static void lock_test_spinlock_recursive_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_spinlock_recursive_init(&global_mem->global_recursive_spinlock);
- odp_cunit_thread_create(spinlock_recursive_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_recursive_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_spinlock[] = {
@@ -1051,24 +1063,21 @@ odp_testinfo_t lock_suite_spinlock_recursive[] = {
};
/* Ticket lock tests */
-void lock_test_ticketlock_api(void)
+static void lock_test_ticketlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(ticketlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, ticketlock_api_tests, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_ticketlock_functional(void)
+static void lock_test_ticketlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_ticketlock_init(&global_mem->global_ticketlock);
-
- odp_cunit_thread_create(ticketlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, ticketlock_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_ticketlock[] = {
@@ -1078,23 +1087,21 @@ odp_testinfo_t lock_suite_ticketlock[] = {
};
/* RW lock tests */
-void lock_test_rwlock_api(void)
+static void lock_test_rwlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(rwlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_api_tests, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_rwlock_functional(void)
+static void lock_test_rwlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_rwlock_init(&global_mem->global_rwlock);
- odp_cunit_thread_create(rwlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_rwlock[] = {
@@ -1103,23 +1110,21 @@ odp_testinfo_t lock_suite_rwlock[] = {
ODP_TEST_INFO_NULL
};
-void lock_test_rwlock_recursive_api(void)
+static void lock_test_rwlock_recursive_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(rwlock_recursive_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_recursive_api_tests, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
-void lock_test_rwlock_recursive_functional(void)
+static void lock_test_rwlock_recursive_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_rwlock_recursive_init(&global_mem->global_recursive_rwlock);
- odp_cunit_thread_create(rwlock_recursive_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_recursive_functional_test, NULL, 0, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_rwlock_recursive[] = {
@@ -1128,7 +1133,7 @@ odp_testinfo_t lock_suite_rwlock_recursive[] = {
ODP_TEST_INFO_NULL
};
-int lock_suite_init(void)
+static int lock_suite_init(void)
{
uint32_t num_threads, idx;
@@ -1140,26 +1145,34 @@ int lock_suite_init(void)
return 0;
}
-int lock_init(odp_instance_t *inst)
+static int lock_init(odp_instance_t *inst)
{
uint32_t workers_count, max_threads;
int ret = 0;
- odp_cpumask_t mask;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (0 != odp_init_global(inst, NULL, NULL)) {
- fprintf(stderr, "error: odp_init_global() failed.\n");
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
return -1;
}
if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
- fprintf(stderr, "error: odp_init_local() failed.\n");
+ ODPH_ERR("odp_init_local() failed\n");
return -1;
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
+ sizeof(global_shared_mem_t), 64, 0);
if (ODP_SHM_INVALID == global_shm) {
- fprintf(stderr, "Unable reserve memory for global_shm\n");
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
return -1;
}
@@ -1170,7 +1183,7 @@ int lock_init(odp_instance_t *inst)
global_mem->g_iterations = 0; /* tuned by first test */
global_mem->g_verbose = VERBOSE;
- workers_count = odp_cpumask_default_worker(&mask, 0);
+ workers_count = odp_cpumask_default_worker(NULL, 0);
max_threads = (workers_count >= MAX_WORKERS) ?
MAX_WORKERS : workers_count;
@@ -1189,23 +1202,23 @@ int lock_init(odp_instance_t *inst)
return ret;
}
-int lock_term(odp_instance_t inst)
+static int lock_term(odp_instance_t inst)
{
odp_shm_t shm;
shm = odp_shm_lookup(GLOBAL_SHM_NAME);
if (0 != odp_shm_free(shm)) {
- fprintf(stderr, "error: odp_shm_free() failed.\n");
+ ODPH_ERR("odp_shm_free() failed\n");
return -1;
}
if (0 != odp_term_local()) {
- fprintf(stderr, "error: odp_term_local() failed.\n");
+ ODPH_ERR("odp_term_local() failed\n");
return -1;
}
if (0 != odp_term_global(inst)) {
- fprintf(stderr, "error: odp_term_global() failed.\n");
+ ODPH_ERR("odp_term_global() failed\n");
return -1;
}
@@ -1228,12 +1241,12 @@ odp_suiteinfo_t lock_suites[] = {
ODP_SUITE_INFO_NULL
};
-int lock_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
int ret;
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
odp_cunit_register_global_init(lock_init);
diff --git a/test/validation/api/ml/.gitignore b/test/validation/api/ml/.gitignore
new file mode 100644
index 000000000..be2347720
--- /dev/null
+++ b/test/validation/api/ml/.gitignore
@@ -0,0 +1 @@
+ml_main
diff --git a/test/validation/api/ml/Makefile.am b/test/validation/api/ml/Makefile.am
new file mode 100644
index 000000000..b7946a963
--- /dev/null
+++ b/test/validation/api/ml/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = ml_main
+ml_main_SOURCES = ml.c
diff --git a/test/validation/api/ml/ml.c b/test/validation/api/ml/ml.c
new file mode 100644
index 000000000..5f8be1b64
--- /dev/null
+++ b/test/validation/api/ml/ml.c
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#define UAREA 0xaa
+#define NUM_COMPL 10u
+#define COMPL_POOL_NAME "ML compl pool"
+
+typedef struct global_t {
+ int disabled;
+ uint32_t num_compl;
+ odp_ml_capability_t ml_capa;
+} global_t;
+
+typedef struct {
+ uint32_t count;
+ uint8_t mark[NUM_COMPL];
+} uarea_init_t;
+
+static global_t global;
+
+static int ml_suite_init(void)
+{
+ memset(&global, 0, sizeof(global_t));
+
+ if (odp_ml_capability(&global.ml_capa)) {
+ ODPH_ERR("ML capability failed\n");
+ return -1;
+ }
+
+ if (global.ml_capa.max_models == 0) {
+ global.disabled = 1;
+ ODPH_DBG("ML test disabled\n");
+ return 0;
+ }
+
+ global.num_compl = ODPH_MIN(NUM_COMPL, global.ml_capa.pool.max_num);
+
+ return 0;
+}
+
+static int check_ml_support(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void test_ml_capability(void)
+{
+ odp_ml_capability_t ml_capa;
+
+ memset(&ml_capa, 0, sizeof(odp_ml_capability_t));
+ CU_ASSERT(odp_ml_capability(&ml_capa) == 0);
+
+ if (ml_capa.max_models == 0)
+ return;
+
+ CU_ASSERT(ml_capa.max_model_size > 0);
+ CU_ASSERT(ml_capa.max_models_loaded > 0);
+ CU_ASSERT(ml_capa.max_inputs > 0);
+ CU_ASSERT(ml_capa.max_outputs > 0);
+ CU_ASSERT(ml_capa.max_segs_per_input > 0);
+ CU_ASSERT(ml_capa.max_segs_per_output > 0);
+ CU_ASSERT(ml_capa.min_input_align > 0);
+ CU_ASSERT(ml_capa.min_output_align > 0);
+
+ if ((ml_capa.load.compl_mode_mask | ml_capa.run.compl_mode_mask) &
+ ODP_ML_COMPL_MODE_EVENT) {
+ odp_pool_capability_t pool_capa;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+ CU_ASSERT(ml_capa.pool.max_pools > 0);
+ CU_ASSERT(ml_capa.pool.max_pools <= pool_capa.max_pools);
+ CU_ASSERT(ml_capa.pool.max_num > 0);
+ CU_ASSERT(ml_capa.pool.max_cache_size >= ml_capa.pool.min_cache_size);
+ }
+
+ CU_ASSERT(ml_capa.load.compl_mode_mask);
+ CU_ASSERT(ml_capa.run.compl_mode_mask);
+
+ if (ml_capa.load.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)
+ CU_ASSERT(ml_capa.load.compl_queue_plain || ml_capa.load.compl_queue_sched);
+
+ if (ml_capa.run.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)
+ CU_ASSERT(ml_capa.run.compl_queue_plain || ml_capa.run.compl_queue_sched);
+}
+
+static void test_ml_param(uint8_t fill)
+{
+ odp_ml_config_t config;
+ odp_ml_model_param_t model_param;
+ odp_ml_compl_pool_param_t pool_param;
+ odp_ml_compl_param_t compl_param;
+ odp_ml_run_param_t run_param;
+
+ memset(&config, fill, sizeof(config));
+ odp_ml_config_init(&config);
+ CU_ASSERT(config.max_models_created == 1);
+ CU_ASSERT(config.max_models_loaded == 1);
+ CU_ASSERT(config.load_mode_mask == 0);
+ CU_ASSERT(config.run_mode_mask == 0);
+
+ memset(&model_param, fill, sizeof(model_param));
+ odp_ml_model_param_init(&model_param);
+ CU_ASSERT(model_param.max_compl_id == 0);
+ CU_ASSERT(!model_param.extra_stat_enable);
+ CU_ASSERT(model_param.extra_param == NULL);
+ CU_ASSERT(model_param.extra_info.num_inputs == 0);
+ CU_ASSERT(model_param.extra_info.num_outputs == 0);
+ CU_ASSERT(model_param.extra_info.input_format == NULL);
+ CU_ASSERT(model_param.extra_info.output_format == NULL);
+
+ memset(&pool_param, fill, sizeof(pool_param));
+ odp_ml_compl_pool_param_init(&pool_param);
+ CU_ASSERT(pool_param.uarea_size == 0);
+ CU_ASSERT(pool_param.uarea_init.args == NULL);
+ CU_ASSERT(pool_param.uarea_init.init_fn == NULL);
+ CU_ASSERT(pool_param.cache_size <= global.ml_capa.pool.max_cache_size);
+ CU_ASSERT(pool_param.cache_size >= global.ml_capa.pool.min_cache_size);
+
+ memset(&compl_param, fill, sizeof(compl_param));
+ odp_ml_compl_param_init(&compl_param);
+ CU_ASSERT(compl_param.user_ptr == NULL);
+
+ memset(&run_param, fill, sizeof(run_param));
+ odp_ml_run_param_init(&run_param);
+ CU_ASSERT(run_param.batch_size == 0);
+ CU_ASSERT(run_param.result == NULL);
+}
+
+static void test_ml_param_init(void)
+{
+ test_ml_param(0x00);
+ test_ml_param(0xff);
+}
+
+static void test_ml_debug(void)
+{
+ odp_ml_print();
+}
+
+static void ml_compl_pool_create_max_pools(void)
+{
+ int ret;
+ uint32_t i, j;
+ odp_ml_compl_pool_param_t ml_pool_param;
+ uint32_t max_pools = global.ml_capa.pool.max_pools;
+ odp_pool_t compl_pools[max_pools];
+
+ odp_ml_compl_pool_param_init(&ml_pool_param);
+ ml_pool_param.num = global.num_compl;
+ for (i = 0; i < max_pools; i++) {
+ compl_pools[i] = odp_ml_compl_pool_create(NULL, &ml_pool_param);
+
+ if (compl_pools[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ CU_ASSERT(i == max_pools);
+
+ /* Destroy the created valid pools */
+ for (j = 0; j < i; j++) {
+ ret = odp_pool_destroy(compl_pools[j]);
+ CU_ASSERT(ret == 0);
+
+ if (ret == -1)
+ ODPH_ERR("ML completion pool destroy failed: %u / %u\n", j, i);
+ }
+}
+
+static void compl_pool_info(void)
+{
+ odp_pool_t pool;
+ odp_pool_t compl_pool;
+ odp_pool_info_t pool_info;
+ odp_ml_compl_pool_param_t pool_param;
+
+ /* Create an ML job completion pool */
+ odp_ml_compl_pool_param_init(&pool_param);
+ pool_param.num = global.num_compl;
+
+ compl_pool = odp_ml_compl_pool_create(COMPL_POOL_NAME, &pool_param);
+ CU_ASSERT_FATAL(compl_pool != ODP_POOL_INVALID);
+
+ /* Verify info about the created ML completion pool compl_pool */
+ pool = odp_pool_lookup(COMPL_POOL_NAME);
+ CU_ASSERT(pool == compl_pool);
+
+ memset(&pool_info, 0x66, sizeof(odp_pool_info_t));
+ CU_ASSERT_FATAL(odp_pool_info(compl_pool, &pool_info) == 0);
+
+ CU_ASSERT(!strcmp(pool_info.name, COMPL_POOL_NAME));
+ CU_ASSERT(pool_info.pool_ext == 0);
+ CU_ASSERT(pool_info.type == ODP_POOL_ML_COMPL);
+ CU_ASSERT(pool_info.ml_pool_param.num == NUM_COMPL);
+ CU_ASSERT(pool_info.ml_pool_param.uarea_size == 0);
+ CU_ASSERT(pool_info.ml_pool_param.cache_size == pool_param.cache_size);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(compl_pool) == 0);
+}
+
+static void compl_alloc_max(void)
+{
+ uint64_t u64;
+ odp_event_t event;
+ odp_pool_t compl_pool;
+ odp_ml_compl_pool_param_t pool_param;
+ const int num = ODPH_MIN(global.ml_capa.pool.max_num, 1000000u);
+ odp_ml_compl_t *compl = malloc(num * sizeof(odp_ml_compl_t));
+
+ CU_ASSERT_FATAL(compl != NULL);
+
+ /* Create an ML job completion pool */
+ odp_ml_compl_pool_param_init(&pool_param);
+ pool_param.num = num;
+
+ compl_pool = odp_ml_compl_pool_create(COMPL_POOL_NAME, &pool_param);
+ CU_ASSERT_FATAL(compl_pool != ODP_POOL_INVALID);
+
+ for (int i = 0; i < num; i++) {
+ compl[i] = odp_ml_compl_alloc(compl_pool);
+ CU_ASSERT_FATAL(compl[i] != ODP_ML_COMPL_INVALID);
+
+ u64 = odp_ml_compl_to_u64(compl[i]);
+ CU_ASSERT(u64 != odp_ml_compl_to_u64(ODP_ML_COMPL_INVALID));
+
+ event = odp_ml_compl_to_event(compl[i]);
+ CU_ASSERT(odp_event_type(event) == ODP_EVENT_ML_COMPL);
+ }
+
+ for (int i = 0; i < num; i++)
+ odp_ml_compl_free(compl[i]);
+
+ free(compl);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(compl_pool) == 0);
+}
+
+static void compl_pool_lookup(void)
+{
+ odp_pool_t pool, pool_a, pool_b;
+ odp_ml_compl_pool_param_t pool_param;
+
+ /* Create an ML job completion pool */
+ odp_ml_compl_pool_param_init(&pool_param);
+ pool_param.num = global.num_compl;
+
+ pool_a = odp_ml_compl_pool_create(COMPL_POOL_NAME, &pool_param);
+ CU_ASSERT_FATAL(pool_a != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(COMPL_POOL_NAME);
+ CU_ASSERT(pool == pool_a);
+
+ /* Second pool with the same name */
+ pool_b = odp_ml_compl_pool_create(COMPL_POOL_NAME, &pool_param);
+ CU_ASSERT_FATAL(pool_b != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(COMPL_POOL_NAME);
+ CU_ASSERT(pool == pool_a || pool == pool_b);
+
+ CU_ASSERT(odp_pool_destroy(pool_a) == 0);
+ CU_ASSERT(odp_pool_destroy(pool_b) == 0);
+}
+
+static int check_event_user_area(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (((global.ml_capa.load.compl_mode_mask | global.ml_capa.run.compl_mode_mask) &
+ ODP_ML_COMPL_MODE_EVENT) &&
+ (global.ml_capa.pool.max_uarea_size > 0))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void test_ml_compl_user_area(void)
+{
+ uint32_t i;
+ void *addr;
+ void *prev;
+ odp_pool_t pool;
+ odp_ml_compl_pool_param_t pool_param;
+ uint32_t size = global.ml_capa.pool.max_uarea_size;
+ uint32_t num = global.num_compl;
+ odp_ml_compl_t compl_evs[num];
+
+ odp_ml_compl_pool_param_init(&pool_param);
+ pool_param.num = num;
+ pool_param.uarea_size = size;
+ pool = odp_ml_compl_pool_create(NULL, &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ prev = NULL;
+ for (i = 0; i < num; i++) {
+ odp_event_t ev;
+ int flag = 0;
+
+ compl_evs[i] = odp_ml_compl_alloc(pool);
+
+ if (compl_evs[i] == ODP_ML_COMPL_INVALID)
+ break;
+
+ addr = odp_ml_compl_user_area(compl_evs[i]);
+
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ memset(addr, 0, size);
+
+ ev = odp_ml_compl_to_event(compl_evs[i]);
+ CU_ASSERT(odp_event_user_area(ev) == addr);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+ CU_ASSERT(flag < 0);
+
+ prev = addr;
+ }
+ CU_ASSERT(i == num);
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_ml_compl_free(compl_evs[j]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static int check_event_user_area_init(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.ml_capa.pool.max_uarea_size > 0 && global.ml_capa.pool.uarea_persistence)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void init_event_uarea(void *uarea, uint32_t size, void *args, uint32_t index)
+{
+ uarea_init_t *data = args;
+
+ data->count++;
+ data->mark[index] = 1;
+ memset(uarea, UAREA, size);
+}
+
+static void test_ml_compl_user_area_init(void)
+{
+ odp_ml_compl_pool_param_t pool_param;
+ uint32_t num = global.num_compl, i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_ml_compl_t compl_evs[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_ml_compl_pool_param_init(&pool_param);
+ pool_param.uarea_init.init_fn = init_event_uarea;
+ pool_param.uarea_init.args = &data;
+ pool_param.num = num;
+ pool_param.uarea_size = 1;
+ pool = odp_ml_compl_pool_create(NULL, &pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ compl_evs[i] = odp_ml_compl_alloc(pool);
+
+ CU_ASSERT(compl_evs[i] != ODP_ML_COMPL_INVALID);
+
+ if (compl_evs[i] == ODP_ML_COMPL_INVALID)
+ break;
+
+ uarea = odp_ml_compl_user_area(compl_evs[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_ml_compl_free(compl_evs[j]);
+
+ odp_pool_destroy(pool);
+}
+
+static void test_ml_fp32_to_uint8(void)
+{
+ uint8_t u8[8];
+ float fp[8] = {-20.f, -16.4f, -14.6f, -12.5f, 0, 31.4f, 80.f, 96.3f};
+ uint8_t expected[8] = {0, 0, 4, 10, 43, 127, 255, 255};
+ float scale = 0.3746f;
+ uint8_t zero_point = 43;
+
+ odp_ml_fp32_to_uint8(u8, fp, 8, scale, zero_point);
+ for (uint32_t i = 0; i < 8; i++)
+ CU_ASSERT(u8[i] == expected[i]);
+}
+
+static void test_ml_fp32_from_uint8(void)
+{
+ float fp[4];
+ float scale = 0.4f;
+ uint8_t zero_point = 43;
+ uint8_t u8[4] = {0, 43, 145, 255};
+ float expected[4] = {-17.2f, 0.0f, 40.8f, 84.8f};
+
+ odp_ml_fp32_from_uint8(fp, u8, 4, scale, zero_point);
+ for (uint32_t i = 0; i < 4; i++)
+ CU_ASSERT(fp[i] == expected[i]);
+}
+
+static void test_ml_fp32_to_int8(void)
+{
+ int8_t i8[5];
+ float scale = 0.0223f;
+ int8_t zero_point = 0;
+ float fp32[5] = {-3.4f, -2.5f, 0, 1.4f, 2.9f};
+ int8_t i8_expected[5] = {-127, -112, 0, 63, 127};
+
+ odp_ml_fp32_to_int8(i8, fp32, 5, scale, zero_point);
+
+ for (uint32_t i = 0; i < 5; i++)
+ CU_ASSERT(i8[i] == i8_expected[i]);
+}
+
+static void test_ml_fp32_to_int8_positive_zp(void)
+{
+ int8_t i8[6];
+ float scale = 0.0223f;
+ int8_t zero_point = 56;
+ float fp32[6] = {-4.1f, -3.4f, -2.5f, 0, 1.4f, 2.9f};
+ int8_t i8_expected[6] = {-127, -96, -56, 56, 119, 127};
+
+ odp_ml_fp32_to_int8(i8, fp32, 6, scale, zero_point);
+
+ for (uint32_t i = 0; i < 6; i++)
+ CU_ASSERT(i8[i] == i8_expected[i]);
+}
+
+static void test_ml_fp32_to_int8_negative_zp(void)
+{
+ int8_t i8[6];
+ float scale = 0.0223f;
+ int8_t zero_point = -56;
+ float fp32[6] = {-3.4f, -2.5f, 0, 1.4f, 2.9f, 4.1f};
+ int8_t i8_expected[6] = {-127, -127, -56, 7, 74, 127};
+
+ odp_ml_fp32_to_int8(i8, fp32, 6, scale, zero_point);
+
+ for (uint32_t i = 0; i < 6; i++)
+ CU_ASSERT(i8[i] == i8_expected[i]);
+}
+
+static void test_ml_fp32_from_int8(void)
+{
+ float fp32[6];
+ float scale = 0.05f;
+ int8_t zero_point = 56;
+ int8_t i8[6] = {-128, 46, 0, 56, 85, 127};
+ float fp32_expected[6] = {-9.2f, -0.5f, -2.8f, 0.0f, 1.45f, 3.55f};
+
+ odp_ml_fp32_from_int8(fp32, i8, 6, scale, zero_point);
+
+ for (uint32_t i = 0; i < 6; i++)
+ CU_ASSERT(fp32[i] == fp32_expected[i]);
+}
+
+static int approx_equal(double a, double b)
+{
+ const double tolerance = .01;
+
+ if (a < 0 && b < 0) {
+ a = -a;
+ b = -b;
+ }
+
+ if (a < b) {
+ double tmp = a;
+
+ a = b;
+ b = tmp;
+ }
+
+ return (a * (1 - tolerance) < b && a * (1 + tolerance) > b);
+}
+
+static void test_ml_fp32_fp16(void)
+{
+ float fp32[4];
+ uint16_t fp16[4];
+ float big = 1, small = 1;
+ const float phi = 1.618033988749;
+
+ fp32[0] = 0.0;
+ fp32[1] = -0.0;
+ memset(fp16, 1, sizeof(fp16));
+ odp_ml_fp32_to_fp16(fp16, fp32, 2);
+ memset(fp32, 1, sizeof(fp32));
+ odp_ml_fp32_from_fp16(fp32, fp16, 2);
+ CU_ASSERT(fp32[0] == 0);
+ CU_ASSERT(fp32[1] == 0);
+
+ /*
+ * 65504 is the largest normal number for fp16 with 5 exponent bits (IEEE 754-2008).
+ */
+ while (big < 65504 / 2) {
+ fp32[0] = big;
+ fp32[1] = -big;
+ fp32[2] = small;
+ fp32[3] = -small;
+ memset(fp16, 0, sizeof(fp16));
+ odp_ml_fp32_to_fp16(fp16, fp32, 4);
+ memset(fp32, 0, sizeof(fp32));
+ odp_ml_fp32_from_fp16(fp32, fp16, 4);
+ CU_ASSERT(approx_equal(fp32[0], big));
+ CU_ASSERT(approx_equal(fp32[1], -big));
+ CU_ASSERT(approx_equal(fp32[2], small));
+ CU_ASSERT(approx_equal(fp32[3], -small));
+ big *= phi;
+ small /= phi;
+ }
+}
+
+odp_testinfo_t ml_suite[] = {
+ ODP_TEST_INFO(test_ml_capability),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_param_init, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_debug, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(ml_compl_pool_create_max_pools, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(compl_pool_info, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(compl_alloc_max, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(compl_pool_lookup, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_compl_user_area, check_event_user_area),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_compl_user_area_init, check_event_user_area_init),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_to_uint8, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_from_uint8, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_to_int8, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_to_int8_positive_zp, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_to_int8_negative_zp, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_from_int8, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_fp32_fp16, check_ml_support),
+ ODP_TEST_INFO_NULL
+};
+
+odp_suiteinfo_t ml_suites[] = {
+ {"ML", ml_suite_init, NULL, ml_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(ml_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/packet/.gitignore b/test/validation/api/packet/.gitignore
index c05530d2d..c05530d2d 100644
--- a/test/common_plat/validation/api/packet/.gitignore
+++ b/test/validation/api/packet/.gitignore
diff --git a/test/validation/api/packet/Makefile.am b/test/validation/api/packet/Makefile.am
new file mode 100644
index 000000000..ad5775d7e
--- /dev/null
+++ b/test/validation/api/packet/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = packet_main
+packet_main_SOURCES = packet.c
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
new file mode 100644
index 000000000..ca9c73f17
--- /dev/null
+++ b/test/validation/api/packet/packet.c
@@ -0,0 +1,4597 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
+ * Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <test_packet_ipv4.h>
+#include <test_packet_ipsec.h>
+#include <test_packet_ipv6.h>
+
+#include <odp/helper/odph_api.h>
+
+/* Reserve some tailroom for tests */
+#define TAILROOM_RESERVE 4
+/* Number of packets in the test packet pool */
+#define PACKET_POOL_NUM 300
+/* Number of large, possibly segmented, test packets */
+#define PACKET_POOL_NUM_SEG 4
+ODP_STATIC_ASSERT(PACKET_POOL_NUM_SEG > 1 &&
+ PACKET_POOL_NUM_SEG < PACKET_POOL_NUM,
+ "Invalid PACKET_POOL_NUM_SEG value");
+
+/* Number of packets in parse test */
+#define PARSE_TEST_NUM_PKT 10
+/* Maximum offset to Ethernet in parse tests */
+#define MAX_PARSE_L2_OFFSET 207
+
+/* Default packet vector size */
+#define PKT_VEC_SIZE 64
+/* Number of packet vectors in default pool */
+#define PKT_VEC_NUM 10
+/* Number of preallocated packet vector test packets */
+#define PKT_VEC_PACKET_NUM PKT_VEC_NUM
+
+/* Maximum packet length when 'pool_capa.pkt.max_len == 0' */
+#define DEFAULT_MAX_LEN (32 * 1024)
+
+static odp_pool_capability_t pool_capa;
+static odp_pool_param_t default_param;
+static odp_pool_t default_pool;
+static uint32_t packet_len;
+
+static uint32_t segmented_packet_len;
+static odp_bool_t segmentation_supported = true;
+
+odp_packet_t test_packet, segmented_test_packet;
+/* Packet vector globals */
+static odp_packet_t pkt_vec[PKT_VEC_PACKET_NUM];
+static odp_packet_vector_t pktv_default = ODP_PACKET_VECTOR_INVALID;
+static odp_pool_t vector_default_pool = ODP_POOL_INVALID;
+
+static struct udata_struct {
+ uint64_t u64;
+ uint32_t u32;
+ char str[10];
+} test_packet_udata = {
+ 123456,
+ 789912,
+ "abcdefg",
+};
+
+static struct {
+ odp_pool_t pool;
+ odp_proto_chksums_t all_chksums;
+ uint32_t l2_offset[PARSE_TEST_NUM_PKT];
+} parse_test;
+
+static uint32_t parse_test_pkt_len[] = {
+ sizeof(test_packet_arp),
+ sizeof(test_packet_ipv4_icmp),
+ sizeof(test_packet_ipv4_tcp),
+ sizeof(test_packet_ipv4_udp),
+ sizeof(test_packet_vlan_ipv4_udp),
+ sizeof(test_packet_vlan_qinq_ipv4_udp),
+ sizeof(test_packet_ipv6_icmp),
+ sizeof(test_packet_ipv6_tcp),
+ sizeof(test_packet_ipv6_udp),
+ sizeof(test_packet_vlan_ipv6_udp),
+ sizeof(test_packet_ipv4_sctp),
+ sizeof(test_packet_ipv4_ipsec_ah),
+ sizeof(test_packet_ipv4_ipsec_esp),
+ sizeof(test_packet_ipv6_ipsec_ah),
+ sizeof(test_packet_ipv6_ipsec_esp),
+ sizeof(test_packet_mcast_eth_ipv4_udp),
+ sizeof(test_packet_bcast_eth_ipv4_udp),
+ sizeof(test_packet_mcast_eth_ipv6_udp),
+ sizeof(test_packet_ipv4_udp_first_frag),
+ sizeof(test_packet_ipv4_udp_last_frag),
+ sizeof(test_packet_ipv4_rr_nop_icmp)
+};
+
+#define packet_compare_offset(pkt1, off1, pkt2, off2, len) \
+ _packet_compare_offset((pkt1), (off1), (pkt2), (off2), (len), __LINE__)
+
+#define packet_compare_data(pkt1, pkt2) \
+ _packet_compare_data((pkt1), (pkt2), __LINE__)
+
+static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2,
+ int line)
+{
+ uint32_t len = odp_packet_len(pkt1);
+ uint32_t offset = 0;
+ uint32_t seglen1, seglen2, cmplen;
+ void *pkt1map, *pkt2map;
+ int ret;
+
+ CU_ASSERT_FATAL(len == odp_packet_len(pkt2));
+
+ while (len > 0) {
+ seglen1 = 0;
+ seglen2 = 0;
+ pkt1map = odp_packet_offset(pkt1, offset, &seglen1, NULL);
+ pkt2map = odp_packet_offset(pkt2, offset, &seglen2, NULL);
+
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+ cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
+ ret = memcmp(pkt1map, pkt2map, cmplen);
+
+ if (ret) {
+ printf("\ncompare_data failed: line %i, offset %"
+ PRIu32 "\n", line, offset);
+ }
+
+ CU_ASSERT(ret == 0);
+
+ offset += cmplen;
+ len -= cmplen;
+ }
+}
+
+static int packet_sanity_check(odp_packet_t pkt)
+{
+ odp_packet_seg_t seg;
+ uint32_t len = 0;
+
+ for (seg = odp_packet_first_seg(pkt);
+ seg != ODP_PACKET_SEG_INVALID;
+ seg = odp_packet_next_seg(pkt, seg)) {
+ uint32_t seglen = odp_packet_seg_data_len(pkt, seg);
+
+ CU_ASSERT(seglen != 0);
+ if (seglen == 0)
+ return 1;
+ len += seglen;
+ }
+ CU_ASSERT(len == odp_packet_len(pkt));
+ return len != odp_packet_len(pkt);
+}
+
+static int fill_data_forward(odp_packet_t pkt, uint32_t offset, uint32_t len,
+ uint32_t *cur_data)
+{
+ uint8_t buf[len];
+ uint32_t i, data;
+
+ data = *cur_data;
+
+ for (i = 0; i < len; i++)
+ buf[i] = data++;
+
+ *cur_data = data;
+
+ return odp_packet_copy_from_mem(pkt, offset, len, buf);
+}
+
+static int fill_data_backward(odp_packet_t pkt, uint32_t offset, uint32_t len,
+ uint32_t *cur_data)
+{
+ uint8_t buf[len];
+ uint32_t i, data;
+
+ data = *cur_data;
+
+ for (i = 0; i < len; i++)
+ buf[len - i - 1] = data++;
+
+ *cur_data = data;
+
+ return odp_packet_copy_from_mem(pkt, offset, len, buf);
+}
+
+/* Suite init: query pool capabilities, create the default packet pool and
+ * the two global test packets (single-segment test_packet and, when the
+ * implementation supports it, segmented_test_packet), fill both with an
+ * incrementing byte pattern and copy test_packet_udata into their user
+ * areas. Returns 0 on success, -1 on failure. */
+static int packet_suite_init(void)
+{
+	odp_pool_param_t params;
+	odp_packet_t pkt_tbl[PACKET_POOL_NUM_SEG];
+	struct udata_struct *udat;
+	uint32_t uarea_size;
+	uint8_t data = 0;
+	uint32_t i;
+	uint32_t num = PACKET_POOL_NUM;
+	int ret;
+
+	memset(&pool_capa, 0, sizeof(odp_pool_capability_t));
+
+	if (odp_pool_capability(&pool_capa) < 0) {
+		ODPH_ERR("odp_pool_capability() failed\n");
+		return -1;
+	}
+
+	if (pool_capa.pkt.max_uarea_size == 0)
+		printf("Warning: Packet user area not supported\n");
+
+	/* Zero means "no limit"; substitute an arbitrary segment count so
+	 * the segmented length math below has something to work with. */
+	if (pool_capa.pkt.max_segs_per_pkt == 0)
+		pool_capa.pkt.max_segs_per_pkt = 10;
+
+	/* Pick a typical packet size and decrement it to the single segment
+	 * limit if needed (min_seg_len may be equal to max_len
+	 * on some systems). */
+	packet_len = 512;
+	while (packet_len > (pool_capa.pkt.min_seg_len - TAILROOM_RESERVE))
+		packet_len--;
+
+	if (pool_capa.pkt.max_len) {
+		segmented_packet_len = pool_capa.pkt.max_len;
+	} else {
+		segmented_packet_len = pool_capa.pkt.min_seg_len *
+				       pool_capa.pkt.max_segs_per_pkt;
+	}
+	if (pool_capa.pkt.max_num != 0 && pool_capa.pkt.max_num < num)
+		num = pool_capa.pkt.max_num;
+
+	odp_pool_param_init(&params);
+
+	params.type           = ODP_POOL_PACKET;
+	params.pkt.seg_len    = pool_capa.pkt.min_seg_len;
+	params.pkt.len        = pool_capa.pkt.min_seg_len;
+	/* Defining max_len to ensure packet of segmented_packet_len
+	 * length can be allocated from this pool.
+	 */
+	params.pkt.max_len    = segmented_packet_len;
+	params.pkt.num        = num;
+	params.pkt.uarea_size = sizeof(struct udata_struct);
+
+	/* Clamp user area to the implementation limit (may become zero) */
+	if (params.pkt.uarea_size > pool_capa.pkt.max_uarea_size)
+		params.pkt.uarea_size = pool_capa.pkt.max_uarea_size;
+
+	uarea_size = params.pkt.uarea_size;
+	/* Saved so other tests can compare against the creation params */
+	memcpy(&default_param, &params, sizeof(odp_pool_param_t));
+
+	default_pool = odp_pool_create("default_pool", &params);
+	if (default_pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Default pool create failed\n");
+		return -1;
+	}
+
+	test_packet = odp_packet_alloc(default_pool, packet_len);
+
+	if (test_packet == ODP_PACKET_INVALID) {
+		ODPH_ERR("Packet alloc failed\n");
+		return -1;
+	}
+
+	/* Fill test_packet with bytes 0, 1, 2, ... */
+	for (i = 0; i < packet_len; i++) {
+		odp_packet_copy_from_mem(test_packet, i, 1, &data);
+		data++;
+	}
+
+	/* Try to allocate PACKET_POOL_NUM_SEG largest possible packets to see
+	 * if segmentation is supported. Shrink the target length by one
+	 * segment per failed round until the allocation succeeds or the
+	 * length reaches a single segment. */
+	do {
+		ret = odp_packet_alloc_multi(default_pool, segmented_packet_len,
+					     pkt_tbl, PACKET_POOL_NUM_SEG);
+		if (ret !=  PACKET_POOL_NUM_SEG) {
+			if (ret > 0)
+				odp_packet_free_multi(pkt_tbl, ret);
+			segmented_packet_len -= pool_capa.pkt.min_seg_len;
+			continue;
+		}
+	} while (ret != PACKET_POOL_NUM_SEG &&
+		 segmented_packet_len > pool_capa.pkt.min_seg_len);
+
+	if (ret != PACKET_POOL_NUM_SEG) {
+		ODPH_ERR("Packet alloc failed\n");
+		return -1;
+	}
+	/* Keep one packet as the global segmented test packet */
+	segmented_test_packet = pkt_tbl[0];
+	odp_packet_free_multi(&pkt_tbl[1], PACKET_POOL_NUM_SEG - 1);
+
+	if (odp_packet_is_valid(test_packet) == 0 ||
+	    odp_packet_is_valid(segmented_test_packet) == 0) {
+		ODPH_ERR("odp_packet_is_valid() failed\n");
+		return -1;
+	}
+
+	segmentation_supported = odp_packet_is_segmented(segmented_test_packet);
+
+	/* Fill the segmented packet with the same incrementing pattern */
+	data = 0;
+	for (i = 0; i < segmented_packet_len; i++) {
+		odp_packet_copy_from_mem(segmented_test_packet, i, 1, &data);
+		data++;
+	}
+
+	udat = odp_packet_user_area(test_packet);
+	if (odp_packet_user_area_size(test_packet) < uarea_size) {
+		ODPH_ERR("Bad packet user area size %u\n", odp_packet_user_area_size(test_packet));
+		return -1;
+	}
+
+	odp_pool_print(default_pool);
+	memcpy(udat, &test_packet_udata, uarea_size);
+
+	udat = odp_packet_user_area(segmented_test_packet);
+	if (odp_packet_user_area_size(segmented_test_packet) < uarea_size) {
+		ODPH_ERR("Bad segmented packet user area size %u\n",
+			 odp_packet_user_area_size(segmented_test_packet));
+		return -1;
+	}
+
+	memcpy(udat, &test_packet_udata, uarea_size);
+
+	return 0;
+}
+
+/* Suite teardown: release the global test packets and destroy the default
+ * pool. Returns 0 on success, -1 if the pool could not be destroyed. */
+static int packet_suite_term(void)
+{
+	int rc = 0;
+
+	odp_packet_free(test_packet);
+	odp_packet_free(segmented_test_packet);
+
+	if (odp_pool_destroy(default_pool) != 0)
+		rc = -1;
+
+	return rc;
+}
+
+/* Set all non-conflicting metadata flags to 'val'. Mutually exclusive
+ * flags (e.g. ipv6/arp vs ipv4, tcp vs udp) are intentionally left out
+ * so all of these can be set simultaneously. */
+static void packet_set_inflags_common(odp_packet_t pkt, int val)
+{
+	odp_packet_has_l2_set(pkt, val);
+	odp_packet_has_l3_set(pkt, val);
+	odp_packet_has_l4_set(pkt, val);
+	odp_packet_has_eth_set(pkt, val);
+	odp_packet_has_eth_bcast_set(pkt, val);
+	odp_packet_has_eth_mcast_set(pkt, val);
+	odp_packet_has_jumbo_set(pkt, val);
+	odp_packet_has_vlan_set(pkt, val);
+	odp_packet_has_ipv4_set(pkt, val);
+	odp_packet_has_ip_bcast_set(pkt, val);
+	odp_packet_has_ipfrag_set(pkt, val);
+	odp_packet_has_ipopt_set(pkt, val);
+	odp_packet_has_ipsec_set(pkt, val);
+	odp_packet_has_udp_set(pkt, val);
+	odp_packet_user_flag_set(pkt, val);
+}
+
+/* Check that all non-conflicting metadata flags equal !!val (the same
+ * set that packet_set_inflags_common() writes). */
+static void packet_check_inflags_common(odp_packet_t pkt, int val)
+{
+	CU_ASSERT(odp_packet_has_l2(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_l3(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_l4(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth_bcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth_mcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_jumbo(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_vlan(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipv4(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ip_bcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipfrag(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipopt(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipsec(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_udp(pkt) == !!val);
+	CU_ASSERT(odp_packet_user_flag(pkt) == !!val);
+}
+
+/* Check that every metadata flag (including the mutually exclusive ones)
+ * equals !!val. Only meaningful with val == 0, i.e. after alloc/reset
+ * when all flags must be clear. */
+static void packet_check_inflags_all(odp_packet_t pkt, int val)
+{
+	CU_ASSERT(odp_packet_has_l2(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_l3(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_l4(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth_bcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_eth_mcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_jumbo(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_vlan(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_vlan_qinq(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_arp(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipv4(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipv6(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ip_bcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ip_mcast(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipfrag(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipopt(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_ipsec(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_udp(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_tcp(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_sctp(pkt) == !!val);
+	CU_ASSERT(odp_packet_has_icmp(pkt) == !!val);
+	CU_ASSERT(odp_packet_user_flag(pkt) == !!val);
+}
+
+/* Test single packet alloc/free from a one-packet pool: event conversion
+ * metadata, default flag/user-pointer state, pool exhaustion, and that a
+ * freed packet is returned to the pool. */
+static void packet_test_alloc_free(void)
+{
+	odp_pool_t pool;
+	odp_packet_t packet;
+	odp_pool_param_t params;
+	odp_event_subtype_t subtype;
+	odp_event_t ev;
+
+	odp_pool_param_init(&params);
+
+	params.type    = ODP_POOL_PACKET;
+	params.pkt.seg_len = pool_capa.pkt.min_seg_len;
+	params.pkt.len     = pool_capa.pkt.min_seg_len;
+	params.pkt.num     = 1;
+	params.pkt.max_num = 1;
+
+	pool = odp_pool_create("packet_pool_alloc", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate the only buffer from the pool */
+	packet = odp_packet_alloc(pool, packet_len);
+	CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(packet) == packet_len);
+
+	/* Event view of the packet must report packet type/subtype/pool */
+	ev = odp_packet_to_event(packet);
+	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+	CU_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+	CU_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_BASIC);
+	CU_ASSERT(odp_event_types(ev, &subtype) == ODP_EVENT_PACKET);
+	CU_ASSERT(subtype == ODP_EVENT_PACKET_BASIC);
+	CU_ASSERT(odp_event_pool(ev) == pool);
+
+	CU_ASSERT(odp_packet_subtype(packet) == ODP_EVENT_PACKET_BASIC);
+	CU_ASSERT(odp_packet_to_u64(packet) !=
+		  odp_packet_to_u64(ODP_PACKET_INVALID));
+
+	/* User pointer should be NULL after alloc */
+	CU_ASSERT(odp_packet_user_ptr(packet) == NULL);
+
+	/* Packet flags should be zero */
+	packet_check_inflags_all(packet, 0);
+
+	/* Pool should have only one packet */
+	CU_ASSERT_FATAL(odp_packet_alloc(pool, packet_len)
+			== ODP_PACKET_INVALID);
+
+	odp_packet_free(packet);
+
+	/* Check that the buffer was returned back to the pool */
+	packet = odp_packet_alloc(pool, packet_len);
+	CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(packet) == packet_len);
+
+	odp_packet_free(packet);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Wrapper to call odp_packet_alloc_multi multiple times until
+ * either no more packets are returned, or num packets were allocated.
+ * Returns the number of packets actually allocated (may be < num). */
+static int packet_alloc_multi(odp_pool_t pool, uint32_t pkt_len,
+			      odp_packet_t pkt[], int num)
+{
+	int ret, total = 0;
+
+	do {
+		ret = odp_packet_alloc_multi(pool, pkt_len, pkt + total,
+					     num - total);
+		/* Negative return would indicate an allocation error */
+		CU_ASSERT(ret >= 0);
+		CU_ASSERT(ret <= num - total);
+		total += ret;
+	} while (total < num && ret > 0);
+
+	return total;
+}
+
+/* Test multi alloc/free across two pools: exhaust both pools, verify
+ * per-packet metadata, free packets from both pools with a single
+ * odp_packet_free_multi() call, then verify both pools are refilled. */
+static void packet_test_alloc_free_multi(void)
+{
+	const int num_pkt = 2;
+	odp_pool_t pool[2];
+	int i, ret;
+	odp_packet_t packet[2 * num_pkt + 1];
+	odp_packet_t inval_pkt[num_pkt];
+	odp_pool_param_t params;
+
+	odp_pool_param_init(&params);
+
+	params.type    = ODP_POOL_PACKET;
+	params.pkt.seg_len = pool_capa.pkt.min_seg_len;
+	params.pkt.len     = pool_capa.pkt.min_seg_len;
+	params.pkt.num     = num_pkt;
+	params.pkt.max_num = num_pkt;
+
+	pool[0] = odp_pool_create("packet_pool_alloc_multi_0", &params);
+	pool[1] = odp_pool_create("packet_pool_alloc_multi_1", &params);
+	CU_ASSERT_FATAL(pool[0] != ODP_POOL_INVALID);
+	CU_ASSERT_FATAL(pool[1] != ODP_POOL_INVALID);
+
+	/* Allocate all the packets from the pools */
+
+	/* Ask for one packet more than the pool holds; only num_pkt fit */
+	ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt + 1);
+	CU_ASSERT_FATAL(ret == num_pkt);
+	ret = packet_alloc_multi(pool[1], packet_len,
+				 &packet[num_pkt], num_pkt + 1);
+	CU_ASSERT_FATAL(ret == num_pkt);
+
+	for (i = 0; i < 2 * num_pkt; ++i) {
+		odp_event_subtype_t subtype;
+
+		CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
+		CU_ASSERT(odp_event_type(odp_packet_to_event(packet[i])) ==
+			  ODP_EVENT_PACKET);
+		CU_ASSERT(odp_event_subtype(odp_packet_to_event(packet[i])) ==
+			  ODP_EVENT_PACKET_BASIC);
+		CU_ASSERT(odp_event_types(odp_packet_to_event(packet[i]),
+					  &subtype) ==
+			  ODP_EVENT_PACKET);
+		CU_ASSERT(subtype == ODP_EVENT_PACKET_BASIC);
+		CU_ASSERT(odp_packet_subtype(packet[i]) ==
+			  ODP_EVENT_PACKET_BASIC);
+		CU_ASSERT(odp_packet_to_u64(packet[i]) !=
+			  odp_packet_to_u64(ODP_PACKET_INVALID));
+
+		/* User pointer should be NULL after alloc */
+		CU_ASSERT(odp_packet_user_ptr(packet[i]) == NULL);
+	}
+
+	/* Pools should have no more packets */
+	ret = odp_packet_alloc_multi(pool[0], packet_len, inval_pkt, num_pkt);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_alloc_multi(pool[1], packet_len, inval_pkt, num_pkt);
+	CU_ASSERT(ret == 0);
+
+	/* Free all packets from all pools at once */
+	odp_packet_free_multi(packet, 2 * num_pkt);
+
+	/* Check that all the packets were returned back to their pools */
+	ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt);
+	CU_ASSERT(ret);
+	ret = packet_alloc_multi(pool[1], packet_len,
+				 &packet[num_pkt], num_pkt);
+	CU_ASSERT(ret);
+
+	for (i = 0; i < 2 * num_pkt; ++i) {
+		CU_ASSERT_FATAL(packet[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
+	}
+	odp_packet_free_multi(packet, 2 * num_pkt);
+	CU_ASSERT(odp_pool_destroy(pool[0]) == 0);
+	CU_ASSERT(odp_pool_destroy(pool[1]) == 0);
+}
+
+/* Test odp_packet_free_sp() (free multiple packets from the Same Pool):
+ * allocate, free, and verify the packets returned to the pool. */
+static void packet_test_free_sp(void)
+{
+	const int num_pkt = 10;
+	odp_pool_t pool;
+	int i, ret;
+	odp_packet_t packet[num_pkt];
+	odp_pool_param_t params;
+	uint32_t len = packet_len;
+
+	/* Clamp test length to the implementation's max packet length */
+	if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+		len = pool_capa.pkt.max_len;
+
+	odp_pool_param_init(&params);
+
+	params.type    = ODP_POOL_PACKET;
+	params.pkt.len = len;
+	params.pkt.num = num_pkt;
+
+	pool = odp_pool_create("packet_pool_free_sp", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	ret = packet_alloc_multi(pool, len, packet, num_pkt);
+	CU_ASSERT_FATAL(ret == num_pkt);
+	for (i = 0; i < num_pkt; i++) {
+		CU_ASSERT_FATAL(packet[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(packet[i]) == len);
+	}
+	odp_packet_free_sp(packet, num_pkt);
+
+	/* Check that all the packets were returned back to the pool */
+	ret = packet_alloc_multi(pool, len, packet, num_pkt);
+	CU_ASSERT_FATAL(ret == num_pkt);
+	for (i = 0; i < num_pkt; i++) {
+		CU_ASSERT_FATAL(packet[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(packet[i]) == len);
+	}
+	odp_packet_free_sp(packet, num_pkt);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test single and multi alloc of (potentially) segmented packets at half
+ * of max length and at full max length. */
+static void packet_test_alloc_segmented(void)
+{
+	const int num = 5;
+	odp_packet_t pkts[num];
+	odp_packet_t pkt;
+	uint32_t max_len;
+	odp_pool_t pool;
+	odp_pool_param_t params;
+	int ret, i, num_alloc;
+
+	/* Zero means "no limit"; same fallback as in packet_suite_init() */
+	if (pool_capa.pkt.max_segs_per_pkt == 0)
+		pool_capa.pkt.max_segs_per_pkt = 10;
+
+	if (pool_capa.pkt.max_len)
+		max_len = pool_capa.pkt.max_len;
+	else
+		max_len = pool_capa.pkt.min_seg_len *
+			  pool_capa.pkt.max_segs_per_pkt;
+
+	odp_pool_param_init(&params);
+
+	params.type         = ODP_POOL_PACKET;
+	params.pkt.seg_len  = pool_capa.pkt.min_seg_len;
+	params.pkt.len      = max_len;
+
+	/* Ensure that 'num' segmented packets can be allocated */
+	params.pkt.num      = num * pool_capa.pkt.max_segs_per_pkt;
+
+	pool = odp_pool_create("pool_alloc_segmented", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Less than max len allocs */
+	pkt = odp_packet_alloc(pool, max_len / 2);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(pkt) == max_len / 2);
+
+	odp_packet_free(pkt);
+
+	/* Retry until all 'num' packets are allocated (implementation may
+	 * return fewer than requested per call) */
+	num_alloc = 0;
+	for (i = 0; i < num; i++) {
+		ret = odp_packet_alloc_multi(pool, max_len / 2,
+					     &pkts[num_alloc], num - num_alloc);
+		CU_ASSERT_FATAL(ret >= 0);
+		num_alloc += ret;
+		if (num_alloc >= num)
+			break;
+	}
+
+	CU_ASSERT(num_alloc == num);
+
+	for (i = 0; i < num_alloc; i++)
+		CU_ASSERT(odp_packet_len(pkts[i]) == max_len / 2);
+
+	odp_packet_free_multi(pkts, num_alloc);
+
+	/* Max len allocs */
+	pkt = odp_packet_alloc(pool, max_len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(pkt) == max_len);
+
+	odp_packet_free(pkt);
+
+	num_alloc = 0;
+	for (i = 0; i < num; i++) {
+		ret = odp_packet_alloc_multi(pool, max_len,
+					     &pkts[num_alloc], num - num_alloc);
+		CU_ASSERT_FATAL(ret >= 0);
+		num_alloc += ret;
+		if (num_alloc >= num)
+			break;
+	}
+
+	CU_ASSERT(num_alloc == num);
+
+	for (i = 0; i < num_alloc; i++)
+		CU_ASSERT(odp_packet_len(pkts[i]) == max_len);
+
+	odp_packet_free_multi(pkts, num_alloc);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test single and multi alloc at the maximum supported packet length
+ * (DEFAULT_MAX_LEN when the implementation reports no limit). */
+static void packet_test_alloc_max_len(void)
+{
+	const int num = 5;
+	odp_packet_t pkts[num];
+	odp_packet_t pkt;
+	uint32_t max_len;
+	odp_pool_t pool;
+	odp_pool_param_t params;
+	int ret, i, num_alloc;
+
+	max_len = pool_capa.pkt.max_len;
+	if (!max_len)
+		max_len = DEFAULT_MAX_LEN;
+
+	odp_pool_param_init(&params);
+	params.type    = ODP_POOL_PACKET;
+	params.pkt.len = max_len;
+	params.pkt.num = num;
+
+	pool = odp_pool_create("pool_alloc_max_len", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	pkt = odp_packet_alloc(pool, max_len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(pkt) == max_len);
+
+	odp_packet_free(pkt);
+
+	/* Retry until all 'num' packets are allocated */
+	num_alloc = 0;
+	for (i = 0; i < num; i++) {
+		ret = odp_packet_alloc_multi(pool, max_len,
+					     &pkts[num_alloc], num - num_alloc);
+		CU_ASSERT_FATAL(ret >= 0);
+		num_alloc += ret;
+		if (num_alloc >= num)
+			break;
+	}
+
+	CU_ASSERT(num_alloc == num);
+
+	for (i = 0; i < num_alloc; i++)
+		CU_ASSERT(odp_packet_len(pkts[i]) == max_len);
+
+	odp_packet_free_multi(pkts, num_alloc);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test alloc from a pool configured with the maximum segment length and
+ * verify the first segment is at least that long. */
+static void packet_test_alloc_max_segment(void)
+{
+	const int num = 5;
+	uint32_t max_len, max_seg_len;
+	odp_packet_t pkt;
+	odp_pool_t pool;
+	odp_pool_param_t params;
+
+	max_len = pool_capa.pkt.max_len;
+	if (max_len == 0)
+		max_len = DEFAULT_MAX_LEN;
+
+	/* Segment length cannot usefully exceed the packet length */
+	max_seg_len = pool_capa.pkt.max_seg_len;
+	if (max_seg_len == 0 || max_seg_len > max_len)
+		max_seg_len = max_len;
+
+	odp_pool_param_init(&params);
+	params.type        = ODP_POOL_PACKET;
+	params.pkt.seg_len = max_seg_len;
+	params.pkt.len     = max_len;
+	params.pkt.num     = num;
+
+	pool = odp_pool_create("pool_alloc_max_segment", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	pkt = odp_packet_alloc(pool, max_len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(pkt) == max_len);
+	CU_ASSERT(odp_packet_seg_len(pkt) >= max_seg_len);
+
+	odp_packet_free(pkt);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test that packet data pointers respect the requested pool data
+ * alignment (up to 256 bytes, clamped to max_align). */
+static void packet_test_alloc_align(void)
+{
+	odp_pool_t pool;
+	odp_pool_param_t params;
+	uintptr_t data, mask;
+	uint32_t i;
+	uint32_t error_print = 10;
+	uint32_t len = packet_len;
+	uint32_t align = 256;
+	uint32_t num = 100;
+	odp_packet_t pkt[num];
+
+	CU_ASSERT(pool_capa.pkt.max_align >= 2);
+
+	if (align > pool_capa.pkt.max_align)
+		align = pool_capa.pkt.max_align;
+
+	/* align is a power of two, so align - 1 is the misalignment mask */
+	mask = align - 1;
+
+	odp_pool_param_init(&params);
+
+	params.type      = ODP_POOL_PACKET;
+	params.pkt.len   = len;
+	params.pkt.num   = num;
+	params.pkt.align = align;
+
+	pool = odp_pool_create("packet_pool_align", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate all packets and check data pointer alignment of each */
+	for (i = 0; i < num; i++) {
+		pkt[i] = odp_packet_alloc(pool, len);
+		CU_ASSERT_FATAL(pkt[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(pkt[i]) == len);
+		data = (uintptr_t)odp_packet_data(pkt[i]);
+
+		if (data & mask) {
+			/* Print only first couple of failures to the log */
+			if (error_print > 0) {
+				CU_ASSERT((data & mask) == 0);
+				printf("\nError: Bad data align. Pointer %p, requested align %u\n",
+				       (void *)data, align);
+				error_print--;
+			}
+		}
+	}
+
+	odp_packet_free_multi(pkt, num);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test packet <-> event conversion (single and multi variants): converted
+ * handles must round-trip and keep their data intact. */
+static void packet_test_event_conversion(void)
+{
+	odp_packet_t pkt0 = test_packet;
+	odp_packet_t pkt1 = segmented_test_packet;
+	odp_packet_t tmp_pkt;
+	odp_event_t event;
+	odp_event_subtype_t subtype;
+	odp_packet_t pkt[2] = {pkt0, pkt1};
+	odp_event_t ev[2];
+	int i;
+
+	event = odp_packet_to_event(pkt0);
+	CU_ASSERT_FATAL(event != ODP_EVENT_INVALID);
+	CU_ASSERT(odp_event_type(event) == ODP_EVENT_PACKET);
+	CU_ASSERT(odp_event_subtype(event) == ODP_EVENT_PACKET_BASIC);
+	CU_ASSERT(odp_event_types(event, &subtype) ==
+		  ODP_EVENT_PACKET);
+	CU_ASSERT(subtype == ODP_EVENT_PACKET_BASIC);
+
+	/* Round-trip must yield the original handle with unchanged data */
+	tmp_pkt = odp_packet_from_event(event);
+	CU_ASSERT_FATAL(tmp_pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(tmp_pkt == pkt0);
+	packet_compare_data(tmp_pkt, pkt0);
+
+	odp_packet_to_event_multi(pkt, ev, 2);
+
+	for (i = 0; i < 2; i++) {
+		CU_ASSERT_FATAL(ev[i] != ODP_EVENT_INVALID);
+		CU_ASSERT(odp_event_type(ev[i]) == ODP_EVENT_PACKET);
+		CU_ASSERT(odp_event_subtype(ev[i]) == ODP_EVENT_PACKET_BASIC);
+	}
+
+	odp_packet_from_event_multi(pkt, ev, 2);
+	CU_ASSERT(pkt[0] == pkt0);
+	CU_ASSERT(pkt[1] == pkt1);
+	packet_compare_data(pkt[0], pkt0);
+	packet_compare_data(pkt[1], pkt1);
+}
+
+/* Test basic per-packet metadata: head/data pointers, pool/input handles,
+ * ones-complement range, flow hash, classifier mark, timestamp, and free
+ * control. */
+static void packet_test_basic_metadata(void)
+{
+	odp_packet_t pkt = test_packet;
+	odp_time_t ts;
+	odp_packet_data_range_t range;
+
+	CU_ASSERT_PTR_NOT_NULL(odp_packet_head(pkt));
+	CU_ASSERT_PTR_NOT_NULL(odp_packet_data(pkt));
+
+	CU_ASSERT(odp_packet_pool(pkt) != ODP_POOL_INVALID);
+	/* Packet was allocated by application so shouldn't have valid pktio. */
+	CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
+	CU_ASSERT(odp_packet_input_index(pkt) < 0);
+
+	/* Packet was not received from a packet IO, shouldn't have ones
+	 * complement calculated. */
+	odp_packet_ones_comp(pkt, &range);
+	CU_ASSERT(range.length == 0);
+
+	/* Flow hash: set, read back, clear */
+	odp_packet_flow_hash_set(pkt, UINT32_MAX);
+	CU_ASSERT(odp_packet_has_flow_hash(pkt));
+	CU_ASSERT(odp_packet_flow_hash(pkt) == UINT32_MAX);
+	odp_packet_has_flow_hash_clr(pkt);
+	CU_ASSERT(!odp_packet_has_flow_hash(pkt));
+
+	/* No classifier has touched this packet */
+	CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+
+	/* Timestamp: set, read back, clear */
+	ts = odp_time_global();
+	odp_packet_ts_set(pkt, ts);
+	CU_ASSERT_FATAL(odp_packet_has_ts(pkt));
+	CU_ASSERT(!odp_time_cmp(ts, odp_packet_ts(pkt)));
+	odp_packet_has_ts_clr(pkt);
+	CU_ASSERT(!odp_packet_has_ts(pkt));
+
+	/* Free control: default disabled, toggle and restore */
+	CU_ASSERT(odp_packet_free_ctrl(pkt) == ODP_PACKET_FREE_CTRL_DISABLED);
+	odp_packet_free_ctrl_set(pkt, ODP_PACKET_FREE_CTRL_DONT_FREE);
+	CU_ASSERT(odp_packet_free_ctrl(pkt) == ODP_PACKET_FREE_CTRL_DONT_FREE);
+	odp_packet_free_ctrl_set(pkt, ODP_PACKET_FREE_CTRL_DISABLED);
+	CU_ASSERT(odp_packet_free_ctrl(pkt) == ODP_PACKET_FREE_CTRL_DISABLED);
+}
+
+/* Test packet length queries: total length, first segment length,
+ * head/tailroom minimums, and buffer length accounting. */
+static void packet_test_length(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint32_t buf_len, headroom, tailroom, seg_len;
+	void *data;
+
+	buf_len  = odp_packet_buf_len(pkt);
+	headroom = odp_packet_headroom(pkt);
+	tailroom = odp_packet_tailroom(pkt);
+	data     = odp_packet_data(pkt);
+
+	CU_ASSERT(data != NULL);
+	CU_ASSERT(odp_packet_len(pkt) == packet_len);
+	CU_ASSERT(odp_packet_seg_len(pkt) <= packet_len);
+	/* Combined data pointer + seg_len query must agree with the
+	 * individual queries */
+	CU_ASSERT(odp_packet_data_seg_len(pkt, &seg_len) == data);
+	CU_ASSERT(seg_len == odp_packet_seg_len(pkt));
+	CU_ASSERT(headroom >= pool_capa.pkt.min_headroom);
+	CU_ASSERT(tailroom >= pool_capa.pkt.min_tailroom);
+
+	/* Total buffer covers at least data plus both room areas */
+	CU_ASSERT(buf_len >= packet_len + headroom + tailroom);
+}
+
+/* Test odp_packet_reset(): it must restore length, headroom and data
+ * pointer after head/tail push/pull operations, clear all metadata
+ * flags, and accept shorter reset lengths. */
+static void packet_test_reset(void)
+{
+	uint32_t len, headroom;
+	uintptr_t ptr_len;
+	void *data, *new_data, *tail, *new_tail;
+	odp_packet_t pkt;
+
+	pkt = odp_packet_alloc(default_pool, packet_len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	len = odp_packet_len(pkt);
+	CU_ASSERT(len == packet_len);
+
+	headroom = odp_packet_headroom(pkt);
+
+	/* Push one byte of headroom into data, then reset must undo it */
+	if (headroom) {
+		data = odp_packet_data(pkt);
+		new_data = odp_packet_push_head(pkt, 1);
+		CU_ASSERT(odp_packet_len(pkt) == len + 1);
+		CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data - 1));
+		CU_ASSERT(odp_packet_headroom(pkt) == headroom - 1);
+		ptr_len = (uintptr_t)odp_packet_data(pkt) -
+			  (uintptr_t)odp_packet_head(pkt);
+		CU_ASSERT(ptr_len == (headroom - 1));
+		CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+		CU_ASSERT(odp_packet_len(pkt) == len);
+		CU_ASSERT(odp_packet_headroom(pkt) == headroom);
+		ptr_len = (uintptr_t)odp_packet_data(pkt) -
+			  (uintptr_t)odp_packet_head(pkt);
+		CU_ASSERT(ptr_len == headroom);
+	}
+
+	/* Pull one byte from the head, then reset must undo it */
+	data = odp_packet_data(pkt);
+	new_data = odp_packet_pull_head(pkt, 1);
+	CU_ASSERT(odp_packet_len(pkt) == len - 1);
+	CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data + 1));
+	CU_ASSERT(odp_packet_headroom(pkt) == headroom + 1);
+	ptr_len = (uintptr_t)odp_packet_data(pkt) -
+		  (uintptr_t)odp_packet_head(pkt);
+	CU_ASSERT(ptr_len == (headroom + 1));
+	CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+	CU_ASSERT(odp_packet_len(pkt) == len);
+	CU_ASSERT(odp_packet_headroom(pkt) == headroom);
+	ptr_len = (uintptr_t)odp_packet_data(pkt) -
+		  (uintptr_t)odp_packet_head(pkt);
+	CU_ASSERT(ptr_len == headroom);
+	CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+
+	/* Pull one byte from the tail, then reset must undo it */
+	tail = odp_packet_tail(pkt);
+	new_tail = odp_packet_pull_tail(pkt, 1);
+	CU_ASSERT(odp_packet_len(pkt) == len - 1);
+	CU_ASSERT((uintptr_t)new_tail == ((uintptr_t)tail - 1));
+	CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+	CU_ASSERT(odp_packet_len(pkt) == len);
+
+	/* Reset must clear every metadata flag */
+	packet_set_inflags_common(pkt, 1);
+	packet_check_inflags_common(pkt, 1);
+	CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+	packet_check_inflags_all(pkt, 0);
+
+	/* Reset to shorter lengths must adjust packet length */
+	CU_ASSERT(odp_packet_reset(pkt, len - 1) == 0);
+	CU_ASSERT(odp_packet_len(pkt) == (len - 1));
+
+	len = len - len / 2;
+	CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+	CU_ASSERT(odp_packet_len(pkt) == len);
+
+	odp_packet_free(pkt);
+}
+
+/* Smoke test odp_packet_prefetch(): it has no return value, so merely
+ * prefetching the whole packet range without crashing is a pass. */
+static void packet_test_prefetch(void)
+{
+	uint32_t len = odp_packet_len(test_packet);
+
+	odp_packet_prefetch(test_packet, 0, len);
+	CU_PASS();
+}
+
+/* Exercise the debug helpers: validity check plus the packet metadata and
+ * data hexdump printers (output goes to the log only). */
+static void packet_test_debug(void)
+{
+	odp_packet_t pkt = test_packet;
+
+	CU_ASSERT(odp_packet_is_valid(pkt) == 1);
+	printf("\n\n");
+	odp_packet_print(pkt);
+	odp_packet_print_data(pkt, 0, 100);
+	odp_packet_print_data(pkt, 14, 20);
+}
+
+/* Test user context: user pointer set/get (including NULL and an
+ * arbitrary value), user area contents from suite init, and that reset
+ * clears the user pointer. */
+static void packet_test_context(void)
+{
+	void *prev_ptr;
+	struct udata_struct *udat;
+	uint32_t uarea_size;
+	odp_packet_t pkt = test_packet;
+	char ptr_test_value = 2;
+
+	prev_ptr = odp_packet_user_ptr(pkt);
+	odp_packet_user_ptr_set(pkt, &ptr_test_value);
+	CU_ASSERT(odp_packet_user_ptr(pkt) == &ptr_test_value);
+	odp_packet_user_ptr_set(pkt, prev_ptr);
+
+	udat = odp_packet_user_area(pkt);
+	uarea_size = odp_packet_user_area_size(pkt);
+	CU_ASSERT(uarea_size >= default_param.pkt.uarea_size);
+
+	/* User area was filled with test_packet_udata in suite init */
+	if (uarea_size) {
+		CU_ASSERT(udat != NULL);
+		CU_ASSERT(memcmp(udat, &test_packet_udata, default_param.pkt.uarea_size) == 0);
+	} else {
+		CU_ASSERT(udat == NULL);
+	}
+
+	odp_packet_user_ptr_set(pkt, NULL);
+	CU_ASSERT(odp_packet_user_ptr(pkt) == NULL);
+	/* Pointer value is opaque to ODP; any value must round-trip */
+	odp_packet_user_ptr_set(pkt, (void *)0xdead);
+	CU_ASSERT(odp_packet_user_ptr(pkt) == (void *)0xdead);
+
+	odp_packet_reset(pkt, packet_len);
+
+	/* User pointer should be NULL after reset */
+	CU_ASSERT(odp_packet_user_ptr(pkt) == NULL);
+}
+
+/* Test that the payload offset round-trips for representative values:
+ * mid-packet, zero, last byte and the invalid marker. */
+static void packet_test_payload_offset(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint32_t pkt_len = odp_packet_len(pkt);
+	const uint32_t offset[] = {42, 0, pkt_len - 1,
+				   ODP_PACKET_OFFSET_INVALID};
+	uint32_t i;
+
+	for (i = 0; i < sizeof(offset) / sizeof(offset[0]); i++) {
+		CU_ASSERT(odp_packet_payload_offset_set(pkt, offset[i]) == 0);
+		CU_ASSERT(odp_packet_payload_offset(pkt) == offset[i]);
+	}
+}
+
+/* Test L2/L3/L4 offset set/get: equal offsets must yield equal layer
+ * pointers, distinct offsets must yield distinct pointers. */
+static void packet_test_layer_offsets(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint8_t *l2_addr, *l3_addr, *l4_addr;
+	uint32_t seg_len = 0;
+	const uint32_t l2_off = 2;
+	const uint32_t l3_off = l2_off + 14;
+	const uint32_t l4_off = l3_off + 14;
+	int ret;
+
+	/* Set offsets to the same value */
+	ret = odp_packet_l2_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_l3_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_l4_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+
+	/* Addresses should be the same */
+	l2_addr = odp_packet_l2_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	l3_addr = odp_packet_l3_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	l4_addr = odp_packet_l4_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	CU_ASSERT_PTR_NOT_NULL(l2_addr);
+	CU_ASSERT(l2_addr == l3_addr);
+	CU_ASSERT(l2_addr == l4_addr);
+
+	/* Set offsets to the different values */
+	odp_packet_l2_offset_set(pkt, l2_off);
+	CU_ASSERT(odp_packet_l2_offset(pkt) == l2_off);
+	odp_packet_l3_offset_set(pkt, l3_off);
+	CU_ASSERT(odp_packet_l3_offset(pkt) == l3_off);
+	odp_packet_l4_offset_set(pkt, l4_off);
+	CU_ASSERT(odp_packet_l4_offset(pkt) == l4_off);
+
+	/* Addresses should not be the same */
+	l2_addr = odp_packet_l2_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l2_addr);
+	l3_addr = odp_packet_l3_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l3_addr);
+	l4_addr = odp_packet_l4_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l4_addr);
+
+	CU_ASSERT(l2_addr != l3_addr);
+	CU_ASSERT(l2_addr != l4_addr);
+	CU_ASSERT(l3_addr != l4_addr);
+}
+
+/* Shift the packet head by 'shift' bytes (positive = grow into headroom,
+ * negative = shrink) and verify the resulting pointers and lengths.
+ * Uses push/pull when the shift fits the current headroom/segment, and
+ * extend/trunc (which may reallocate segments and update *pkt) when it
+ * does not. */
+static void _verify_headroom_shift(odp_packet_t *pkt,
+				   int shift)
+{
+	uint32_t room = odp_packet_headroom(*pkt);
+	uint32_t seg_data_len = odp_packet_seg_len(*pkt);
+	uint32_t pkt_data_len = odp_packet_len(*pkt);
+	void *data = NULL;
+	char *data_orig = odp_packet_data(*pkt);
+	char *head_orig = odp_packet_head(*pkt);
+	uint32_t seg_len;
+	int extended, rc;
+
+	if (shift >= 0) {
+		if ((uint32_t)abs(shift) <= room) {
+			data = odp_packet_push_head(*pkt, shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_extend_head(pkt, shift,
+						    &data, &seg_len);
+			extended = 1;
+		}
+	} else {
+		if ((uint32_t)abs(shift) < seg_data_len) {
+			data = odp_packet_pull_head(*pkt, -shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_trunc_head(pkt, -shift,
+						   &data, &seg_len);
+			extended = 1;
+		}
+	}
+	packet_sanity_check(*pkt);
+
+	CU_ASSERT_PTR_NOT_NULL(data);
+	/* rc and seg_len are set only on the extend/trunc paths */
+	if (extended) {
+		CU_ASSERT(rc >= 0);
+		CU_ASSERT(odp_packet_seg_len(*pkt) == seg_len);
+	} else {
+		/* Push/pull move data within the same segment: headroom and
+		 * segment length change by exactly 'shift', head stays put */
+		CU_ASSERT(odp_packet_headroom(*pkt) == room - shift);
+		CU_ASSERT(odp_packet_seg_len(*pkt) == seg_data_len + shift);
+		CU_ASSERT(data == data_orig - shift);
+		CU_ASSERT(odp_packet_head(*pkt) == head_orig);
+	}
+
+	CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
+	CU_ASSERT(odp_packet_data(*pkt) == data);
+}
+
+/* Test headroom manipulation on a copy of test_packet: pull, push the
+ * full room back and forth, and (when segmentation works) extend beyond
+ * the available headroom. */
+static void packet_test_headroom(void)
+{
+	odp_packet_t pkt = odp_packet_copy(test_packet,
+					   odp_packet_pool(test_packet));
+	uint32_t room;
+	uint32_t seg_data_len;
+	uint32_t push_val, pull_val;
+
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	room = odp_packet_headroom(pkt);
+
+	CU_ASSERT(room >= pool_capa.pkt.min_headroom);
+
+	seg_data_len = odp_packet_seg_len(pkt);
+	CU_ASSERT(seg_data_len >= 1);
+
+	pull_val = seg_data_len / 2;
+	push_val = room;
+
+	/* Net effect of the four shifts is zero */
+	_verify_headroom_shift(&pkt, -pull_val);
+	_verify_headroom_shift(&pkt, push_val + pull_val);
+	_verify_headroom_shift(&pkt, -push_val);
+	_verify_headroom_shift(&pkt, 0);
+
+	/* Force the extend/trunc path by exceeding the headroom */
+	if (segmentation_supported) {
+		push_val = room * 2;
+		_verify_headroom_shift(&pkt, push_val);
+		_verify_headroom_shift(&pkt, 0);
+		_verify_headroom_shift(&pkt, -push_val);
+	}
+
+	odp_packet_free(pkt);
+}
+
+/* Shift the packet tail by 'shift' bytes (positive = grow into tailroom,
+ * negative = shrink) and verify pointers, lengths and that growing the
+ * tail does not disturb the L2/L3/L4 offsets. Uses push/pull when the
+ * shift fits the current tailroom/last segment, and extend/trunc (which
+ * may reallocate segments and update *pkt) when it does not. */
+static void _verify_tailroom_shift(odp_packet_t *pkt,
+				   int shift)
+{
+	odp_packet_seg_t seg;
+	uint32_t room;
+	uint32_t seg_data_len, pkt_data_len, seg_len;
+	void *tail;
+	char *tail_orig;
+	int extended, rc;
+
+	room = odp_packet_tailroom(*pkt);
+	pkt_data_len = odp_packet_len(*pkt);
+	tail_orig = odp_packet_tail(*pkt);
+
+	seg = odp_packet_last_seg(*pkt);
+	CU_ASSERT(seg != ODP_PACKET_SEG_INVALID);
+	seg_data_len = odp_packet_seg_data_len(*pkt, seg);
+
+	if (shift >= 0) {
+		uint32_t l2_off, l3_off, l4_off;
+
+		/* Record layer offsets; tail growth must not change them */
+		l2_off = odp_packet_l2_offset(*pkt);
+		l3_off = odp_packet_l3_offset(*pkt);
+		l4_off = odp_packet_l4_offset(*pkt);
+
+		if ((uint32_t)abs(shift) <= room) {
+			tail = odp_packet_push_tail(*pkt, shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_extend_tail(pkt, shift,
+						    &tail, &seg_len);
+			extended = 1;
+		}
+
+		CU_ASSERT(l2_off == odp_packet_l2_offset(*pkt));
+		CU_ASSERT(l3_off == odp_packet_l3_offset(*pkt));
+		CU_ASSERT(l4_off == odp_packet_l4_offset(*pkt));
+	} else {
+		if ((uint32_t)abs(shift) < seg_data_len) {
+			tail = odp_packet_pull_tail(*pkt, -shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_trunc_tail(pkt, -shift,
+						   &tail, &seg_len);
+			extended = 1;
+		}
+	}
+	packet_sanity_check(*pkt);
+
+	CU_ASSERT_PTR_NOT_NULL(tail);
+	/* rc and seg_len are set only on the extend/trunc paths */
+	if (extended) {
+		CU_ASSERT(rc >= 0);
+
+		if (shift >= 0) {
+			/* rc == 0 means the extension fit without moving
+			 * existing data */
+			if (rc == 0)
+				CU_ASSERT(tail == tail_orig);
+		} else {
+			CU_ASSERT(odp_packet_tail(*pkt) == tail);
+			CU_ASSERT(odp_packet_tailroom(*pkt) == seg_len);
+		}
+	} else {
+		CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
+			  seg_data_len + shift);
+		CU_ASSERT(odp_packet_tailroom(*pkt) == room - shift);
+		/* With zero tailroom odp_packet_tail() result is not
+		 * comparable; skip the pointer checks */
+		if (room == 0 || (room - shift) == 0)
+			return;
+		if (shift >= 0) {
+			CU_ASSERT(odp_packet_tail(*pkt) == tail_orig + shift);
+			CU_ASSERT(tail == tail_orig);
+		} else {
+			CU_ASSERT(odp_packet_tail(*pkt) == tail);
+			CU_ASSERT(tail == tail_orig + shift);
+		}
+	}
+
+	CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
+}
+
+/* Test tailroom manipulation on a copy of test_packet: pull, push the
+ * room back and forth, and (when segmentation works) extend beyond the
+ * available tailroom. */
+static void packet_test_tailroom(void)
+{
+	odp_packet_t pkt = odp_packet_copy(test_packet,
+					   odp_packet_pool(test_packet));
+	odp_packet_seg_t segment;
+	uint32_t room;
+	uint32_t seg_data_len;
+	uint32_t push_val, pull_val;
+
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	segment = odp_packet_last_seg(pkt);
+	CU_ASSERT(segment != ODP_PACKET_SEG_INVALID);
+	room = odp_packet_tailroom(pkt);
+	CU_ASSERT(room >= pool_capa.pkt.min_tailroom);
+
+	seg_data_len = odp_packet_seg_data_len(pkt, segment);
+	CU_ASSERT(seg_data_len >= 1);
+
+	pull_val = seg_data_len / 2;
+	/* Leave one byte in a tailroom for odp_packet_tail() to succeed */
+	push_val = (room > 0) ? room - 1 : room;
+
+	/* Net effect of the four shifts is zero */
+	_verify_tailroom_shift(&pkt, -pull_val);
+	_verify_tailroom_shift(&pkt, push_val + pull_val);
+	_verify_tailroom_shift(&pkt, -push_val);
+	_verify_tailroom_shift(&pkt, 0);
+
+	/* Force the extend/trunc path by exceeding the tailroom */
+	if (segmentation_supported) {
+		push_val = room + 100;
+		_verify_tailroom_shift(&pkt, push_val);
+		_verify_tailroom_shift(&pkt, 0);
+		_verify_tailroom_shift(&pkt, -push_val);
+	}
+
+	odp_packet_free(pkt);
+}
+
+/* Test segment iteration on both the single-segment and the segmented
+ * test packet: walk every segment, check per-segment data/length, and
+ * verify the summed lengths match the packet length. */
+static void packet_test_segments(void)
+{
+	int num_segs, seg_index;
+	uint32_t data_len;
+	odp_packet_seg_t seg;
+	odp_packet_t pkt = test_packet;
+	odp_packet_t seg_pkt = segmented_test_packet;
+
+	CU_ASSERT(odp_packet_is_valid(pkt) == 1);
+
+	num_segs = odp_packet_num_segs(pkt);
+	CU_ASSERT(num_segs != 0);
+
+	/* Segment count must agree with the is_segmented flag */
+	if (odp_packet_is_segmented(pkt)) {
+		CU_ASSERT(num_segs > 1);
+	} else {
+		CU_ASSERT(num_segs == 1);
+	}
+
+	/* test_packet fits one segment by construction (suite init) */
+	CU_ASSERT(odp_packet_is_segmented(pkt) == 0);
+	if (segmentation_supported)
+		CU_ASSERT(odp_packet_is_segmented(seg_pkt) == 1);
+
+	seg = odp_packet_first_seg(pkt);
+	data_len = 0;
+	seg_index = 0;
+	while (seg_index < num_segs && seg != ODP_PACKET_SEG_INVALID) {
+		uint32_t seg_data_len;
+		void *seg_data;
+
+		seg_data_len = odp_packet_seg_data_len(pkt, seg);
+		seg_data     = odp_packet_seg_data(pkt, seg);
+
+		CU_ASSERT(seg_data_len > 0);
+		CU_ASSERT_PTR_NOT_NULL(seg_data);
+		CU_ASSERT(odp_packet_seg_to_u64(seg) !=
+			  odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
+		/* Self-compare: presumably just probes that the segment data
+		 * is readable via odp_memcmp() — result is trivially 0 */
+		CU_ASSERT(odp_memcmp(seg_data, seg_data, seg_data_len) == 0);
+
+		data_len += seg_data_len;
+
+		seg_index++;
+		seg = odp_packet_next_seg(pkt, seg);
+	}
+
+	CU_ASSERT(seg_index == num_segs);
+	CU_ASSERT(data_len <= odp_packet_buf_len(pkt));
+	CU_ASSERT(data_len == odp_packet_len(pkt));
+
+	/* Iteration past the last segment must return the invalid handle */
+	if (seg_index == num_segs)
+		CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
+
+	/* Repeat the walk for the segmented test packet */
+	seg = odp_packet_first_seg(seg_pkt);
+	num_segs = odp_packet_num_segs(seg_pkt);
+
+	data_len = 0;
+	seg_index = 0;
+
+	while (seg_index < num_segs && seg != ODP_PACKET_SEG_INVALID) {
+		uint32_t seg_data_len;
+		void *seg_data;
+
+		seg_data_len = odp_packet_seg_data_len(seg_pkt, seg);
+		seg_data     = odp_packet_seg_data(seg_pkt, seg);
+
+		CU_ASSERT(seg_data_len > 0);
+		CU_ASSERT(seg_data != NULL);
+		CU_ASSERT(odp_packet_seg_to_u64(seg) !=
+			  odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
+		CU_ASSERT(odp_memcmp(seg_data, seg_data, seg_data_len) == 0);
+
+		data_len += seg_data_len;
+
+		seg_index++;
+		seg = odp_packet_next_seg(seg_pkt, seg);
+	}
+
+	CU_ASSERT(seg_index == num_segs);
+	CU_ASSERT(data_len <= odp_packet_buf_len(seg_pkt));
+	CU_ASSERT(data_len == odp_packet_len(seg_pkt));
+
+	if (seg_index == num_segs)
+		CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
+}
+
+/* Verify that the last segment of the test packet is a valid handle and
+ * that iterating past it yields ODP_PACKET_SEG_INVALID. */
+static void packet_test_segment_last(void)
+{
+ odp_packet_seg_t last_seg = odp_packet_last_seg(test_packet);
+
+ CU_ASSERT_FATAL(last_seg != ODP_PACKET_SEG_INVALID);
+ CU_ASSERT(odp_packet_next_seg(test_packet, last_seg) ==
+ ODP_PACKET_SEG_INVALID);
+}
+
+/* Toggle packet input flag 'flag' to 0 then 1, checking after each set
+ * that the corresponding getter reflects the written value. */
+#define TEST_INFLAG(packet, flag) \
+do { \
+ odp_packet_##flag##_set(packet, 0); \
+ CU_ASSERT(odp_packet_##flag(packet) == 0); \
+ odp_packet_##flag##_set(packet, 1); \
+ CU_ASSERT(odp_packet_##flag(packet) != 0); \
+} while (0)
+
+/* Exercise set/get of every per-packet input flag, both through the
+ * common helpers (all flags at once) and individually via TEST_INFLAG.
+ * All flags are cleared again before the test returns. */
+static void packet_test_in_flags(void)
+{
+ odp_packet_t pkt = test_packet;
+
+ packet_set_inflags_common(pkt, 0);
+ packet_check_inflags_common(pkt, 0);
+ packet_set_inflags_common(pkt, 1);
+ packet_check_inflags_common(pkt, 1);
+
+ TEST_INFLAG(pkt, has_l2);
+ TEST_INFLAG(pkt, has_l3);
+ TEST_INFLAG(pkt, has_l4);
+ TEST_INFLAG(pkt, has_eth);
+ TEST_INFLAG(pkt, has_eth_bcast);
+ TEST_INFLAG(pkt, has_eth_mcast);
+ TEST_INFLAG(pkt, has_jumbo);
+ TEST_INFLAG(pkt, has_vlan);
+ TEST_INFLAG(pkt, has_vlan_qinq);
+ TEST_INFLAG(pkt, has_arp);
+ TEST_INFLAG(pkt, has_ipv4);
+ TEST_INFLAG(pkt, has_ipv6);
+ TEST_INFLAG(pkt, has_ip_bcast);
+ TEST_INFLAG(pkt, has_ip_mcast);
+ TEST_INFLAG(pkt, has_ipfrag);
+ TEST_INFLAG(pkt, has_ipopt);
+ TEST_INFLAG(pkt, has_ipsec);
+ TEST_INFLAG(pkt, has_udp);
+ TEST_INFLAG(pkt, has_tcp);
+ TEST_INFLAG(pkt, has_sctp);
+ TEST_INFLAG(pkt, has_icmp);
+ TEST_INFLAG(pkt, user_flag);
+
+ /* Leave the shared test packet with all flags cleared */
+ packet_set_inflags_common(pkt, 0);
+ packet_check_inflags_common(pkt, 0);
+}
+
+/* Verify VLAN flag semantics: a reset packet has neither flag set,
+ * setting the QinQ flag also reports the plain VLAN flag, and setting
+ * only the VLAN flag does not imply QinQ. */
+static void packet_test_vlan_flags(void)
+{
+ odp_packet_t pkt = test_packet;
+
+ odp_packet_reset(pkt, odp_packet_len(test_packet));
+
+ CU_ASSERT(!odp_packet_has_vlan(pkt));
+ CU_ASSERT(!odp_packet_has_vlan_qinq(pkt));
+
+ /* QinQ implies VLAN */
+ odp_packet_has_vlan_qinq_set(pkt, 1);
+ CU_ASSERT(odp_packet_has_vlan(pkt));
+ CU_ASSERT(odp_packet_has_vlan_qinq(pkt));
+
+ odp_packet_has_vlan_qinq_set(pkt, 0);
+ CU_ASSERT(!odp_packet_has_vlan(pkt));
+ CU_ASSERT(!odp_packet_has_vlan_qinq(pkt));
+
+ /* Plain VLAN does not imply QinQ */
+ odp_packet_has_vlan_set(pkt, 1);
+ CU_ASSERT(odp_packet_has_vlan(pkt));
+ CU_ASSERT(!odp_packet_has_vlan_qinq(pkt));
+
+ /* Restore the shared test packet to its reset state */
+ odp_packet_reset(pkt, odp_packet_len(test_packet));
+}
+
+/**
+ * The packet has not been classified, so its error flags are not
+ * meaningfully set. Only check that each error query returns one of
+ * the allowed values (0 or 1).
+ */
+static void packet_test_error_flags(void)
+{
+ odp_packet_t pkt = test_packet;
+ int (*const has_err[])(odp_packet_t) = {
+ odp_packet_has_error,
+ odp_packet_has_l2_error,
+ odp_packet_has_l3_error,
+ odp_packet_has_l4_error
+ };
+ size_t i;
+
+ for (i = 0; i < sizeof(has_err) / sizeof(has_err[0]); i++) {
+ int val = has_err[i](pkt);
+
+ CU_ASSERT(val == 0 || val == 1);
+ }
+}
+
+/* Snapshot of packet metadata used by the metadata tests */
+struct packet_metadata {
+ uint32_t l2_off; /* L2 header offset */
+ uint32_t l3_off; /* L3 header offset */
+ uint32_t l4_off; /* L4 header offset */
+ void *usr_ptr; /* user pointer metadata */
+ uint64_t usr_u64; /* user 64-bit metadata */
+};
+
+/* Insert data into the middle of a packet with odp_packet_add_data()
+ * and remove it again with odp_packet_rem_data(), verifying length
+ * changes and that user pointer / user area contents survive both
+ * operations. Cleans up via a goto label on early failure. */
+static void packet_test_add_rem_data(void)
+{
+ odp_packet_t pkt, new_pkt;
+ uint32_t pkt_len, offset, add_len;
+ void *usr_ptr;
+ struct udata_struct *udat;
+ int ret;
+ uint32_t min_seg_len;
+ uint32_t uarea_size = default_param.pkt.uarea_size;
+
+ min_seg_len = pool_capa.pkt.min_seg_len;
+
+ pkt = odp_packet_alloc(default_pool, packet_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ pkt_len = odp_packet_len(pkt);
+ usr_ptr = odp_packet_user_ptr(pkt);
+
+ if (uarea_size) {
+ /* Seed the user area with a known pattern to check later */
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ memcpy(udat, &test_packet_udata, uarea_size);
+ }
+
+ offset = pkt_len / 2;
+
+ if (segmentation_supported) {
+ /* Insert one more packet length in the middle of a packet */
+ add_len = min_seg_len;
+ } else {
+ /* Add diff between largest and smaller packets
+ * which is at least tailroom */
+ add_len = segmented_packet_len - packet_len;
+ }
+
+ new_pkt = pkt;
+ ret = odp_packet_add_data(&new_pkt, offset, add_len);
+ CU_ASSERT(ret >= 0);
+ if (ret < 0)
+ goto free_packet;
+ packet_sanity_check(new_pkt);
+ CU_ASSERT(odp_packet_len(new_pkt) == pkt_len + add_len);
+ /* Verify that user metadata is preserved */
+ CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
+
+ if (uarea_size) {
+ /* Verify that user metadata has been preserved */
+ udat = odp_packet_user_area(new_pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT(odp_packet_user_area_size(new_pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
+
+ pkt = new_pkt;
+
+ pkt_len = odp_packet_len(pkt);
+ usr_ptr = odp_packet_user_ptr(pkt);
+
+ /* Remove the same region again; length must return to pkt_len */
+ ret = odp_packet_rem_data(&new_pkt, offset, add_len);
+ CU_ASSERT(ret >= 0);
+ if (ret < 0)
+ goto free_packet;
+ packet_sanity_check(new_pkt);
+ CU_ASSERT(odp_packet_len(new_pkt) == pkt_len - add_len);
+ CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
+
+ if (uarea_size) {
+ /* Verify that user metadata has been preserved */
+ udat = odp_packet_user_area(new_pkt);
+
+ CU_ASSERT(udat != NULL);
+ CU_ASSERT(odp_packet_user_area_size(new_pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
+
+ pkt = new_pkt;
+
+free_packet:
+ odp_packet_free(pkt);
+}
+
+/* Assert that metadata accessor 'flag' returns the same value for both
+ * packets p1 and p2 */
+#define COMPARE_INFLAG(p1, p2, flag) \
+ CU_ASSERT(odp_packet_##flag(p1) == odp_packet_##flag(p2))
+
+/* Compare all input flags plus color/drop/shaper metadata of two
+ * packets; used to verify that packet copies preserve metadata. */
+static void packet_compare_inflags(odp_packet_t pkt1, odp_packet_t pkt2)
+{
+ COMPARE_INFLAG(pkt1, pkt2, has_l2);
+ COMPARE_INFLAG(pkt1, pkt2, has_l3);
+ COMPARE_INFLAG(pkt1, pkt2, has_l4);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth_bcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth_mcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_jumbo);
+ COMPARE_INFLAG(pkt1, pkt2, has_vlan);
+ COMPARE_INFLAG(pkt1, pkt2, has_vlan_qinq);
+ COMPARE_INFLAG(pkt1, pkt2, has_arp);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipv4);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipv6);
+ COMPARE_INFLAG(pkt1, pkt2, has_ip_bcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_ip_mcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipfrag);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipopt);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipsec);
+ COMPARE_INFLAG(pkt1, pkt2, has_udp);
+ COMPARE_INFLAG(pkt1, pkt2, has_tcp);
+ COMPARE_INFLAG(pkt1, pkt2, has_sctp);
+ COMPARE_INFLAG(pkt1, pkt2, has_icmp);
+ COMPARE_INFLAG(pkt1, pkt2, user_flag);
+ COMPARE_INFLAG(pkt1, pkt2, has_flow_hash);
+ COMPARE_INFLAG(pkt1, pkt2, has_ts);
+
+ COMPARE_INFLAG(pkt1, pkt2, color);
+ COMPARE_INFLAG(pkt1, pkt2, drop_eligible);
+ COMPARE_INFLAG(pkt1, pkt2, shaper_len_adjust);
+}
+
+/* Compare the overlapping portion of two packets' user areas. Packets
+ * with differently-sized user areas are compared over the smaller of
+ * the two sizes; nothing is checked when either size is zero. */
+static void packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
+{
+ void *area_a = odp_packet_user_area(pkt1);
+ void *area_b = odp_packet_user_area(pkt2);
+ uint32_t size_a = odp_packet_user_area_size(pkt1);
+ uint32_t size_b = odp_packet_user_area_size(pkt2);
+ uint32_t common = (size_a < size_b) ? size_a : size_b;
+
+ if (common)
+ CU_ASSERT(!memcmp(area_a, area_b, common));
+}
+
+/* Compare 'len' bytes of pkt1 starting at off1 with pkt2 starting at
+ * off2, walking both packets segment by segment. Silently returns if
+ * either range extends past its packet's length. 'line' is the
+ * caller's source line, printed on mismatch for diagnostics. */
+static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
+ odp_packet_t pkt2, uint32_t off2,
+ uint32_t len, int line)
+{
+ void *pkt1map, *pkt2map;
+ uint32_t seglen1, seglen2, cmplen;
+ int ret;
+
+ if (off1 + len > odp_packet_len(pkt1) ||
+ off2 + len > odp_packet_len(pkt2))
+ return;
+
+ while (len > 0) {
+ seglen1 = 0;
+ seglen2 = 0;
+ pkt1map = odp_packet_offset(pkt1, off1, &seglen1, NULL);
+ pkt2map = odp_packet_offset(pkt2, off2, &seglen2, NULL);
+
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+ /* Compare only the contiguously addressable span of both
+ * packets, capped by the remaining length */
+ cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
+ if (len < cmplen)
+ cmplen = len;
+
+ ret = memcmp(pkt1map, pkt2map, cmplen);
+
+ if (ret) {
+ printf("\ncompare_offset failed: line %i, off1 %"
+ PRIu32 ", off2 %" PRIu32 "\n", line, off1, off2);
+ }
+
+ CU_ASSERT(ret == 0);
+
+ off1 += cmplen;
+ off2 += cmplen;
+ len -= cmplen;
+ }
+}
+
+/* Set every writable metadata field on a freshly allocated packet
+ * (flags, input pktio, user pointer, offsets, flow hash, timestamp,
+ * color, drop eligibility, shaper adjust), copy the packet, and verify
+ * the copy carries identical metadata. Uses the "loop" pktio as a
+ * valid input interface handle. */
+static void packet_test_meta_data_copy(void)
+{
+ odp_packet_t pkt, copy;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ odp_pktio_t pktio;
+ odp_time_t t1, t2;
+
+ memcpy(&pool_param, &default_param, sizeof(odp_pool_param_t));
+ pool = odp_pool_create("meta_data_copy", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pktio = odp_pktio_open("loop", pool, NULL);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ t1 = odp_time_global();
+
+ pkt = odp_packet_alloc(pool, packet_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ /* A fresh packet starts with all flags cleared and no offsets */
+ packet_check_inflags_all(pkt, 0);
+
+ CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
+ CU_ASSERT(odp_packet_l3_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT(odp_packet_l4_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT(odp_packet_payload_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
+
+ packet_set_inflags_common(pkt, 1);
+ packet_check_inflags_common(pkt, 1);
+
+ odp_packet_input_set(pkt, pktio);
+ odp_packet_user_ptr_set(pkt, (void *)(uintptr_t)0xdeadbeef);
+ CU_ASSERT(odp_packet_l2_offset_set(pkt, 20) == 0);
+ CU_ASSERT(odp_packet_l3_offset_set(pkt, 30) == 0);
+ CU_ASSERT(odp_packet_l4_offset_set(pkt, 40) == 0);
+ CU_ASSERT(odp_packet_payload_offset_set(pkt, 50) == 0);
+ odp_packet_flow_hash_set(pkt, 0xcafe);
+ odp_packet_ts_set(pkt, t1);
+ odp_packet_color_set(pkt, ODP_PACKET_RED);
+ odp_packet_drop_eligible_set(pkt, 1);
+ odp_packet_shaper_len_adjust_set(pkt, 1);
+
+ /* Make a copy of the packet and check that meta data values are the same */
+ copy = odp_packet_copy(pkt, pool);
+ CU_ASSERT_FATAL(copy != ODP_PACKET_INVALID);
+
+ packet_compare_inflags(pkt, copy);
+ CU_ASSERT(odp_packet_input(copy) == pktio);
+ CU_ASSERT(odp_packet_user_ptr(copy) == (void *)(uintptr_t)0xdeadbeef);
+ CU_ASSERT(odp_packet_l2_offset(copy) == 20);
+ CU_ASSERT(odp_packet_l3_offset(copy) == 30);
+ CU_ASSERT(odp_packet_l4_offset(copy) == 40);
+ CU_ASSERT(odp_packet_payload_offset(copy) == 50);
+ CU_ASSERT(odp_packet_flow_hash(copy) == 0xcafe);
+ t2 = odp_packet_ts(copy);
+ CU_ASSERT(odp_time_cmp(t2, t1) == 0);
+ CU_ASSERT(odp_packet_color(copy) == ODP_PACKET_RED);
+ CU_ASSERT(odp_packet_drop_eligible(copy) == 1);
+ CU_ASSERT(odp_packet_shaper_len_adjust(copy) == 1);
+
+ odp_packet_free(pkt);
+ odp_packet_free(copy);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Test odp_packet_copy()/copy_part()/copy_data()/move_data():
+ * - copy into a pool with zero requested user area (may fail),
+ * - copy within the same pool and into a larger-user-area pool,
+ * - copy_part bounds checking and content verification,
+ * - in-packet copy_data and move_data, including a segment-crossing
+ * move when the packet is segmented. */
+static void packet_test_copy(void)
+{
+ odp_packet_t pkt;
+ odp_packet_t pkt_part;
+ odp_pool_param_t param;
+ odp_pool_t pool, pool_min_uarea, pool_large_uarea;
+ void *pkt_data;
+ uint32_t i, plen, src_offset, dst_offset, uarea_size;
+ uint32_t seg_len = 0;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+
+ param.pkt.uarea_size = 0;
+ pool_min_uarea = odp_pool_create("min_uarea", &param);
+ CU_ASSERT_FATAL(pool_min_uarea != ODP_POOL_INVALID);
+
+ /* Request a larger user area, clamped to pool capability */
+ uarea_size = 2 * sizeof(struct udata_struct);
+ if (uarea_size > pool_capa.pkt.max_uarea_size)
+ uarea_size = pool_capa.pkt.max_uarea_size;
+
+ param.pkt.uarea_size = uarea_size;
+
+ pool_large_uarea = odp_pool_create("large_uarea", &param);
+ CU_ASSERT_FATAL(pool_large_uarea != ODP_POOL_INVALID);
+
+ /* Pool with minimal user area */
+ pkt = odp_packet_copy(test_packet, pool_min_uarea);
+ if (pkt != ODP_PACKET_INVALID) {
+ /* Pool has enough user area also when zero was requested */
+ CU_ASSERT(odp_packet_user_area_size(pkt) >= sizeof(struct udata_struct));
+
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
+
+ odp_packet_free(pkt);
+ }
+
+ /* The same pool */
+ pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt != test_packet);
+ CU_ASSERT(odp_packet_pool(pkt) == odp_packet_pool(test_packet));
+ CU_ASSERT(odp_packet_user_area(pkt) != odp_packet_user_area(test_packet));
+ CU_ASSERT(odp_packet_user_area_size(pkt) == odp_packet_user_area_size(test_packet));
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(test_packet));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(test_packet));
+
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
+
+ odp_packet_free(pkt);
+
+ /* Pool with larger user area */
+ pkt = odp_packet_copy(test_packet, pool_large_uarea);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt != test_packet);
+ CU_ASSERT(odp_packet_pool(pkt) == pool_large_uarea);
+ CU_ASSERT(odp_packet_user_area(pkt) != odp_packet_user_area(test_packet));
+ CU_ASSERT(odp_packet_user_area_size(pkt) >= uarea_size);
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(test_packet));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(test_packet));
+
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
+
+ /* Now test copy_part */
+ pool = pool_large_uarea;
+ /* Out-of-range requests must fail */
+ pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt) + 1, pool);
+ CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
+ pkt_part = odp_packet_copy_part(pkt, odp_packet_len(pkt), 1, pool);
+ CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
+
+ pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt), pool);
+ CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt != pkt_part);
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_part));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_part));
+
+ packet_compare_data(pkt, pkt_part);
+ odp_packet_free(pkt_part);
+
+ /* Copy quarters of the packet at various offsets */
+ plen = odp_packet_len(pkt);
+ for (i = 0; i < plen / 2; i += 5) {
+ pkt_part = odp_packet_copy_part(pkt, i, plen / 4, pool);
+ CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_len(pkt_part) == plen / 4);
+ packet_compare_offset(pkt_part, 0, pkt, i, plen / 4);
+ odp_packet_free(pkt_part);
+ }
+
+ /* Test copy and move apis */
+ CU_ASSERT(odp_packet_copy_data(pkt, 0, plen - plen / 8, plen / 8) == 0);
+ packet_compare_offset(pkt, 0, pkt, plen - plen / 8, plen / 8);
+ packet_compare_offset(pkt, 0, test_packet, plen - plen / 8, plen / 8);
+
+ /* Test segment crossing if we support segments */
+ pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
+ CU_ASSERT_FATAL(pkt_data != NULL);
+
+ /* Choose a source/destination pair straddling the first segment
+ * boundary when segmented, else a pair inside the single segment */
+ if (seg_len < plen) {
+ src_offset = seg_len - 15;
+ dst_offset = seg_len - 5;
+ } else {
+ src_offset = seg_len - 40;
+ dst_offset = seg_len - 25;
+ }
+
+ pkt_part = odp_packet_copy_part(pkt, src_offset, 20, pool);
+ CU_ASSERT(odp_packet_move_data(pkt, dst_offset, src_offset, 20) == 0);
+ packet_compare_offset(pkt, dst_offset, pkt_part, 0, 20);
+
+ odp_packet_free(pkt_part);
+ odp_packet_free(pkt);
+
+ CU_ASSERT(odp_pool_destroy(pool_min_uarea) == 0);
+ CU_ASSERT(odp_pool_destroy(pool_large_uarea) == 0);
+}
+
+/* Test packet <-> memory and packet <-> packet data copies:
+ * round-trip a byte pattern through copy_from_mem/copy_to_mem, check
+ * copy_from_pkt bounds failures, byte-wise copy_from_pkt, and a copy
+ * from the segmented test packet. */
+static void packet_test_copydata(void)
+{
+ odp_packet_t pkt = test_packet;
+ uint32_t pkt_len = odp_packet_len(pkt);
+ uint8_t *data_buf;
+ uint32_t i;
+ int correct_memory;
+
+ CU_ASSERT_FATAL(pkt_len > 0);
+
+ data_buf = malloc(pkt_len);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(data_buf);
+
+ /* Fill the buffer with a known, position-dependent pattern */
+ for (i = 0; i < pkt_len; i++)
+ data_buf[i] = (uint8_t)i;
+
+ CU_ASSERT(!odp_packet_copy_from_mem(pkt, 0, pkt_len, data_buf));
+ memset(data_buf, 0, pkt_len);
+ CU_ASSERT(!odp_packet_copy_to_mem(pkt, 0, pkt_len, data_buf));
+
+ /* The pattern must survive the round trip through the packet */
+ correct_memory = 1;
+ for (i = 0; i < pkt_len; i++)
+ if (data_buf[i] != (uint8_t)i) {
+ correct_memory = 0;
+ break;
+ }
+ CU_ASSERT(correct_memory);
+
+ free(data_buf);
+
+ pkt = odp_packet_alloc(odp_packet_pool(test_packet), pkt_len / 2);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ /* Copies exceeding the destination length must fail */
+ CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, test_packet, 0,
+ pkt_len) < 0);
+ CU_ASSERT(odp_packet_copy_from_pkt(pkt, pkt_len, test_packet, 0,
+ 1) < 0);
+
+ /* Byte-by-byte copy of the first half */
+ for (i = 0; i < pkt_len / 2; i++) {
+ CU_ASSERT(odp_packet_copy_from_pkt(pkt, i, test_packet, i,
+ 1) == 0);
+ }
+
+ packet_compare_offset(pkt, 0, test_packet, 0, pkt_len / 2);
+ odp_packet_free(pkt);
+
+ /* Copy a mid-packet range from the segmented test packet */
+ pkt = odp_packet_alloc(odp_packet_pool(segmented_test_packet),
+ odp_packet_len(segmented_test_packet) / 2);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, segmented_test_packet,
+ odp_packet_len(pkt) / 4,
+ odp_packet_len(pkt)) == 0);
+ packet_compare_offset(pkt, 0, segmented_test_packet,
+ odp_packet_len(pkt) / 4,
+ odp_packet_len(pkt));
+ odp_packet_free(pkt);
+}
+
+/* Test odp_packet_concat()/odp_packet_split():
+ * - concat two copies of the test packet, split back and compare,
+ * - split a copy of the segmented test packet at half length,
+ * re-concat and verify contents,
+ * - split repeatedly (1/2, 1/4, 1/8) and re-concat in reverse order,
+ * checking lengths and data at every step. */
+static void packet_test_concatsplit(void)
+{
+ odp_packet_t pkt, pkt2;
+ uint32_t pkt_len;
+ odp_packet_t splits[4] = {ODP_PACKET_INVALID};
+ odp_pool_t pool;
+
+ pool = odp_packet_pool(test_packet);
+ pkt = odp_packet_copy(test_packet, pool);
+ pkt2 = odp_packet_copy(test_packet, pool);
+ pkt_len = odp_packet_len(test_packet);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt_len == odp_packet_len(pkt));
+ CU_ASSERT(pkt_len == odp_packet_len(pkt2));
+
+ /* Concat consumes pkt2; result holds both halves */
+ CU_ASSERT(odp_packet_concat(&pkt, pkt2) >= 0);
+ CU_ASSERT(odp_packet_len(pkt) == pkt_len * 2);
+ packet_sanity_check(pkt);
+ packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
+
+ CU_ASSERT(odp_packet_split(&pkt, pkt_len, &pkt2) == 0);
+ CU_ASSERT(pkt != pkt2);
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt2));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt2));
+ packet_sanity_check(pkt);
+ packet_sanity_check(pkt2);
+ packet_compare_data(pkt, pkt2);
+ packet_compare_data(pkt, test_packet);
+
+ odp_packet_free(pkt);
+ odp_packet_free(pkt2);
+
+ /* Repeat with the segmented test packet */
+ pkt = odp_packet_copy(segmented_test_packet,
+ odp_packet_pool(segmented_test_packet));
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pkt_len = odp_packet_len(pkt);
+ packet_sanity_check(pkt);
+ packet_compare_data(pkt, segmented_test_packet);
+
+ CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
+ CU_ASSERT(pkt != splits[0]);
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(splits[0]));
+ CU_ASSERT(odp_packet_len(pkt) == pkt_len / 2);
+ CU_ASSERT(odp_packet_len(pkt) + odp_packet_len(splits[0]) == pkt_len);
+ packet_sanity_check(pkt);
+ packet_sanity_check(splits[0]);
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+ packet_compare_offset(splits[0], 0, segmented_test_packet,
+ pkt_len / 2, odp_packet_len(splits[0]));
+
+ CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
+ packet_sanity_check(pkt);
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+ packet_compare_offset(pkt, pkt_len / 2, segmented_test_packet,
+ pkt_len / 2, pkt_len / 2);
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0,
+ pkt_len);
+
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
+ packet_compare_data(pkt, segmented_test_packet);
+
+ /* Successive splits; offsets are relative to the shrinking pkt */
+ CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_split(&pkt, pkt_len / 4, &splits[1]) == 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_split(&pkt, pkt_len / 8, &splits[2]) == 0);
+ packet_sanity_check(pkt);
+
+ packet_sanity_check(splits[0]);
+ packet_sanity_check(splits[1]);
+ packet_sanity_check(splits[2]);
+ CU_ASSERT(odp_packet_len(splits[0]) + odp_packet_len(splits[1]) +
+ odp_packet_len(splits[2]) + odp_packet_len(pkt) == pkt_len);
+
+ /* Re-concat in reverse split order restores the original data */
+ CU_ASSERT(odp_packet_concat(&pkt, splits[2]) >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[1]) >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
+ packet_sanity_check(pkt);
+
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
+ packet_compare_data(pkt, segmented_test_packet);
+
+ odp_packet_free(pkt);
+}
+
+/* Build a packet one byte at a time by concatenating many one-byte
+ * packets, then verify the accumulated data pattern (bytes count up,
+ * wrapping at 256 via uint8_t truncation).
+ *
+ * Fix: pool creation is now checked with CU_ASSERT_FATAL instead of a
+ * plain CU_ASSERT. On failure the old code continued and called
+ * odp_packet_alloc() on an invalid pool handle; all sibling tests in
+ * this file treat pool creation failure as fatal. */
+static void packet_test_concat_small(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt, pkt2;
+ int ret;
+ uint8_t *data;
+ uint32_t i;
+ uint32_t len = PACKET_POOL_NUM / 4;
+ uint8_t buf[len]; /* sized before clamping; len only shrinks below */
+
+ if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+ len = pool_capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = PACKET_POOL_NUM;
+
+ pool = odp_pool_create("packet_pool_concat", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pkt = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt);
+ *data = 0;
+
+ for (i = 0; i < len - 1; i++) {
+ pkt2 = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt2);
+ /* uint8_t truncation makes the pattern wrap at 256 */
+ *data = i + 1;
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+
+ if (ret < 0) {
+ odp_packet_free(pkt2);
+ break;
+ }
+
+ if (packet_sanity_check(pkt))
+ break;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ /* Use the actual length in case a concat failed early */
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ for (i = 0; i < len; i++)
+ CU_ASSERT(buf[i] == (i % 256));
+
+ odp_packet_free(pkt);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Interleave concat, extend (head/tail) and trunc (head/tail)
+ * operations on one packet, checking the packet length after each
+ * step. Lengths are derived from the pool's (possibly clamped)
+ * maximum packet length. */
+static void packet_test_concat_extend_trunc(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt, pkt2;
+ int i, ret;
+ uint32_t alloc_len, ext_len, trunc_len, cur_len;
+ uint32_t len = 1900;
+
+ if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+ len = pool_capa.pkt.max_len;
+
+ alloc_len = len / 8;
+ ext_len = len / 4;
+ trunc_len = len / 3;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = PACKET_POOL_NUM;
+
+ pool = odp_pool_create("packet_pool_concat", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pkt = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_len = odp_packet_len(pkt);
+
+ /* Two concats: each must grow the packet by exactly alloc_len */
+ for (i = 0; i < 2; i++) {
+ pkt2 = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+ packet_sanity_check(pkt);
+
+ /* On failure pkt2 was not consumed and must be freed here */
+ if (ret < 0)
+ odp_packet_free(pkt2);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
+ cur_len = odp_packet_len(pkt);
+ }
+
+ ret = odp_packet_extend_tail(&pkt, ext_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_extend_head(&pkt, ext_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
+ cur_len = odp_packet_len(pkt);
+
+ pkt2 = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+
+ if (ret < 0)
+ odp_packet_free(pkt2);
+
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_trunc_head(&pkt, trunc_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_trunc_tail(&pkt, trunc_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+ packet_sanity_check(pkt);
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
+ cur_len = odp_packet_len(pkt);
+
+ odp_packet_free(pkt);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Grow a packet one byte at a time with extend_tail (round 0) and
+ * extend_head (round 1), writing a counting pattern into each new
+ * byte, then verify the full packet contents in the corresponding
+ * direction. */
+static void packet_test_extend_small(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len;
+ int tail = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+ len = pool_capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = PACKET_POOL_NUM;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Round 0 extends the tail, round 1 the head */
+ for (round = 0; round < 2; round++) {
+ pkt = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt);
+ *data = 0;
+
+ for (i = 0; i < len - 1; i++) {
+ if (tail) {
+ ret = odp_packet_extend_tail(&pkt, 1,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ } else {
+ ret = odp_packet_extend_head(&pkt, 1,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ }
+
+ /* Output params are undefined on failure */
+ if (ret < 0)
+ break;
+
+ if (packet_sanity_check(pkt))
+ break;
+
+ if (tail) {
+ /* assert needs brackets */
+ CU_ASSERT(seg_len == 1);
+ } else {
+ CU_ASSERT(seg_len > 0);
+ }
+
+ /* Write the next pattern byte (wraps at 256) */
+ *data = i + 1;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ /* Tail growth counts forward, head growth counts backward */
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ tail = 0;
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Grow a packet with progressively smaller extension chunks
+ * (len/1 .. len/num_div), first extending the tail then the head,
+ * filling each new chunk with a counting pattern and verifying the
+ * final contents in the corresponding direction. */
+static void packet_test_extend_large(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len, ext_len, cur_len, cur_data;
+ int tail = 1;
+ int num_div = 16;
+ int div = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+ len = pool_capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = PACKET_POOL_NUM;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* num_div rounds of tail extension, then num_div of head */
+ for (round = 0; round < 2 * num_div; round++) {
+ ext_len = len / div;
+ cur_len = ext_len;
+
+ pkt = odp_packet_alloc(pool, ext_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_data = 0;
+
+ if (tail) {
+ ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ while (cur_len < len) {
+ /* Last chunk may be smaller than ext_len */
+ if ((len - cur_len) < ext_len)
+ ext_len = len - cur_len;
+
+ if (tail) {
+ ret = odp_packet_extend_tail(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ } else {
+ ret = odp_packet_extend_head(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ }
+
+ /* Output params are undefined on failure */
+ if (ret < 0)
+ break;
+
+ if (packet_sanity_check(pkt))
+ break;
+
+ if (tail) {
+ /* assert needs brackets */
+ CU_ASSERT((seg_len > 0) &&
+ (seg_len <= ext_len));
+ ret = fill_data_forward(pkt, cur_len, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ CU_ASSERT(seg_len > 0);
+ CU_ASSERT(data == odp_packet_data(pkt));
+ ret = fill_data_backward(pkt, 0, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ cur_len += ext_len;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ /* Tail growth counts forward, head growth counts backward */
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ div++;
+ if (div > num_div) {
+ /* test extend head */
+ div = 1;
+ tail = 0;
+ }
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Grow a packet with a mix of small and large extensions (tail in
+ * round 0, head in round 1), filling a counting pattern into each new
+ * chunk and verifying the final contents in the corresponding
+ * direction.
+ *
+ * Fix: a failed odp_packet_extend_tail()/head() now breaks out of the
+ * loop before its output parameters are used. Previously seg_len and
+ * data were read (and fill_data_* called) even when ret < 0, i.e.
+ * potentially uninitialized values, unlike the equivalent guards in
+ * packet_test_extend_small() and packet_test_extend_large(). */
+static void packet_test_extend_mix(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len, ext_len, cur_len, cur_data;
+ int small_count;
+ int tail = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < len)
+ len = pool_capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = PACKET_POOL_NUM;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (round = 0; round < 2; round++) {
+ small_count = 30;
+ ext_len = len / 10;
+ cur_len = ext_len;
+
+ pkt = odp_packet_alloc(pool, ext_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_data = 0;
+
+ if (tail) {
+ ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ while (cur_len < len) {
+ /* First 30 iterations use small chunks, then large */
+ if (small_count) {
+ small_count--;
+ ext_len = len / 100;
+ } else {
+ ext_len = len / 4;
+ }
+
+ if ((len - cur_len) < ext_len)
+ ext_len = len - cur_len;
+
+ if (tail)
+ ret = odp_packet_extend_tail(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ else
+ ret = odp_packet_extend_head(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+
+ CU_ASSERT(ret >= 0);
+
+ /* Output params are undefined on failure */
+ if (ret < 0)
+ break;
+
+ if (tail) {
+ CU_ASSERT((seg_len > 0) &&
+ (seg_len <= ext_len));
+ ret = fill_data_forward(pkt, cur_len, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ CU_ASSERT(seg_len > 0);
+ CU_ASSERT(data == odp_packet_data(pkt));
+ ret = fill_data_backward(pkt, 0, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ if (packet_sanity_check(pkt))
+ break;
+
+ cur_len += ext_len;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ /* Tail growth counts forward, head growth counts backward */
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ tail = 0;
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that extend/trunc on a packet and on a reference to it are
+ * independent: changes to the shared packet's head must not change the
+ * reference's length and vice versa, and freeing the reference returns
+ * the packet to the unreferenced state. */
+static void packet_test_extend_ref(void)
+{
+ odp_packet_t max_pkt, ref;
+ uint32_t hr, tr, max_len;
+
+ max_pkt = odp_packet_copy(segmented_test_packet,
+ odp_packet_pool(segmented_test_packet));
+ CU_ASSERT_FATAL(max_pkt != ODP_PACKET_INVALID);
+ max_len = odp_packet_len(max_pkt);
+
+ /* Maximize the max pkt */
+ hr = odp_packet_headroom(max_pkt);
+ tr = odp_packet_tailroom(max_pkt);
+ odp_packet_push_head(max_pkt, hr);
+ odp_packet_push_tail(max_pkt, tr);
+
+ /* See if we can trunc and extend anyway */
+ CU_ASSERT(odp_packet_trunc_tail(&max_pkt, hr + tr + 1,
+ NULL, NULL) >= 0);
+ CU_ASSERT(odp_packet_extend_head(&max_pkt, 1, NULL, NULL) >= 0);
+ CU_ASSERT(odp_packet_len(max_pkt) == max_len);
+ packet_sanity_check(max_pkt);
+
+ /* Now try with a reference in place */
+ CU_ASSERT(odp_packet_trunc_tail(&max_pkt, 100, NULL, NULL) >= 0);
+ packet_sanity_check(max_pkt);
+ ref = odp_packet_ref(max_pkt, 100);
+
+ /* Verify ref lengths */
+ CU_ASSERT(ref != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_len(ref) == max_len - 200);
+ if (odp_packet_has_ref(ref) == 1) {
+ /* And ref's affect on max_pkt */
+ CU_ASSERT(odp_packet_has_ref(max_pkt) == 1);
+ }
+
+ /* Now extend max_pkt and verify effect */
+ CU_ASSERT(odp_packet_extend_head(&max_pkt, 10, NULL, NULL) >= 0);
+ CU_ASSERT(odp_packet_len(max_pkt) == max_len - 90);
+ packet_sanity_check(max_pkt);
+
+ /* Extend on max_pkt should not affect ref */
+ CU_ASSERT(odp_packet_len(ref) == max_len - 200);
+
+ /* Now extend ref and verify effect*/
+ CU_ASSERT(odp_packet_extend_head(&ref, 20, NULL, NULL) >= 0);
+ CU_ASSERT(odp_packet_len(ref) == max_len - 180);
+ packet_sanity_check(max_pkt);
+
+ /* Extend on ref should not affect max_pkt */
+ CU_ASSERT(odp_packet_len(max_pkt) == max_len - 90);
+
+ /* Trunc max_pkt of all unshared len */
+ CU_ASSERT(odp_packet_trunc_head(&max_pkt, 110, NULL, NULL) >= 0);
+ packet_sanity_check(max_pkt);
+
+ /* Verify effect on max_pkt */
+ CU_ASSERT(odp_packet_len(max_pkt) == max_len - 200);
+
+ /* Verify that ref is unchanged */
+ CU_ASSERT(odp_packet_len(ref) == max_len - 180);
+
+ /* Free ref and verify that max_pkt is back to being unreferenced */
+ odp_packet_free(ref);
+ CU_ASSERT(odp_packet_has_ref(max_pkt) == 0);
+ CU_ASSERT(odp_packet_len(max_pkt) == max_len - 200);
+ packet_sanity_check(max_pkt);
+
+ odp_packet_free(max_pkt);
+}
+
+/* Verify odp_packet_align(): realignment for contiguous addressability
+ * across a segment boundary and realignment of a data address to a
+ * requested power-of-two alignment, neither of which may change packet
+ * length or contents. */
+static void packet_test_align(void)
+{
+	odp_packet_t pkt;
+	uint32_t pkt_len, offset;
+	uint32_t seg_len = 0, aligned_seglen = 0;
+	void *pkt_data, *aligned_data;
+	const uint32_t max_align = 32;
+
+	pkt = odp_packet_copy_part(segmented_test_packet, 0,
+				   odp_packet_len(segmented_test_packet) / 2,
+				   odp_packet_pool(segmented_test_packet));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	pkt_len = odp_packet_len(pkt);
+	seg_len = odp_packet_seg_len(pkt);
+
+	if (odp_packet_is_segmented(pkt)) {
+		/* Can't address across segment boundaries */
+		CU_ASSERT(odp_packet_align(&pkt, 0, pkt_len, 0) < 0);
+
+		/* Pick an offset near the end of the first segment so the
+		 * requested span straddles the boundary */
+		offset = seg_len - 5;
+		(void)odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+		/* Realign for addressability */
+		CU_ASSERT(odp_packet_align(&pkt, offset,
+					   seg_len + 2, 0) >= 0);
+
+		/* Alignment doesn't change packet length or contents */
+		CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+		(void)odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
+		packet_compare_offset(pkt, offset,
+				      segmented_test_packet, offset,
+				      aligned_seglen);
+
+		/* Verify requested contiguous addressability */
+		CU_ASSERT(aligned_seglen >= seg_len + 2);
+
+		packet_sanity_check(pkt);
+	}
+
+	/* Get a misaligned address; nudge the offset if it happens to be
+	 * aligned already */
+	pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
+	offset = seg_len - 5;
+	pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	if ((uintptr_t)pkt_data % max_align == 0) {
+		offset--;
+		pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	}
+
+	/* Realign for alignment */
+	CU_ASSERT(odp_packet_align(&pkt, offset, 1, max_align) >= 0);
+	aligned_data = odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
+
+	CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+	packet_compare_offset(pkt, offset, segmented_test_packet, offset,
+			      aligned_seglen);
+	CU_ASSERT((uintptr_t)aligned_data % max_align == 0);
+	packet_sanity_check(pkt);
+
+	odp_packet_free(pkt);
+}
+
+/* Exercise odp_packet_offset() at several byte positions of test_packet:
+ * first byte, second byte, last byte of the first segment, last byte of
+ * the whole packet, and finally with NULL [out] arguments. */
+static void packet_test_offset(void)
+{
+	odp_packet_seg_t first_seg = ODP_PACKET_SEG_INVALID;
+	odp_packet_t pkt = test_packet;
+	uint32_t len = 0;
+	uint32_t seg0_len;
+	uint32_t off;
+	uint8_t *data, *seg_start;
+
+	/* Offset zero addresses the first byte of the first segment */
+	data = odp_packet_offset(pkt, 0, &len, &first_seg);
+	CU_ASSERT(first_seg != ODP_PACKET_SEG_INVALID);
+	CU_ASSERT(len > 1);
+	CU_ASSERT(len == odp_packet_seg_len(pkt));
+	CU_ASSERT(len == odp_packet_seg_data_len(pkt, first_seg));
+	CU_ASSERT_PTR_NOT_NULL(data);
+	CU_ASSERT(data == odp_packet_data(pkt));
+	CU_ASSERT(data == odp_packet_seg_data(pkt, first_seg));
+
+	seg_start = data;
+	seg0_len = len;
+
+	/* Second byte of the packet */
+	off = 1;
+	data = odp_packet_offset(pkt, off, &len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(data);
+	CU_ASSERT(data == seg_start + off);
+	CU_ASSERT(len == seg0_len - off);
+
+	/* Last byte of the first segment */
+	off = seg0_len - 1;
+	data = odp_packet_offset(pkt, off, &len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(data);
+	CU_ASSERT(data == seg_start + off);
+	CU_ASSERT(len == seg0_len - off);
+
+	/* Last byte of the whole packet */
+	off = odp_packet_len(pkt) - 1;
+	data = odp_packet_offset(pkt, off, &len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(data);
+	CU_ASSERT(len == 1);
+
+	/* NULL [out] arguments must be accepted */
+	data = odp_packet_offset(pkt, 0, NULL, NULL);
+	CU_ASSERT_PTR_NOT_NULL(data);
+}
+
+/* Exhaustive test of the packet reference API: odp_packet_ref_static(),
+ * odp_packet_ref() and odp_packet_ref_pkt() on both unsegmented and
+ * segmented packets. Covers data equality with the base packet, has_ref
+ * reporting, lengths, compound (ref-of-ref) references, independence of
+ * refs from base-packet head manipulation, and freeing order (base freed
+ * while refs remain alive). */
+static void packet_test_ref(void)
+{
+	odp_packet_t base_pkt, segmented_base_pkt, hdr_pkt[4],
+		ref_pkt[4], refhdr_pkt[4], hdr_cpy;
+	odp_packet_t pkt, pkt2, pkt3, ref, ref2;
+	uint32_t pkt_len, segmented_pkt_len, hdr_len[4], offset[4], hr[4],
+		base_hr, ref_len[4];
+	int i, ret;
+	odp_pool_t pool;
+
+	/* Create references and compare data */
+	pool = odp_packet_pool(test_packet);
+
+	pkt = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	ref = odp_packet_ref_static(pkt);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	packet_compare_data(pkt, ref);
+	odp_packet_free(ref);
+	odp_packet_free(pkt);
+
+	pkt = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	ref = odp_packet_ref(pkt, 0);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	packet_compare_data(pkt, ref);
+	odp_packet_free(ref);
+	odp_packet_free(pkt);
+
+	/* Build pkt3 = test_packet + test_packet as a reference result to
+	 * compare odp_packet_ref_pkt() against */
+	pkt = odp_packet_copy(test_packet, pool);
+	pkt3 = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	CU_ASSERT_FATAL(pkt3 != ODP_PACKET_INVALID)
+	ret = odp_packet_concat(&pkt3, pkt);
+	CU_ASSERT_FATAL(ret >= 0);
+
+	pkt = odp_packet_copy(test_packet, pool);
+	pkt2 = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID)
+	ref = odp_packet_ref_pkt(pkt, 0, pkt2);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	packet_compare_data(pkt3, ref);
+	odp_packet_free(ref);
+	odp_packet_free(pkt);
+	odp_packet_free(pkt3);
+
+	/* Do the same for segmented packets */
+	pool = odp_packet_pool(segmented_test_packet);
+
+	pkt = odp_packet_copy(segmented_test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	ref = odp_packet_ref_static(pkt);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	packet_compare_data(pkt, ref);
+	odp_packet_free(ref);
+	odp_packet_free(pkt);
+
+	pkt = odp_packet_copy(segmented_test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	ref = odp_packet_ref(pkt, 0);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	packet_compare_data(pkt, ref);
+	odp_packet_free(ref);
+	odp_packet_free(pkt);
+
+	/* Avoid to create too large packets with concat */
+	pool = odp_packet_pool(test_packet);
+
+	pkt = odp_packet_copy(test_packet, pool);
+	pkt2 = odp_packet_copy(test_packet, pool);
+	pkt3 = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID)
+	CU_ASSERT_FATAL(pkt3 != ODP_PACKET_INVALID)
+	ret = odp_packet_concat(&pkt3, pkt2);
+	CU_ASSERT_FATAL(ret >= 0);
+	ret = odp_packet_concat(&pkt3, pkt);
+	CU_ASSERT_FATAL(ret >= 0);
+
+	/* Compound reference: a ref whose base is itself a ref */
+	pkt = odp_packet_copy(test_packet, pool);
+	pkt2 = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
+	CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID)
+	ref = odp_packet_ref_pkt(pkt, 0, pkt2);
+	CU_ASSERT_FATAL(ref != ODP_PACKET_INVALID)
+	pkt2 = odp_packet_copy(test_packet, pool);
+	CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID)
+	ref2 = odp_packet_ref_pkt(ref, 0, pkt2);
+	CU_ASSERT_FATAL(ref2 != ODP_PACKET_INVALID)
+	packet_compare_data(pkt3, ref2);
+
+	/* Try print function on a reference */
+	printf("\n\n");
+	odp_packet_print(ref2);
+	odp_packet_print_data(ref2, 0, 100);
+	odp_packet_print_data(ref2, 14, 20);
+
+	odp_packet_free(ref);
+	odp_packet_free(ref2);
+	odp_packet_free(pkt);
+	odp_packet_free(pkt3);
+
+	/* Test has_ref, lengths, etc */
+	base_pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+	CU_ASSERT_FATAL(base_pkt != ODP_PACKET_INVALID);
+	base_hr = odp_packet_headroom(base_pkt);
+	pkt_len = odp_packet_len(test_packet);
+
+	segmented_base_pkt =
+		odp_packet_copy(segmented_test_packet,
+				odp_packet_pool(segmented_test_packet));
+	segmented_pkt_len = odp_packet_len(segmented_test_packet);
+	CU_ASSERT_FATAL(segmented_base_pkt != ODP_PACKET_INVALID);
+
+	CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
+
+	/* Prepare four header packets with matching base offsets */
+	hdr_pkt[0] =
+		odp_packet_copy_part(segmented_test_packet, 0,
+				     odp_packet_len(segmented_test_packet) / 4,
+				     odp_packet_pool(segmented_test_packet));
+	CU_ASSERT_FATAL(hdr_pkt[0] != ODP_PACKET_INVALID);
+	hdr_len[0] = odp_packet_len(hdr_pkt[0]);
+	offset[0] = 0;
+
+	hdr_pkt[1] =
+		odp_packet_copy_part(segmented_test_packet, 10,
+				     odp_packet_len(segmented_test_packet) / 8,
+				     odp_packet_pool(segmented_test_packet));
+	CU_ASSERT_FATAL(hdr_pkt[1] != ODP_PACKET_INVALID);
+	hdr_len[1] = odp_packet_len(hdr_pkt[1]);
+	offset[1] = 5;
+
+	hdr_pkt[2] = odp_packet_copy_part(test_packet, 0,
+					  odp_packet_len(test_packet) / 4,
+					  odp_packet_pool(test_packet));
+	CU_ASSERT_FATAL(hdr_pkt[2] != ODP_PACKET_INVALID);
+	hdr_len[2] = odp_packet_len(hdr_pkt[2]);
+	offset[2] = 64;
+
+	hdr_pkt[3] = odp_packet_copy_part(test_packet, 0,
+					  odp_packet_len(test_packet) / 4,
+					  odp_packet_pool(test_packet));
+	CU_ASSERT_FATAL(hdr_pkt[3] != ODP_PACKET_INVALID);
+	hdr_len[3] = odp_packet_len(hdr_pkt[3]);
+	offset[3] = 64;
+
+	/* Nothing is a ref or has a ref before we start */
+	for (i = 0; i < 4; i++) {
+		CU_ASSERT(odp_packet_has_ref(hdr_pkt[i]) == 0);
+	}
+
+	/* Create a couple of refs */
+	refhdr_pkt[0] = odp_packet_ref_pkt(base_pkt, offset[0], hdr_pkt[0]);
+	refhdr_pkt[1] = odp_packet_ref_pkt(base_pkt, offset[1], hdr_pkt[1]);
+
+	CU_ASSERT(refhdr_pkt[0] != ODP_PACKET_INVALID);
+	CU_ASSERT(refhdr_pkt[1] != ODP_PACKET_INVALID);
+
+	/* If the base packet now has references, the ref packets should be
+	 * references too. */
+	if (odp_packet_has_ref(base_pkt) == 1) {
+		CU_ASSERT(odp_packet_has_ref(refhdr_pkt[0]) == 1);
+		CU_ASSERT(odp_packet_has_ref(refhdr_pkt[1]) == 1);
+	}
+
+	CU_ASSERT(odp_packet_len(refhdr_pkt[0]) ==
+		  hdr_len[0] + pkt_len - offset[0]);
+	CU_ASSERT(odp_packet_len(refhdr_pkt[1]) ==
+		  hdr_len[1] + pkt_len - offset[1]);
+
+	packet_compare_offset(refhdr_pkt[0], hdr_len[0],
+			      base_pkt, offset[0],
+			      pkt_len - offset[0]);
+
+	packet_compare_offset(refhdr_pkt[1], hdr_len[1],
+			      base_pkt, offset[1],
+			      pkt_len - offset[1]);
+
+	/* See if compound references are supported and if so that they
+	 * operate properly */
+	hdr_cpy = odp_packet_copy(hdr_pkt[2], odp_packet_pool(hdr_pkt[2]));
+	CU_ASSERT_FATAL(hdr_cpy != ODP_PACKET_INVALID);
+
+	refhdr_pkt[2] = odp_packet_ref_pkt(refhdr_pkt[0], 2, hdr_cpy);
+	CU_ASSERT(refhdr_pkt[2] != ODP_PACKET_INVALID);
+
+	if (odp_packet_has_ref(refhdr_pkt[2]) == 1) {
+		CU_ASSERT(odp_packet_has_ref(refhdr_pkt[0]) == 1);
+	}
+
+	/* Delete the refs */
+	odp_packet_free(refhdr_pkt[0]);
+	odp_packet_free(refhdr_pkt[1]);
+	odp_packet_free(refhdr_pkt[2]);
+
+	/* Verify that base_pkt no longer has a ref */
+	CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
+
+	/* Now create a two more shared refs */
+	refhdr_pkt[2] = odp_packet_ref_pkt(base_pkt, offset[2], hdr_pkt[2]);
+	refhdr_pkt[3] = odp_packet_ref_pkt(base_pkt, offset[3], hdr_pkt[3]);
+
+	CU_ASSERT(hdr_pkt[2] != ODP_PACKET_INVALID);
+	CU_ASSERT(hdr_pkt[3] != ODP_PACKET_INVALID);
+
+	if (odp_packet_has_ref(base_pkt) == 1) {
+		CU_ASSERT(odp_packet_has_ref(refhdr_pkt[2]) == 1);
+		CU_ASSERT(odp_packet_has_ref(refhdr_pkt[3]) == 1);
+	}
+
+	CU_ASSERT(odp_packet_len(refhdr_pkt[2]) ==
+		  odp_packet_len(refhdr_pkt[3]));
+
+	packet_compare_offset(refhdr_pkt[2], 0,
+			      refhdr_pkt[3], 0,
+			      odp_packet_len(hdr_pkt[2]));
+
+	/* Delete the headers */
+	odp_packet_free(refhdr_pkt[2]);
+	odp_packet_free(refhdr_pkt[3]);
+
+	/* Verify that base_pkt is no longer ref'd */
+	CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
+
+	/* Create a static reference */
+	ref_pkt[0] = odp_packet_ref_static(base_pkt);
+	CU_ASSERT(ref_pkt[0] != ODP_PACKET_INVALID);
+
+	if (odp_packet_has_ref(base_pkt) == 1) {
+		CU_ASSERT(odp_packet_has_ref(ref_pkt[0]) == 1);
+	}
+
+	CU_ASSERT(odp_packet_len(ref_pkt[0]) == odp_packet_len(base_pkt));
+	packet_compare_offset(ref_pkt[0], 0, base_pkt, 0,
+			      odp_packet_len(base_pkt));
+
+	/* Now delete it */
+	odp_packet_free(ref_pkt[0]);
+	CU_ASSERT(odp_packet_has_ref(base_pkt) == 0);
+
+	/* Create references */
+	ref_pkt[0] = odp_packet_ref(segmented_base_pkt, offset[0]);
+	CU_ASSERT_FATAL(ref_pkt[0] != ODP_PACKET_INVALID);
+
+	if (odp_packet_has_ref(ref_pkt[0]) == 1) {
+		/* CU_ASSERT needs braces */
+		CU_ASSERT(odp_packet_has_ref(segmented_base_pkt) == 1);
+	}
+
+	ref_pkt[1] = odp_packet_ref(segmented_base_pkt, offset[1]);
+	CU_ASSERT_FATAL(ref_pkt[1] != ODP_PACKET_INVALID);
+
+	if (odp_packet_has_ref(ref_pkt[1]) == 1) {
+		/* CU_ASSERT needs braces */
+		CU_ASSERT(odp_packet_has_ref(segmented_base_pkt) == 1);
+	}
+
+	/* Verify reference lengths */
+	CU_ASSERT(odp_packet_len(ref_pkt[0]) == segmented_pkt_len - offset[0]);
+	CU_ASSERT(odp_packet_len(ref_pkt[1]) == segmented_pkt_len - offset[1]);
+
+	/* Free the base pkts -- references should still be valid */
+	odp_packet_free(base_pkt);
+	odp_packet_free(segmented_base_pkt);
+
+	packet_compare_offset(ref_pkt[0], 0,
+			      segmented_test_packet, offset[0],
+			      segmented_pkt_len - offset[0]);
+	packet_compare_offset(ref_pkt[1], 0,
+			      segmented_test_packet, offset[1],
+			      segmented_pkt_len - offset[1]);
+
+	/* Verify we can modify the refs */
+	hr[0] = odp_packet_headroom(ref_pkt[0]);
+	hr[1] = odp_packet_headroom(ref_pkt[1]);
+
+	CU_ASSERT(odp_packet_push_head(ref_pkt[0], hr[0]) != NULL);
+
+	CU_ASSERT(odp_packet_len(ref_pkt[0]) ==
+		  hr[0] + segmented_pkt_len - offset[0]);
+
+	CU_ASSERT(odp_packet_pull_head(ref_pkt[0], hr[0] / 2) != NULL);
+
+	if (hr[1] > 0) {
+		CU_ASSERT(odp_packet_push_head(ref_pkt[1], 1) != NULL);
+		CU_ASSERT(odp_packet_len(ref_pkt[1]) ==
+			  1 + segmented_pkt_len - offset[1]);
+		CU_ASSERT(odp_packet_pull_head(ref_pkt[1], 1) != NULL);
+		CU_ASSERT(odp_packet_len(ref_pkt[1]) ==
+			  segmented_pkt_len - offset[1]);
+	}
+
+	odp_packet_free(ref_pkt[0]);
+	odp_packet_free(ref_pkt[1]);
+
+	/* Verify we can modify base packet after reference is created */
+	base_pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+
+	ref_pkt[1] = odp_packet_ref(base_pkt, offset[1]);
+	CU_ASSERT_FATAL(ref_pkt[1] != ODP_PACKET_INVALID);
+	ref_len[1] = odp_packet_len(ref_pkt[1]);
+	CU_ASSERT(ref_len[1] == odp_packet_len(base_pkt) - offset[1]);
+
+	CU_ASSERT(odp_packet_push_head(base_pkt, base_hr / 2) != NULL);
+
+	CU_ASSERT(odp_packet_len(ref_pkt[1]) == ref_len[1]);
+
+	ref_pkt[0] = odp_packet_ref(base_pkt, offset[0]);
+	CU_ASSERT_FATAL(ref_pkt[0] != ODP_PACKET_INVALID);
+	ref_len[0] = odp_packet_len(ref_pkt[0]);
+	CU_ASSERT(ref_len[0] == odp_packet_len(base_pkt) - offset[0]);
+
+	CU_ASSERT(odp_packet_push_head(base_pkt,
+				       base_hr - base_hr / 2) != NULL);
+	CU_ASSERT(odp_packet_len(ref_pkt[1]) == ref_len[1]);
+	CU_ASSERT(odp_packet_len(ref_pkt[0]) == ref_len[0]);
+
+	hr[0] = odp_packet_headroom(ref_pkt[0]);
+	hr[1] = odp_packet_headroom(ref_pkt[1]);
+	CU_ASSERT(odp_packet_push_head(ref_pkt[0], hr[0]) != NULL);
+	CU_ASSERT(odp_packet_push_head(ref_pkt[1], hr[1]) != NULL);
+
+	odp_packet_free(base_pkt);
+	odp_packet_free(ref_pkt[0]);
+	odp_packet_free(ref_pkt[1]);
+}
+
+/* Round-trip the default packet vector through the generic event type and
+ * check that the event type and the handle are both preserved. */
+static void packet_vector_test_event_conversion(void)
+{
+	odp_packet_vector_t orig = pktv_default;
+	odp_event_t ev = odp_packet_vector_to_event(orig);
+	odp_packet_vector_t converted;
+
+	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+	CU_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR);
+
+	converted = odp_packet_vector_from_event(ev);
+	CU_ASSERT_FATAL(converted != ODP_PACKET_VECTOR_INVALID);
+	CU_ASSERT(converted == orig);
+}
+
+/* Remove ODP_PACKET_INVALID entries from a packet table in place,
+ * preserving the relative order of the valid handles.
+ *
+ * pkt_tbl:  table of num_pkts packet handles, compacted in place
+ * num_pkts: number of entries in the table
+ *
+ * Returns the number of invalid entries removed.
+ *
+ * Note: the previous implementation shifted the tail left on each hit,
+ * which read one element past the end of the table (pkt_tbl[num_pkts])
+ * and skipped the entry shifted into the current slot, so consecutive
+ * invalid handles were missed. A single forward compaction pass avoids
+ * both problems. */
+static int remove_invalid_pkts_tbl(odp_packet_t *pkt_tbl, int num_pkts)
+{
+	int i, num_valid = 0;
+
+	for (i = 0; i < num_pkts; i++) {
+		if (pkt_tbl[i] != ODP_PACKET_INVALID) {
+			if (num_valid != i)
+				pkt_tbl[num_valid] = pkt_tbl[i];
+			num_valid++;
+		}
+	}
+
+	return num_pkts - num_valid;
+}
+
+/* Test odp_packet_vector_tbl() / odp_packet_vector_size_set(): fill a
+ * vector with packets, replace a handle in the table, mark one handle
+ * invalid and remove it, verifying vector size and table contents at
+ * each step. */
+static void packet_vector_test_tbl(void)
+{
+	odp_packet_vector_t pktv = ODP_PACKET_VECTOR_INVALID;
+	odp_packet_t *pkt_tbl, packet;
+	odp_packet_t clone_packet = ODP_PACKET_INVALID;
+	odp_packet_t orig_pkt_tbl[PKT_VEC_SIZE];
+	odp_pool_param_t params;
+	odp_pool_capability_t capa;
+	odp_pool_t pool;
+	uint32_t i, num;
+	uint32_t max_size = PKT_VEC_SIZE;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+	CU_ASSERT_FATAL(capa.vector.max_size > 0);
+
+	/* Clamp vector size to implementation capability */
+	if (capa.vector.max_size < max_size)
+		max_size = capa.vector.max_size;
+
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_VECTOR;
+	params.vector.num = 1;
+	params.vector.max_size = max_size;
+
+	pool = odp_pool_create("vector_pool_alloc", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate the only vector from the pool */
+	pktv = odp_packet_vector_alloc(pool);
+	/* Check if vector packet is valid */
+	CU_ASSERT_FATAL(odp_packet_vector_valid(pktv) == 1)
+	CU_ASSERT(odp_packet_vector_to_u64(pktv) !=
+		  odp_packet_vector_to_u64(ODP_PACKET_VECTOR_INVALID));
+
+	/* Allocate packets; default_pool/default_param were set up by
+	 * packet_vector_suite_init() */
+	for (i = 0; i < max_size; i++) {
+		orig_pkt_tbl[i] = odp_packet_alloc(default_pool,
+						   default_param.pkt.len);
+		CU_ASSERT_FATAL(orig_pkt_tbl[i] != ODP_PACKET_INVALID);
+	}
+
+	/* Get packet vector table */
+	num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+	/* Make sure there are initially no packets in the vector */
+	CU_ASSERT(num == 0);
+
+	/* Fill the allocated packets in the vector */
+	for (i = 0; i < max_size; i++)
+		pkt_tbl[i] = orig_pkt_tbl[i];
+
+	/* Set number of packets stored in the vector */
+	odp_packet_vector_size_set(pktv, max_size);
+
+	/* Get number of packets in the vector */
+	num = odp_packet_vector_size(pktv);
+	CU_ASSERT(num == max_size);
+
+	/* The remaining steps index entries 0 and 1 and remove one entry,
+	 * so they need a minimum vector size */
+	if (max_size < 4) {
+		printf("Max vector size too small to run all tests.\n");
+		goto cleanup;
+	}
+
+	/* Preparing a copy of the packet */
+	packet = orig_pkt_tbl[0];
+	clone_packet = odp_packet_copy(packet, odp_packet_pool(packet));
+	CU_ASSERT_FATAL(clone_packet != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_to_u64(clone_packet) != odp_packet_to_u64(packet));
+
+	/* Change one packet handle in the table */
+	pkt_tbl[1] = clone_packet;
+	/* Read packet vector table. */
+	num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+	/* Packets available should be equal to last updated */
+	CU_ASSERT(num == max_size);
+	/* Check if packet handle still corresponds to cloned packet */
+	CU_ASSERT(odp_packet_to_u64(pkt_tbl[1]) ==
+		  odp_packet_to_u64(clone_packet));
+
+	/* Mark the first packet as invalid */
+	pkt_tbl[0] = ODP_PACKET_INVALID;
+	/* Reading the table to confirm if the first packet is invalid */
+	num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+	CU_ASSERT(odp_packet_is_valid(pkt_tbl[0]) == 0);
+
+	/* Invalid packet should never be present in the table, following logic
+	 * updates the pkt_tbl array and returns the number of invalid packets
+	 * removed. */
+	num = remove_invalid_pkts_tbl(pkt_tbl, odp_packet_vector_size(pktv));
+	CU_ASSERT(num == 1);
+	/* Update number of valid packets in the table */
+	odp_packet_vector_size_set(pktv, odp_packet_vector_size(pktv) - num);
+	CU_ASSERT(odp_packet_vector_size(pktv) == max_size - num);
+	/* The first packet should be valid now */
+	CU_ASSERT(odp_packet_is_valid(pkt_tbl[0]) == 1);
+
+cleanup:
+	/* orig_pkt_tbl still owns all the packets (the vector table only
+	 * held copies of the handles), so free them separately */
+	if (clone_packet != ODP_PACKET_INVALID)
+		odp_packet_free(clone_packet);
+	odp_packet_free_multi(orig_pkt_tbl, max_size);
+	odp_packet_vector_free(pktv);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Sanity check the default vector handle and exercise the debug print. */
+static void packet_vector_test_debug(void)
+{
+	odp_packet_vector_t pktv = pktv_default;
+
+	CU_ASSERT_FATAL(odp_packet_vector_valid(pktv) == 1);
+
+	/* Print output is informational only; separate it from CUnit output */
+	printf("\n\n");
+	odp_packet_vector_print(pktv);
+}
+
+/* Test packet vector alloc/free from a single-entry pool: handle validity,
+ * user flag lifecycle across free/alloc, that freeing a vector does not
+ * free the packets it contains, pool exhaustion, and freeing a vector and
+ * its packet via odp_event_free(). */
+static void packet_vector_test_alloc_free(void)
+{
+	odp_packet_vector_t pktv = ODP_PACKET_VECTOR_INVALID;
+	odp_pool_param_t params;
+	odp_pool_capability_t capa;
+	odp_pool_t pool;
+	odp_packet_t pkt;
+	odp_packet_t *pkts_tbl;
+	uint32_t max_size = PKT_VEC_SIZE;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+	CU_ASSERT_FATAL(capa.vector.max_size > 0);
+
+	/* Clamp vector size to implementation capability */
+	if (capa.vector.max_size < max_size)
+		max_size = capa.vector.max_size;
+
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_VECTOR;
+	params.vector.num = 1;
+	params.vector.max_size = max_size;
+
+	pool = odp_pool_create("vector_pool_alloc", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate the only vector from the pool */
+	pktv = odp_packet_vector_alloc(pool);
+	/* Check if vector packet is valid */
+	CU_ASSERT_FATAL(odp_packet_vector_valid(pktv) == 1)
+	CU_ASSERT(odp_packet_vector_to_u64(pktv) !=
+		  odp_packet_vector_to_u64(ODP_PACKET_VECTOR_INVALID));
+
+	/* Vector size and user flag should be initially zero */
+	CU_ASSERT(odp_packet_vector_size(pktv) == 0);
+	CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+	odp_packet_vector_user_flag_set(pktv, 1);
+	CU_ASSERT(odp_packet_vector_user_flag(pktv) != 0);
+	odp_packet_vector_user_flag_set(pktv, 0);
+	CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+
+	/* Included packet should not be freed by odp_packet_vector_free() */
+	pkt = odp_packet_alloc(default_pool, default_param.pkt.len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	CU_ASSERT(odp_packet_vector_tbl(pktv, &pkts_tbl) == 0);
+	pkts_tbl[0] = pkt;
+	odp_packet_vector_size_set(pktv, 1);
+
+	/* Free with flag still set, alloc should clear it. */
+	odp_packet_vector_user_flag_set(pktv, 1);
+	odp_packet_vector_free(pktv);
+
+	/* Check that included packet is still valid */
+	CU_ASSERT(odp_packet_is_valid(pkt));
+
+	pktv = odp_packet_vector_alloc(pool);
+	/* Fix: check the allocation before querying the handle (the handle is
+	 * reused below and the alloc result was previously unchecked) */
+	CU_ASSERT_FATAL(pktv != ODP_PACKET_VECTOR_INVALID);
+	CU_ASSERT(odp_packet_vector_size(pktv) == 0);
+	CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+
+	/* Since it was only one buffer pool, more vector packets can't be
+	 * allocated.
+	 */
+	CU_ASSERT_FATAL(odp_packet_vector_alloc(pool) == ODP_PACKET_VECTOR_INVALID);
+
+	/* Freeing the buffer back to pool */
+	odp_packet_vector_free(pktv);
+
+	/* Check that the buffer was returned back to the pool */
+	pktv = odp_packet_vector_alloc(pool);
+	CU_ASSERT_FATAL(pktv != ODP_PACKET_VECTOR_INVALID);
+	CU_ASSERT(odp_packet_vector_size(pktv) == 0);
+
+	/* Free packet vector and included packet using odp_event_free() */
+	CU_ASSERT(odp_packet_vector_tbl(pktv, &pkts_tbl) == 0);
+	pkts_tbl[0] = pkt;
+	odp_packet_vector_size_set(pktv, 1);
+
+	odp_event_free(odp_packet_vector_to_event(pktv));
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Basic checks on the default packet vector created by suite init: handle
+ * validity, pool association (both directly and via the event API), and
+ * storing/reading back the preallocated packets through the vector table. */
+static void packet_vector_basic_test(void)
+{
+	odp_packet_t *pkt_tbl;
+	odp_pool_capability_t capa;
+	uint32_t i, num;
+	uint32_t max_size = PKT_VEC_PACKET_NUM;
+	odp_event_t ev;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+	if (capa.vector.max_size < max_size)
+		max_size = capa.vector.max_size;
+
+	/* Checking if default vector packet is valid */
+	CU_ASSERT(odp_packet_vector_valid(pktv_default) == 1)
+
+	/* Making sure default vector packet is from default vector pool */
+	CU_ASSERT(odp_packet_vector_pool(pktv_default) == vector_default_pool)
+	ev = odp_packet_vector_to_event(pktv_default);
+	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+	CU_ASSERT(odp_event_pool(ev) == vector_default_pool);
+
+	/* Get packet vector table */
+	num = odp_packet_vector_tbl(pktv_default, &pkt_tbl);
+	/* Making sure initially no packet in the vector */
+	CU_ASSERT(num == 0);
+
+	/* Fill the preallocated packets (pkt_vec, allocated by suite init)
+	 * in vector */
+	for (i = 0; i < max_size; i++)
+		pkt_tbl[i] = pkt_vec[i];
+
+	/* Setting up number of packets stored in vector */
+	odp_packet_vector_size_set(pktv_default, max_size);
+
+	/* Get number of packets in vector */
+	num = odp_packet_vector_size(pktv_default);
+	CU_ASSERT(num == max_size);
+
+	CU_ASSERT(odp_packet_vector_valid(pktv_default) == 1);
+}
+
+/* Test packet vector user areas: allocate all vectors from a pool with a
+ * user area configured and verify each vector gets a distinct, writable
+ * area that is also reachable via the event user area API. Skipped when
+ * the implementation does not support vector user areas. */
+static void packet_vector_test_user_area(void)
+{
+	odp_pool_param_t param;
+	odp_pool_t pool;
+	uint32_t i;
+	void *addr;
+	uint32_t num = 10;
+	void *prev = NULL;
+	uint32_t num_alloc = 0;
+	uint32_t size = 1024;
+	const uint32_t max_size = pool_capa.vector.max_uarea_size;
+
+	if (max_size == 0) {
+		ODPH_DBG("Packet vector user area not supported\n");
+		return;
+	}
+
+	/* Clamp requested user area size to implementation capability */
+	if (size > max_size)
+		size = max_size;
+
+	odp_pool_param_init(&param);
+	param.type = ODP_POOL_VECTOR;
+	param.vector.num = num;
+	param.vector.max_size = pool_capa.vector.max_size;
+	param.vector.uarea_size = size;
+
+	odp_packet_vector_t pktv[num];
+
+	pool = odp_pool_create("test_user_area", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	for (i = 0; i < num; i++) {
+		odp_event_t ev;
+		int flag;
+
+		pktv[i] = odp_packet_vector_alloc(pool);
+
+		if (pktv[i] == ODP_PACKET_VECTOR_INVALID)
+			break;
+		num_alloc++;
+
+		addr = odp_packet_vector_user_area(pktv[i]);
+		CU_ASSERT_FATAL(addr != NULL);
+		/* Consecutive allocations must get distinct user areas */
+		CU_ASSERT(prev != addr);
+
+		/* Event API must report the same user area */
+		ev = odp_packet_vector_to_event(pktv[i]);
+		CU_ASSERT(odp_event_user_area(ev) == addr);
+		CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+		CU_ASSERT(flag == 0);
+
+		prev = addr;
+		/* Area must be writable to its full configured size */
+		memset(addr, 0, size);
+	}
+
+	/* The pool was created with 'num' vectors, all must be allocatable */
+	CU_ASSERT(i == num);
+
+	for (i = 0; i < num_alloc; i++)
+		odp_packet_vector_free(pktv[i]);
+
+	CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+/* Suite init for the packet vector tests: create the default packet pool,
+ * preallocate and prepare PKT_VEC_PACKET_NUM ipv4-udp packets (pkt_vec),
+ * create the default vector pool and allocate the default vector
+ * (pktv_default). Returns 0 on success, -1 on failure (with everything
+ * created so far cleaned up). */
+static int packet_vector_suite_init(void)
+{
+	uint32_t num_pkt = PKT_VEC_PACKET_NUM;
+	uint32_t num = PACKET_POOL_NUM;
+	odp_pool_param_t params;
+	uint32_t i, len;
+	int ret;
+
+	memset(&pool_capa, 0, sizeof(odp_pool_capability_t));
+
+	if (odp_pool_capability(&pool_capa) < 0) {
+		ODPH_ERR("pool_capability failed\n");
+		return -1;
+	}
+
+	/* max_num == 0 means no limit */
+	if (pool_capa.pkt.max_num != 0 && pool_capa.pkt.max_num < num)
+		num = pool_capa.pkt.max_num;
+
+	/* Creating default packet pool */
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_PACKET;
+	params.pkt.len = pool_capa.pkt.min_seg_len;
+	params.pkt.num = num;
+
+	/* Keep a copy so tests can allocate packets with matching length */
+	memcpy(&default_param, &params, sizeof(odp_pool_param_t));
+
+	default_pool = odp_pool_create("default_pool", &params);
+	if (default_pool == ODP_POOL_INVALID) {
+		ODPH_ERR("default pool create failed\n");
+		return -1;
+	}
+
+	/* Allocating ipv4-udp packets. Note: ret must be a signed int, since
+	 * odp_packet_alloc_multi() returns a negative value on failure.
+	 * Storing that in an unsigned variable would make the 'ret > 0'
+	 * check below pass and free a garbage number of packets. */
+	len = sizeof(test_packet_ipv4_udp);
+	ret = odp_packet_alloc_multi(default_pool, len, pkt_vec, num_pkt);
+	if (ret != (int)num_pkt) {
+		ODPH_ERR("packet allocation failed\n");
+		if (ret > 0)
+			odp_packet_free_multi(pkt_vec, ret);
+		goto err;
+	}
+
+	for (i = 0; i < num_pkt; i++) {
+		ret = odp_packet_copy_from_mem(pkt_vec[i], 0, len,
+					       test_packet_ipv4_udp);
+		if (ret != 0) {
+			ODPH_ERR("packet preparation failed\n");
+			goto err1;
+		}
+	}
+
+	/* Creating the vector pool */
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_VECTOR;
+	params.vector.num = PKT_VEC_NUM;
+	params.vector.max_size = pool_capa.vector.max_size < PKT_VEC_SIZE ?
+					pool_capa.vector.max_size : PKT_VEC_SIZE;
+
+	vector_default_pool = odp_pool_create("vector_default_pool", &params);
+
+	if (vector_default_pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Default vector pool create failed\n");
+		goto err1;
+	}
+
+	/* Allocating a default vector */
+	pktv_default = odp_packet_vector_alloc(vector_default_pool);
+	if (pktv_default == ODP_PACKET_VECTOR_INVALID) {
+		ODPH_ERR("Default vector packet allocation failed\n");
+		goto err2;
+	}
+	return 0;
+err2:
+	odp_pool_destroy(vector_default_pool);
+err1:
+	odp_packet_free_multi(pkt_vec, PKT_VEC_PACKET_NUM);
+err:
+	odp_pool_destroy(default_pool);
+	return -1;
+}
+
+/* Release all resources created by packet_vector_suite_init(). */
+static int packet_vector_suite_term(void)
+{
+	/* The default vector must be freed before its pool is destroyed */
+	odp_packet_vector_free(pktv_default);
+	odp_pool_destroy(vector_default_pool);
+
+	/* Likewise the preallocated packets before the packet pool */
+	odp_packet_free_multi(pkt_vec, PKT_VEC_PACKET_NUM);
+	odp_pool_destroy(default_pool);
+
+	return 0;
+}
+/* Create the maximum number of packet pools the implementation supports
+ * (minus the one created by suite init), allocate one packet from each,
+ * verify event conversion and data access, then free everything. */
+static void packet_test_max_pools(void)
+{
+	odp_pool_param_t param;
+	uint32_t i, num_pool, num_pkt;
+	void *addr;
+	odp_event_t ev;
+	uint32_t len = 500;
+	/* Suite init has created one pool already */
+	uint32_t max_pools = pool_capa.pkt.max_pools - 1;
+	odp_pool_t pool[max_pools];
+	odp_packet_t packet[max_pools];
+
+	CU_ASSERT_FATAL(max_pools != 0);
+
+	printf("\n  Creating %u pools\n", max_pools);
+
+	odp_pool_param_init(&param);
+	param.type = ODP_POOL_PACKET;
+	param.pkt.len = len;
+	param.pkt.num = 1;
+	param.pkt.max_num = 1;
+
+	for (i = 0; i < max_pools; i++) {
+		pool[i] = odp_pool_create(NULL, &param);
+
+		if (pool[i] == ODP_POOL_INVALID)
+			break;
+	}
+
+	num_pool = i;
+
+	CU_ASSERT(num_pool == max_pools);
+	if (num_pool != max_pools)
+		ODPH_ERR("Created only %u pools\n", num_pool);
+
+	/* One packet from each pool: each pool holds exactly one packet */
+	for (i = 0; i < num_pool; i++) {
+		packet[i] = odp_packet_alloc(pool[i], len);
+
+		if (packet[i] == ODP_PACKET_INVALID)
+			break;
+
+		CU_ASSERT_FATAL(odp_packet_pool(packet[i]) == pool[i]);
+
+		/* Round-trip through the event type */
+		ev = odp_packet_to_event(packet[i]);
+		CU_ASSERT(odp_packet_from_event(ev) == packet[i]);
+		CU_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+
+		CU_ASSERT(odp_packet_len(packet[i]) == len);
+		addr = odp_packet_data(packet[i]);
+
+		/* Write packet data */
+		memset(addr, 0, len);
+	}
+
+	num_pkt = i;
+	CU_ASSERT(num_pkt == num_pool);
+
+	if (num_pkt)
+		odp_packet_free_multi(packet, num_pkt);
+
+	for (i = 0; i < num_pool; i++)
+		CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+}
+
+/* Test packet user areas at three pool configurations: zero size, one
+ * byte and the maximum supported size. Verifies the packet and event
+ * user area APIs agree, areas are writable, and the user flag is
+ * reported through odp_event_user_area_and_flag(). */
+static void packet_test_user_area(void)
+{
+	odp_pool_param_t param;
+	odp_packet_t pkt;
+	odp_pool_t pool;
+	odp_event_t ev;
+	int flag;
+
+	/* Start from the suite's default pool parameters */
+	memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+
+	param.pkt.uarea_size = 0;
+	pool = odp_pool_create("zero_uarea", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	pkt = odp_packet_alloc(pool, param.pkt.len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	/* Implementation may round a zero request up, within capability */
+	CU_ASSERT(odp_packet_user_area_size(pkt) <= pool_capa.pkt.max_uarea_size);
+	if (odp_packet_user_area_size(pkt)) {
+		/* CU_ASSERT needs these extra braces */
+		CU_ASSERT(odp_packet_user_area(pkt) != NULL);
+	} else {
+		CU_ASSERT(odp_packet_user_area(pkt) == NULL);
+	}
+	ev = odp_packet_to_event(pkt);
+	CU_ASSERT(odp_event_user_area(ev) == odp_packet_user_area(pkt));
+	CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt));
+	CU_ASSERT(flag == 0);
+
+	odp_packet_free(pkt);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+	/* Remaining cases need user area support */
+	if (pool_capa.pkt.max_uarea_size == 0)
+		return;
+
+	param.pkt.uarea_size = 1;
+	pool = odp_pool_create("one_uarea", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	pkt = odp_packet_alloc(pool, param.pkt.len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT_FATAL(odp_packet_user_area(pkt) != NULL);
+	ev = odp_packet_to_event(pkt);
+	CU_ASSERT(odp_event_user_area(ev) == odp_packet_user_area(pkt));
+	CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt));
+	CU_ASSERT(flag == 0);
+	CU_ASSERT(odp_packet_user_area_size(pkt) >= 1);
+	/* Write the single byte; packet must remain valid afterwards */
+	*(char *)odp_packet_user_area(pkt) = 0;
+	CU_ASSERT_FATAL(odp_packet_is_valid(pkt) == 1);
+	odp_packet_free(pkt);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+
+	param.pkt.uarea_size = pool_capa.pkt.max_uarea_size;
+	pool = odp_pool_create("max_uarea", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	pkt = odp_packet_alloc(pool, param.pkt.len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	/* User flag set on the packet must be visible via the event API */
+	odp_packet_user_flag_set(pkt, 1);
+	CU_ASSERT_FATAL(odp_packet_user_area(pkt) != NULL);
+	ev = odp_packet_to_event(pkt);
+	CU_ASSERT(odp_event_user_area(ev) == odp_packet_user_area(pkt));
+	CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt));
+	CU_ASSERT(flag > 0);
+	CU_ASSERT(odp_packet_user_area_size(pkt) == param.pkt.uarea_size);
+	/* Entire area must be writable */
+	memset(odp_packet_user_area(pkt), 0, param.pkt.uarea_size);
+	CU_ASSERT_FATAL(odp_packet_is_valid(pkt) == 1);
+	odp_packet_free(pkt);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Suite init for the parse tests: create a pool large enough to hold the
+ * longest reference packet plus the maximum L2 offset in a single segment,
+ * and enable all L3/L4 checksum checks. Returns 0 on success, -1 on
+ * failure. */
+static int packet_parse_suite_init(void)
+{
+	int num_test_pkt, i;
+	uint32_t max_len;
+	odp_pool_param_t param;
+
+	memset(&pool_capa, 0, sizeof(odp_pool_capability_t));
+
+	if (odp_pool_capability(&pool_capa) < 0) {
+		ODPH_ERR("odp_pool_capability() failed\n");
+		return -1;
+	}
+
+	/* Find the longest reference test packet */
+	num_test_pkt = ODPH_ARRAY_SIZE(parse_test_pkt_len);
+	max_len = 0;
+
+	for (i = 0; i < num_test_pkt; i++) {
+		if (max_len < parse_test_pkt_len[i])
+			max_len = parse_test_pkt_len[i];
+	}
+	/* Leave room for the largest L2 offset used by parse_test_alloc() */
+	max_len += MAX_PARSE_L2_OFFSET;
+
+	odp_pool_param_init(&param);
+
+	/* seg_len == len keeps every test packet in one segment */
+	param.type = ODP_POOL_PACKET;
+	param.pkt.seg_len = max_len;
+	param.pkt.len = max_len;
+	param.pkt.num = 100;
+
+	parse_test.pool = odp_pool_create("parse_test_pool", &param);
+
+	if (parse_test.pool == ODP_POOL_INVALID)
+		return -1;
+
+	/* Request checksum checking for all supported protocols */
+	parse_test.all_chksums.all_chksum = 0;
+	parse_test.all_chksums.chksum.ipv4 = 1;
+	parse_test.all_chksums.chksum.udp = 1;
+	parse_test.all_chksums.chksum.tcp = 1;
+	parse_test.all_chksums.chksum.sctp = 1;
+
+	return 0;
+}
+
+/* Tear down the pool created by packet_parse_suite_init().
+ * Returns 0 on success, -1 if the pool could not be destroyed. */
+static int packet_parse_suite_term(void)
+{
+	return odp_pool_destroy(parse_test.pool) ? -1 : 0;
+}
+
+/* Allocate num_pkt copies of test_packet, each prepended with a different
+ * amount of fill data so that the packet data starts at the L2 offsets
+ * recorded into parse_test.l2_offset[]. */
+static void parse_test_alloc(odp_packet_t pkt[], const uint8_t test_packet[],
+ uint32_t len, int num_pkt)
+{
+ int ret, i;
+ /* First offset must stay zero: pkt[0] is always parsed at offset 0 */
+ static uint32_t l2_offset[PARSE_TEST_NUM_PKT] = {0 /* must be zero */,
+ 2, 8, 12, 19, 36, 64, 120, MAX_PARSE_L2_OFFSET};
+
+ CU_ASSERT_FATAL(num_pkt <= PARSE_TEST_NUM_PKT);
+
+ for (i = 0; i < num_pkt; i++) {
+ uint32_t offs = l2_offset[i];
+ uint32_t data = 0;
+
+ parse_test.l2_offset[i] = offs;
+ pkt[i] = odp_packet_alloc(parse_test.pool, len + offs);
+ CU_ASSERT_FATAL(pkt[i] != ODP_PACKET_INVALID);
+
+ /* Fill the bytes before the L2 header with a counting pattern */
+ if (offs > 0) {
+ ret = fill_data_forward(pkt[i], 0, offs, &data);
+ CU_ASSERT(ret == 0);
+ }
+ ret = odp_packet_copy_from_mem(pkt[i], offs, len, test_packet);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+}
+
+/* Ethernet/IPv4/UDP: parse all layers with every checksum check enabled */
+static void parse_eth_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ odp_packet_chksum_status_t chksum_status;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_udp,
+ sizeof(test_packet_ipv4_udp), num_pkt);
+
+ /* Before parsing, checksum status must be unknown */
+ for (i = 0; i < num_pkt; i++) {
+ chksum_status = odp_packet_l3_chksum_status(pkt[i]);
+ CU_ASSERT(chksum_status == ODP_PACKET_CHKSUM_UNKNOWN);
+ chksum_status = odp_packet_l4_chksum_status(pkt[i]);
+ CU_ASSERT(chksum_status == ODP_PACKET_CHKSUM_UNKNOWN);
+ }
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_ALL;
+ parse.chksums = parse_test.all_chksums;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet SNAP/IPv4/UDP: parse all layers with every checksum check enabled */
+static void parse_eth_snap_ipv4_udp(void)
+{
+ int idx;
+ odp_packet_parse_param_t prm;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_snap_ipv4_udp,
+ sizeof(test_packet_snap_ipv4_udp), num_pkt);
+
+ /* Before parsing, checksum status must be unknown */
+ for (idx = 0; idx < num_pkt; idx++) {
+ CU_ASSERT(odp_packet_l3_chksum_status(pkt[idx]) ==
+ ODP_PACKET_CHKSUM_UNKNOWN);
+ CU_ASSERT(odp_packet_l4_chksum_status(pkt[idx]) ==
+ ODP_PACKET_CHKSUM_UNKNOWN);
+ }
+
+ prm.proto = ODP_PROTO_ETH;
+ prm.last_layer = ODP_PROTO_LAYER_ALL;
+ prm.chksums = parse_test.all_chksums;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &prm) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &prm) == (num_pkt - 1));
+
+ for (idx = 0; idx < num_pkt; idx++) {
+ odp_packet_t p = pkt[idx];
+
+ CU_ASSERT(odp_packet_has_eth(p));
+ CU_ASSERT(odp_packet_has_ipv4(p));
+ CU_ASSERT(odp_packet_has_udp(p));
+ CU_ASSERT(!odp_packet_has_ipv6(p));
+ CU_ASSERT(!odp_packet_has_tcp(p));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(p), ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(p), ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(p), ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* IPv4/UDP: parse starting from the IPv4 header (Ethernet header skipped) */
+static void parse_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+ uint32_t offset[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_udp,
+ sizeof(test_packet_ipv4_udp), num_pkt);
+
+ /* Skip the 14 byte Ethernet header: parse offset points to IPv4 */
+ for (i = 0; i < num_pkt; i++)
+ offset[i] = parse_test.l2_offset[i] + 14;
+
+ parse.proto = ODP_PROTO_IPV4;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ CU_ASSERT(odp_packet_parse(pkt[0], offset[0], &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], &offset[1],
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ /* No L2 flag checks: parsing started at L3 */
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/TCP: parse up to L4 without checksum validation */
+static void parse_eth_ipv4_tcp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_tcp,
+ sizeof(test_packet_ipv4_tcp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_TCP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv6/UDP: parse up to L4 without checksum validation */
+static void parse_eth_ipv6_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv6_udp,
+ sizeof(test_packet_ipv6_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ /* Check parsed protocol types for consistency with the
+ * other Ethernet test cases */
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv6/TCP: parse all layers without checksum validation */
+static void parse_eth_ipv6_tcp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv6_tcp,
+ sizeof(test_packet_ipv6_tcp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_ALL;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ /* Check parsed protocol types for consistency with the
+ * other Ethernet test cases */
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_TCP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/VLAN/IPv4/UDP: single VLAN tag must be detected */
+static void parse_eth_vlan_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_vlan_ipv4_udp,
+ sizeof(test_packet_vlan_ipv4_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/VLAN/IPv6/UDP: VLAN tag plus IPv6 payload must be detected */
+static void parse_eth_vlan_ipv6_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_vlan_ipv6_udp,
+ sizeof(test_packet_vlan_ipv6_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/VLAN/VLAN/IPv4/UDP: double (QinQ) VLAN tagging must be detected */
+static void parse_eth_vlan_qinq_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_vlan_qinq_ipv4_udp,
+ sizeof(test_packet_vlan_qinq_ipv4_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(odp_packet_has_vlan_qinq(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/ARP: broadcast destination and ARP flag, no L3/L4 protocols */
+static void parse_eth_arp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_arp,
+ sizeof(test_packet_arp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_arp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/ICMP: ICMP must be reported as the L4 protocol flag */
+static void parse_eth_ipv4_icmp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_icmp,
+ sizeof(test_packet_ipv4_icmp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_icmp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv6/ICMP: ICMPv6 must be reported via the ICMP flag */
+static void parse_eth_ipv6_icmp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv6_icmp,
+ sizeof(test_packet_ipv6_icmp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_icmp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/SCTP: SCTP must be detected, UDP/TCP flags must stay clear */
+static void parse_eth_ipv4_sctp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_sctp,
+ sizeof(test_packet_ipv4_sctp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_sctp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/IPSEC AH: AH header must set the generic IPSEC flag */
+static void parse_eth_ipv4_ipsec_ah(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_ipsec_ah,
+ sizeof(test_packet_ipv4_ipsec_ah), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipsec(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/IPSEC ESP: ESP header must set the generic IPSEC flag */
+static void parse_eth_ipv4_ipsec_esp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_ipsec_esp,
+ sizeof(test_packet_ipv4_ipsec_esp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipsec(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv6/IPSEC AH: IPSEC flag and AH L4 type must be reported */
+static void parse_eth_ipv6_ipsec_ah(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv6_ipsec_ah,
+ sizeof(test_packet_ipv6_ipsec_ah), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipsec(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_AH);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv6/IPSEC ESP: ESP header must set the generic IPSEC flag */
+static void parse_eth_ipv6_ipsec_esp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv6_ipsec_esp,
+ sizeof(test_packet_ipv6_ipsec_esp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipsec(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet mcast/IPv4 mcast/UDP: multicast flags set, broadcast flags clear */
+static void parse_mcast_eth_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_mcast_eth_ipv4_udp,
+ sizeof(test_packet_mcast_eth_ipv4_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_eth_mcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ip_mcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ip_bcast(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet bcast/IPv4 bcast/UDP: broadcast flags set; Ethernet broadcast
+ * also counts as multicast per the API */
+static void parse_bcast_eth_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_bcast_eth_ipv4_udp,
+ sizeof(test_packet_bcast_eth_ipv4_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_eth_bcast(pkt[i]));
+ /* API specifies that Ethernet broadcast is also multicast */
+ CU_ASSERT(odp_packet_has_eth_mcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ip_bcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ip_mcast(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet mcast/IPv6 mcast/UDP: multicast flags set, broadcast flags clear */
+static void parse_mcast_eth_ipv6_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_mcast_eth_ipv6_udp,
+ sizeof(test_packet_mcast_eth_ipv6_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_eth_mcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_ip_mcast(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ip_bcast(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/UDP first fragment: fragment flag set, UDP still visible */
+static void parse_eth_ipv4_udp_first_frag(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_udp_first_frag,
+ sizeof(test_packet_ipv4_udp_first_frag), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipfrag(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipopt(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4/UDP last fragment: fragment flag set, UDP flag per parser */
+static void parse_eth_ipv4_udp_last_frag(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_udp_last_frag,
+ sizeof(test_packet_ipv4_udp_last_frag), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipfrag(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipopt(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Ethernet/IPv4 + options (Record route, NOP)/ICMP: IP options flag set */
+static void parse_eth_ipv4_rr_nop_icmp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_ipv4_rr_nop_icmp,
+ sizeof(test_packet_ipv4_rr_nop_icmp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_L4;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipopt(pkt[i]));
+ CU_ASSERT(odp_packet_has_icmp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipfrag(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Verify that odp_packet_parse_result() / _result_multi() report the same
+ * information as the individual packet metadata accessors. Uses the
+ * Ethernet/VLAN/IPv6/UDP reference packet. */
+static void parse_result(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+ odp_packet_parse_result_t result[num_pkt];
+ odp_packet_parse_result_t *result_ptr[num_pkt];
+
+ /* Ethernet/VLAN/IPv6/UDP */
+ parse_test_alloc(pkt, test_packet_vlan_ipv6_udp,
+ sizeof(test_packet_vlan_ipv6_udp), num_pkt);
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_ALL;
+ parse.chksums.all_chksum = 0;
+
+ /* pkt[0] has L2 at offset 0; the rest at their stored L2 offsets */
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.l2_offset + 1,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ /* Zero the result records so that stale data cannot mask failures */
+ for (i = 0; i < num_pkt; i++) {
+ result_ptr[i] = &result[i];
+ memset(&result[i], 0, sizeof(odp_packet_parse_result_t));
+ }
+
+ odp_packet_parse_result(pkt[0], result_ptr[0]);
+ odp_packet_parse_result_multi(&pkt[1], &result_ptr[1], num_pkt - 1);
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_UDP);
+
+ /* Each result flag must mirror the corresponding accessor */
+ CU_ASSERT(result[i].flag.all != 0);
+ CU_ASSERT(result[i].flag.has_error ==
+ !!odp_packet_has_error(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l2_error ==
+ !!odp_packet_has_l2_error(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l3_error ==
+ !!odp_packet_has_l3_error(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l4_error ==
+ !!odp_packet_has_l4_error(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l2 ==
+ !!odp_packet_has_l2(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l3 ==
+ !!odp_packet_has_l3(pkt[i]));
+ CU_ASSERT(result[i].flag.has_l4 ==
+ !!odp_packet_has_l4(pkt[i]));
+ CU_ASSERT(result[i].flag.has_eth ==
+ !!odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(result[i].flag.has_eth_bcast ==
+ !!odp_packet_has_eth_bcast(pkt[i]));
+ CU_ASSERT(result[i].flag.has_eth_mcast ==
+ !!odp_packet_has_eth_mcast(pkt[i]));
+ CU_ASSERT(result[i].flag.has_jumbo ==
+ !!odp_packet_has_jumbo(pkt[i]));
+ CU_ASSERT(result[i].flag.has_vlan ==
+ !!odp_packet_has_vlan(pkt[i]));
+ CU_ASSERT(result[i].flag.has_vlan_qinq ==
+ !!odp_packet_has_vlan_qinq(pkt[i]));
+ CU_ASSERT(result[i].flag.has_arp ==
+ !!odp_packet_has_arp(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ipv4 ==
+ !!odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ipv6 ==
+ !!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ip_bcast ==
+ !!odp_packet_has_ip_bcast(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ip_mcast ==
+ !!odp_packet_has_ip_mcast(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ipfrag ==
+ !!odp_packet_has_ipfrag(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ipopt ==
+ !!odp_packet_has_ipopt(pkt[i]));
+ CU_ASSERT(result[i].flag.has_ipsec ==
+ !!odp_packet_has_ipsec(pkt[i]));
+ CU_ASSERT(result[i].flag.has_udp ==
+ !!odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(result[i].flag.has_tcp ==
+ !!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT(result[i].flag.has_sctp ==
+ !!odp_packet_has_sctp(pkt[i]));
+ CU_ASSERT(result[i].flag.has_icmp ==
+ !!odp_packet_has_icmp(pkt[i]));
+
+ /* Lengths, offsets, checksum status and types must match too */
+ CU_ASSERT(result[i].packet_len == odp_packet_len(pkt[i]));
+ CU_ASSERT(result[i].l2_offset == odp_packet_l2_offset(pkt[i]));
+ CU_ASSERT(result[i].l3_offset == odp_packet_l3_offset(pkt[i]));
+ CU_ASSERT(result[i].l4_offset == odp_packet_l4_offset(pkt[i]));
+ CU_ASSERT(result[i].l3_chksum_status ==
+ odp_packet_l3_chksum_status(pkt[i]));
+ CU_ASSERT(result[i].l4_chksum_status ==
+ odp_packet_l4_chksum_status(pkt[i]));
+ CU_ASSERT(result[i].l2_type == odp_packet_l2_type(pkt[i]));
+ CU_ASSERT(result[i].l3_type == odp_packet_l3_type(pkt[i]));
+ CU_ASSERT(result[i].l4_type == odp_packet_l4_type(pkt[i]));
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
+/* Test cases of the generic packet API suite */
+odp_testinfo_t packet_suite[] = {
+ ODP_TEST_INFO(packet_test_alloc_free),
+ ODP_TEST_INFO(packet_test_alloc_free_multi),
+ ODP_TEST_INFO(packet_test_free_sp),
+ ODP_TEST_INFO(packet_test_alloc_segmented),
+ ODP_TEST_INFO(packet_test_alloc_max_len),
+ ODP_TEST_INFO(packet_test_alloc_max_segment),
+ ODP_TEST_INFO(packet_test_alloc_align),
+ ODP_TEST_INFO(packet_test_basic_metadata),
+ ODP_TEST_INFO(packet_test_debug),
+ ODP_TEST_INFO(packet_test_segments),
+ ODP_TEST_INFO(packet_test_length),
+ ODP_TEST_INFO(packet_test_reset),
+ ODP_TEST_INFO(packet_test_prefetch),
+ ODP_TEST_INFO(packet_test_headroom),
+ ODP_TEST_INFO(packet_test_tailroom),
+ ODP_TEST_INFO(packet_test_context),
+ ODP_TEST_INFO(packet_test_payload_offset),
+ ODP_TEST_INFO(packet_test_event_conversion),
+ ODP_TEST_INFO(packet_test_layer_offsets),
+ ODP_TEST_INFO(packet_test_segment_last),
+ ODP_TEST_INFO(packet_test_in_flags),
+ ODP_TEST_INFO(packet_test_vlan_flags),
+ ODP_TEST_INFO(packet_test_error_flags),
+ ODP_TEST_INFO(packet_test_add_rem_data),
+ ODP_TEST_INFO(packet_test_meta_data_copy),
+ ODP_TEST_INFO(packet_test_copy),
+ ODP_TEST_INFO(packet_test_copydata),
+ ODP_TEST_INFO(packet_test_concatsplit),
+ ODP_TEST_INFO(packet_test_concat_small),
+ ODP_TEST_INFO(packet_test_concat_extend_trunc),
+ ODP_TEST_INFO(packet_test_extend_small),
+ ODP_TEST_INFO(packet_test_extend_large),
+ ODP_TEST_INFO(packet_test_extend_mix),
+ ODP_TEST_INFO(packet_test_extend_ref),
+ ODP_TEST_INFO(packet_test_align),
+ ODP_TEST_INFO(packet_test_offset),
+ ODP_TEST_INFO(packet_test_ref),
+ ODP_TEST_INFO(packet_test_max_pools),
+ ODP_TEST_INFO(packet_test_user_area),
+ ODP_TEST_INFO_NULL,
+};
+
+/* Test cases of the packet vector suite.
+ * NOTE(review): despite the name, this table holds vector tests, not parse
+ * tests — a rename to packet_vector_suite would be clearer. */
+odp_testinfo_t packet_vector_parse_suite[] = {
+ ODP_TEST_INFO(packet_vector_test_debug),
+ ODP_TEST_INFO(packet_vector_basic_test),
+ ODP_TEST_INFO(packet_vector_test_alloc_free),
+ ODP_TEST_INFO(packet_vector_test_tbl),
+ ODP_TEST_INFO(packet_vector_test_user_area),
+ ODP_TEST_INFO(packet_vector_test_event_conversion),
+ ODP_TEST_INFO_NULL,
+};
+
+/* Test cases of the packet parse suite */
+odp_testinfo_t packet_parse_suite[] = {
+ ODP_TEST_INFO(parse_eth_ipv4_udp),
+ ODP_TEST_INFO(parse_eth_snap_ipv4_udp),
+ ODP_TEST_INFO(parse_ipv4_udp),
+ ODP_TEST_INFO(parse_eth_ipv4_tcp),
+ ODP_TEST_INFO(parse_eth_ipv6_udp),
+ ODP_TEST_INFO(parse_eth_ipv6_tcp),
+ ODP_TEST_INFO(parse_eth_vlan_ipv4_udp),
+ ODP_TEST_INFO(parse_eth_vlan_ipv6_udp),
+ ODP_TEST_INFO(parse_eth_vlan_qinq_ipv4_udp),
+ ODP_TEST_INFO(parse_eth_arp),
+ ODP_TEST_INFO(parse_eth_ipv4_icmp),
+ ODP_TEST_INFO(parse_eth_ipv6_icmp),
+ ODP_TEST_INFO(parse_eth_ipv4_sctp),
+ ODP_TEST_INFO(parse_eth_ipv4_ipsec_ah),
+ ODP_TEST_INFO(parse_eth_ipv4_ipsec_esp),
+ ODP_TEST_INFO(parse_eth_ipv6_ipsec_ah),
+ ODP_TEST_INFO(parse_eth_ipv6_ipsec_esp),
+ ODP_TEST_INFO(parse_mcast_eth_ipv4_udp),
+ ODP_TEST_INFO(parse_bcast_eth_ipv4_udp),
+ ODP_TEST_INFO(parse_mcast_eth_ipv6_udp),
+ ODP_TEST_INFO(parse_eth_ipv4_udp_first_frag),
+ ODP_TEST_INFO(parse_eth_ipv4_udp_last_frag),
+ ODP_TEST_INFO(parse_eth_ipv4_rr_nop_icmp),
+ ODP_TEST_INFO(parse_result),
+ ODP_TEST_INFO_NULL,
+};
+
+/* All suites registered by this test program */
+odp_suiteinfo_t packet_suites[] = {
+ { .name = "packet tests",
+ .testinfo_tbl = packet_suite,
+ .init_func = packet_suite_init,
+ .term_func = packet_suite_term,
+ },
+ { .name = "packet parse tests",
+ .testinfo_tbl = packet_parse_suite,
+ .init_func = packet_parse_suite_init,
+ .term_func = packet_parse_suite_term,
+ },
+ { .name = "packet vector tests",
+ .testinfo_tbl = packet_vector_parse_suite,
+ .init_func = packet_vector_suite_init,
+ .term_func = packet_vector_suite_term,
+ },
+ ODP_SUITE_INFO_NULL,
+};
+
+/* Parse common test options, register the suites and run them */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(packet_suites);
+ if (ret)
+ return ret;
+
+ return odp_cunit_run();
+}
diff --git a/test/common_plat/validation/api/pktio/.gitignore b/test/validation/api/pktio/.gitignore
index 1a5dd46e4..1a5dd46e4 100644
--- a/test/common_plat/validation/api/pktio/.gitignore
+++ b/test/validation/api/pktio/.gitignore
diff --git a/test/validation/api/pktio/Makefile.am b/test/validation/api/pktio/Makefile.am
new file mode 100644
index 000000000..c63809f8c
--- /dev/null
+++ b/test/validation/api/pktio/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = pktio_main
+pktio_main_SOURCES = pktio.c parser.c parser.h lso.c lso.h
diff --git a/test/validation/api/pktio/lso.c b/test/validation/api/pktio/lso.c
new file mode 100644
index 000000000..832c08859
--- /dev/null
+++ b/test/validation/api/pktio/lso.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <test_packet_ipv4.h>
+#include <test_packet_custom.h>
+
+#include <odp/helper/odph_api.h>
+
+#include "lso.h"
+
+#define MAX_NUM_IFACES         2
+#define PKT_POOL_NUM           256
+#define PKT_POOL_LEN           (2 * 1024)
+
+/* Maximum number of segments test is prepared to receive per outgoing packet */
+#define MAX_NUM_SEG            256
+
+/* Pktio interface info
+ */
+typedef struct {
+	const char *name;                /* interface name as opened */
+	odp_pktio_t hdl;                 /* pktio handle */
+	odp_pktout_queue_t pktout;       /* single direct output queue */
+	odp_pktin_queue_t pktin;         /* single direct input queue */
+	odp_pktio_capability_t capa;     /* queried interface capability */
+} pktio_info_t;
+
+/* Interface names used for testing */
+static const char *iface_name[MAX_NUM_IFACES];
+
+/* Test interfaces */
+static pktio_info_t pktios[MAX_NUM_IFACES];
+static pktio_info_t *pktio_a;
+static pktio_info_t *pktio_b;
+
+/* Number of interfaces being used (1=loopback, 2=pair) */
+static int num_ifaces;
+
+/* Some interface types cannot be restarted.
+ * These control test case execution in that case. */
+static int num_starts;
+static int disable_restart;
+
+/* While testing real-world interfaces additional time may be needed for
+ * external network to enable link to pktio interface that just become up. */
+static int wait_for_network;
+
+/* LSO test packet pool.
+ * NOTE(review): has external linkage unlike the other globals here —
+ * consider 'static' unless another file of pktio_main references it. */
+odp_pool_t lso_pool = ODP_POOL_INVALID;
+
+/* Check test packet size */
+ODP_STATIC_ASSERT(sizeof(test_packet_ipv4_udp_1500) == 1500, "error: size is not 1500");
+ODP_STATIC_ASSERT(sizeof(test_packet_ipv4_udp_325) == 325, "error: size is not 325");
+ODP_STATIC_ASSERT(sizeof(test_packet_custom_eth_1) == 723, "error: size is not 723");
+
+/* Poll interface link status until it is UP (or UNKNOWN), waiting up to
+ * ~1 second in total (100 polls x 10 ms delay between polls). */
+static inline void wait_linkup(odp_pktio_t pktio)
+{
+	/* wait 1 second for link up */
+	uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+	int wait_num = 100;
+	int i;
+	int ret = -1;
+
+	for (i = 0; i < wait_num; i++) {
+		ret = odp_pktio_link_status(pktio);
+		if (ret == ODP_PKTIO_LINK_STATUS_UNKNOWN || ret == ODP_PKTIO_LINK_STATUS_UP)
+			break;
+		/* link is down, call status again after delay */
+		odp_time_wait_ns(wait_ns);
+	}
+}
+
+/* Create the global LSO test packet pool (lso_pool). Pool capability limits
+ * are checked first so that unsupported configurations fail with a clear
+ * error instead of a pool create failure. Returns 0 on success, -1 on error. */
+static int pkt_pool_create(void)
+{
+	odp_pool_capability_t capa;
+	odp_pool_param_t params;
+
+	if (odp_pool_capability(&capa) != 0) {
+		ODPH_ERR("Pool capability failed\n");
+		return -1;
+	}
+
+	if (capa.pkt.max_num && capa.pkt.max_num < PKT_POOL_NUM) {
+		ODPH_ERR("Packet pool size not supported. Max %" PRIu32 "\n", capa.pkt.max_num);
+		return -1;
+	} else if (capa.pkt.max_len && capa.pkt.max_len < PKT_POOL_LEN) {
+		ODPH_ERR("Packet length not supported.\n");
+		return -1;
+	} else if (capa.pkt.max_seg_len &&
+		   capa.pkt.max_seg_len < PKT_POOL_LEN) {
+		ODPH_ERR("Segment length not supported.\n");
+		return -1;
+	}
+
+	odp_pool_param_init(&params);
+	/* Unsegmented packets: seg_len equals max packet length */
+	params.pkt.seg_len = PKT_POOL_LEN;
+	params.pkt.len     = PKT_POOL_LEN;
+	params.pkt.num     = PKT_POOL_NUM;
+	params.type        = ODP_POOL_PACKET;
+
+	lso_pool = odp_pool_create("lso_pool", &params);
+	if (lso_pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Packet pool create failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Open and configure test interface 'idx' in direct mode. With two
+ * interfaces, index 0 is the TX side and index 1 the RX side; a single
+ * (loopback) interface acts as both. LSO is enabled on TX and full parsing
+ * plus optional IPv4 checksum verification on RX, when the interface
+ * supports them. Capability is stored into pktios[idx].capa for the test
+ * cases. Returns the pktio handle or ODP_PKTIO_INVALID on failure. */
+static odp_pktio_t create_pktio(int idx, const char *name, odp_pool_t pool)
+{
+	odp_pktio_t pktio;
+	odp_pktio_config_t config;
+	odp_pktio_param_t pktio_param;
+	odp_pktio_capability_t *capa;
+	int tx = (idx == 0) ? 1 : 0;
+	int rx = (idx == 0) ? 0 : 1;
+
+	if (num_ifaces == 1) {
+		tx = 1;
+		rx = 1;
+	}
+
+	odp_pktio_param_init(&pktio_param);
+	pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+	pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+	pktio = odp_pktio_open(name, pool, &pktio_param);
+	pktios[idx].hdl  = pktio;
+	pktios[idx].name = name;
+	if (pktio == ODP_PKTIO_INVALID) {
+		ODPH_ERR("Failed to open %s\n", name);
+		return ODP_PKTIO_INVALID;
+	}
+
+	if (odp_pktio_capability(pktio, &pktios[idx].capa)) {
+		ODPH_ERR("Pktio capa failed: %s\n", name);
+		return ODP_PKTIO_INVALID;
+	}
+
+	capa = &pktios[idx].capa;
+
+	odp_pktio_config_init(&config);
+
+	if (tx) {
+		/* LSO tests are skipped (not failed) when unsupported */
+		if (capa->config.enable_lso)
+			config.enable_lso = 1;
+		else
+			ODPH_DBG("LSO not supported\n");
+	}
+
+	if (rx) {
+		config.parser.layer = ODP_PROTO_LAYER_ALL;
+		if (capa->config.pktin.bit.ipv4_chksum)
+			config.pktin.bit.ipv4_chksum = 1;
+		else
+			ODPH_DBG("IPv4 checksum not verified\n");
+	}
+
+	if (odp_pktio_config(pktio, &config)) {
+		ODPH_ERR("Failed to configure %s\n", name);
+		return ODP_PKTIO_INVALID;
+	}
+
+	/* By default, single input and output queue is used */
+	if (odp_pktin_queue_config(pktio, NULL)) {
+		ODPH_ERR("Failed to config input queue for %s\n", name);
+		return ODP_PKTIO_INVALID;
+	}
+	if (odp_pktout_queue_config(pktio, NULL)) {
+		ODPH_ERR("Failed to config output queue for %s\n", name);
+		return ODP_PKTIO_INVALID;
+	}
+
+	if (wait_for_network)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+	return pktio;
+}
+
+/* Allocate a packet from the LSO pool and fill it with 'len' bytes of test
+ * data. L2 offset is set to zero (Ethernet header at start of packet).
+ * Returns ODP_PACKET_INVALID on failure. The l2_offset_set() return value
+ * is ignored: offset 0 is always within the packet just allocated. */
+static odp_packet_t create_packet(const uint8_t *data, uint32_t len)
+{
+	odp_packet_t pkt;
+
+	pkt = odp_packet_alloc(lso_pool, len);
+	if (pkt == ODP_PACKET_INVALID)
+		return ODP_PACKET_INVALID;
+
+	if (odp_packet_copy_from_mem(pkt, 0, len, data)) {
+		ODPH_ERR("Failed to copy test packet data\n");
+		odp_packet_free(pkt);
+		return ODP_PACKET_INVALID;
+	}
+
+	odp_packet_l2_offset_set(pkt, 0);
+
+	return pkt;
+}
+
+/* Overwrite the Ethernet source/destination addresses of 'pkt' with the MAC
+ * addresses of the sending ('src') and receiving ('dst') interfaces, so the
+ * packet is deliverable on real networks. */
+static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst)
+{
+	uint32_t len;
+	odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &len);
+	int ret;
+
+	ret = odp_pktio_mac_addr(src, &eth->src, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+	CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+
+	ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+	CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+}
+
+/* Build a test packet from 'data', attach the LSO request either to the
+ * packet metadata (use_opt == 0) or pass it at send time (use_opt != 0),
+ * and transmit it from 'tx' towards 'rx', retrying on pktout backpressure.
+ *
+ * Returns 0 on success, -1 on failure. The packet is freed on every error
+ * path (fix: previously leaked when odp_packet_lso_request() failed).
+ * Parameters renamed from pktio_a/pktio_b to avoid shadowing the file-scope
+ * globals of the same names. */
+static int send_packets(odp_lso_profile_t lso_profile, pktio_info_t *tx, pktio_info_t *rx,
+			const uint8_t *data, uint32_t len, uint32_t hdr_len, uint32_t max_payload,
+			uint32_t l3_offset, int use_opt)
+{
+	odp_packet_t pkt;
+	int ret;
+	odp_packet_lso_opt_t lso_opt;
+	odp_packet_lso_opt_t *opt_ptr = NULL;
+	int retries = 10;
+
+	pkt = create_packet(data, len);
+	if (pkt == ODP_PACKET_INVALID) {
+		CU_FAIL("failed to generate test packet");
+		return -1;
+	}
+
+	pktio_pkt_set_macs(pkt, tx->hdl, rx->hdl);
+	CU_ASSERT(odp_packet_has_lso_request(pkt) == 0);
+
+	memset(&lso_opt, 0, sizeof(odp_packet_lso_opt_t));
+	lso_opt.lso_profile     = lso_profile;
+	lso_opt.payload_offset  = hdr_len;
+	lso_opt.max_payload_len = max_payload;
+
+	if (use_opt) {
+		/* LSO options passed explicitly to odp_pktout_send_lso() */
+		opt_ptr = &lso_opt;
+	} else {
+		/* LSO request carried in packet metadata */
+		if (odp_packet_lso_request(pkt, &lso_opt)) {
+			CU_FAIL("LSO request failed");
+			odp_packet_free(pkt);
+			return -1;
+		}
+
+		CU_ASSERT(odp_packet_has_lso_request(pkt));
+		CU_ASSERT(odp_packet_payload_offset(pkt) == hdr_len);
+	}
+
+	if (l3_offset)
+		odp_packet_l3_offset_set(pkt, l3_offset);
+
+	while (retries) {
+		ret = odp_pktout_send_lso(tx->pktout, &pkt, 1, opt_ptr);
+
+		CU_ASSERT_FATAL(ret < 2);
+
+		if (ret < 0) {
+			CU_FAIL("LSO send failed\n");
+			odp_packet_free(pkt);
+			return -1;
+		}
+		if (ret == 1)
+			break;
+
+		/* Output queue busy: wait a bit and retry */
+		odp_time_wait_ns(10 * ODP_TIME_MSEC_IN_NS);
+		retries--;
+	}
+
+	if (ret < 1) {
+		CU_FAIL("LSO send timeout\n");
+		odp_packet_free(pkt);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Receive packets on 'pktio_info' for up to 'timeout_ns', storing them into
+ * pkt_out[]. Returns the number of packets received, or -1 on a receive
+ * error (packets already received are freed in that case). If 'max_num'
+ * packets arrive before the timeout, the test is failed and the function
+ * returns early with the packets still owned by the caller. */
+static int recv_packets(pktio_info_t *pktio_info, uint64_t timeout_ns,
+			odp_packet_t *pkt_out, int max_num)
+{
+	odp_packet_t pkt;
+	odp_time_t wait_time, end;
+	int ret;
+	odp_pktin_queue_t pktin = pktio_info->pktin;
+	int num = 0;
+
+	wait_time = odp_time_local_from_ns(timeout_ns);
+	end = odp_time_sum(odp_time_local(), wait_time);
+
+	do {
+		pkt = ODP_PACKET_INVALID;
+		ret = odp_pktin_recv(pktin, &pkt, 1);
+
+		CU_ASSERT_FATAL(ret < 2);
+		if (ret < 0) {
+			CU_FAIL("Packet receive failed\n");
+			if (num)
+				odp_packet_free_multi(pkt_out, num);
+			return -1;
+		}
+
+		if (ret == 1) {
+			CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+			pkt_out[num] = pkt;
+			num++;
+			if (num == max_num) {
+				CU_FAIL("Too many packets received\n");
+				return num;
+			}
+		}
+	} while (odp_time_cmp(end, odp_time_local()) > 0);
+
+	return num;
+}
+
+/* Compare 'len' bytes of packet data starting at 'offset' against 'data'.
+ * Returns -1 when all bytes match, otherwise the index of the first
+ * mismatching byte.
+ * NOTE(review): assumes offset + len is within the packet — otherwise
+ * odp_packet_offset() returns NULL and would be dereferenced. Callers
+ * derive 'len' from the packet length, so this holds here. */
+static int compare_data(odp_packet_t pkt, uint32_t offset, const uint8_t *data, uint32_t len)
+{
+	uint32_t i;
+	uint8_t *u8;
+
+	for (i = 0; i < len; i++) {
+		u8 = odp_packet_offset(pkt, offset + i, NULL, NULL);
+		if (*u8 != data[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* Start every test interface and wait until its link is up.
+ * Returns 0 on success, -1 when any interface fails to start. */
+static int start_interfaces(void)
+{
+	int i;
+
+	for (i = 0; i < num_ifaces; i++) {
+		if (odp_pktio_start(pktios[i].hdl)) {
+			ODPH_ERR("Failed to start interface: %s\n", pktios[i].name);
+			return -1;
+		}
+
+		wait_linkup(pktios[i].hdl);
+	}
+
+	return 0;
+}
+
+/* Stop every test interface. Returns 0 on success, -1 on any failure. */
+static int stop_interfaces(void)
+{
+	int i;
+
+	for (i = 0; i < num_ifaces; i++) {
+		if (odp_pktio_stop(pktios[i].hdl)) {
+			ODPH_ERR("Failed to stop interface: %s\n", pktios[i].name);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Suite init: read test configuration from the environment
+ * (ODP_PKTIO_IF0/IF1 select the interfaces, defaulting to "loop";
+ * ODP_PKTIO_TEST_DISABLE_START_STOP limits tests to one interface restart;
+ * ODP_WAIT_FOR_NETWORK adds link settle time), create the packet pool and
+ * open/configure the pktio interfaces. Returns 0 on success, -1 on error. */
+int lso_suite_init(void)
+{
+	int i;
+
+	if (getenv("ODP_PKTIO_TEST_DISABLE_START_STOP"))
+		disable_restart = 1;
+
+	if (getenv("ODP_WAIT_FOR_NETWORK"))
+		wait_for_network = 1;
+
+	iface_name[0] = getenv("ODP_PKTIO_IF0");
+	iface_name[1] = getenv("ODP_PKTIO_IF1");
+	num_ifaces = 1;
+
+	if (!iface_name[0]) {
+		printf("No interfaces specified, using default \"loop\".\n");
+		iface_name[0] = "loop";
+	} else if (!iface_name[1]) {
+		printf("Using loopback interface: %s\n", iface_name[0]);
+	} else {
+		num_ifaces = 2;
+		printf("Using paired interfaces: %s %s\n",
+		       iface_name[0], iface_name[1]);
+	}
+
+	if (pkt_pool_create() != 0) {
+		ODPH_ERR("Failed to create pool\n");
+		return -1;
+	}
+
+	/* Create pktios and associate input/output queues */
+	for (i = 0; i < num_ifaces; ++i) {
+		odp_pktio_t pktio;
+		const char *name = iface_name[i];
+
+		pktio = create_pktio(i, name, lso_pool);
+
+		if (pktio == ODP_PKTIO_INVALID) {
+			ODPH_ERR("Failed to open interface: %s\n", name);
+			return -1;
+		}
+
+		if (odp_pktout_queue(pktio, &pktios[i].pktout, 1) != 1) {
+			ODPH_ERR("Failed to get pktout queue: %s\n", name);
+			return -1;
+		}
+
+		if (odp_pktin_queue(pktio, &pktios[i].pktin, 1) != 1) {
+			ODPH_ERR("Failed to get pktin queue: %s\n", name);
+			return -1;
+		}
+	}
+
+	/* With a single interface it loops back to itself */
+	pktio_a = &pktios[0];
+	pktio_b = &pktios[1];
+	if (num_ifaces == 1)
+		pktio_b = pktio_a;
+
+	return 0;
+}
+
+/* Suite term: close all interfaces, destroy the packet pool and print any
+ * inactive (skipped) test cases. Returns 0 on success, -1 on any failure. */
+int lso_suite_term(void)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < num_ifaces; ++i) {
+		if (odp_pktio_close(pktios[i].hdl)) {
+			ODPH_ERR("Failed to close pktio: %s\n", pktios[i].name);
+			ret = -1;
+		}
+	}
+
+	if (odp_pool_destroy(lso_pool) != 0) {
+		ODPH_ERR("Failed to destroy pool\n");
+		ret = -1;
+	}
+
+	if (odp_cunit_print_inactive())
+		ret = -1;
+
+	return ret;
+}
+
+/* Custom LSO test cases require LSO profiles, the custom protocol and the
+ * add_segment_num modify operation on the TX interface. */
+static int check_lso_custom(void)
+{
+	if (pktio_a->capa.lso.max_profiles == 0)
+		return ODP_TEST_INACTIVE;
+
+	if (pktio_a->capa.lso.max_profiles_per_pktio == 0)
+		return ODP_TEST_INACTIVE;
+
+	if (pktio_a->capa.lso.proto.custom == 0)
+		return ODP_TEST_INACTIVE;
+
+	if (pktio_a->capa.lso.mod_op.add_segment_num == 0)
+		return ODP_TEST_INACTIVE;
+
+	return ODP_TEST_ACTIVE;
+}
+
+/* Check whether a custom LSO send test producing 'num' segments can run.
+ * Side effect: counts activated send tests so that only the first one runs
+ * when interface restart is disabled. */
+static int check_lso_custom_segs(uint32_t num)
+{
+	if (check_lso_custom() == ODP_TEST_INACTIVE)
+		return ODP_TEST_INACTIVE;
+
+	if (num > pktio_a->capa.lso.max_segments)
+		return ODP_TEST_INACTIVE;
+
+	if (disable_restart && num_starts > 0)
+		return ODP_TEST_INACTIVE;
+
+	/* Run only one packet IO test case when interface restart is disabled */
+	num_starts++;
+
+	return ODP_TEST_ACTIVE;
+}
+
+/* Per-segment-count condition wrappers for ODP_TEST_INFO_CONDITIONAL */
+static int check_lso_custom_segs_1(void)
+{
+	return check_lso_custom_segs(1);
+}
+
+static int check_lso_custom_segs_2(void)
+{
+	return check_lso_custom_segs(2);
+}
+
+static int check_lso_custom_segs_3(void)
+{
+	return check_lso_custom_segs(3);
+}
+
+/* IPv4 LSO test cases require LSO profiles and IPv4 protocol support on
+ * the TX interface. */
+static int check_lso_ipv4(void)
+{
+	if (pktio_a->capa.lso.max_profiles == 0)
+		return ODP_TEST_INACTIVE;
+
+	if (pktio_a->capa.lso.max_profiles_per_pktio == 0)
+		return ODP_TEST_INACTIVE;
+
+	if (pktio_a->capa.lso.proto.ipv4 == 0)
+		return ODP_TEST_INACTIVE;
+
+	return ODP_TEST_ACTIVE;
+}
+
+/* Check whether an IPv4 LSO send test producing 'num' segments can run.
+ * Side effect: counts activated send tests so that only the first one runs
+ * when interface restart is disabled. */
+static int check_lso_ipv4_segs(uint32_t num)
+{
+	if (check_lso_ipv4() == ODP_TEST_INACTIVE)
+		return ODP_TEST_INACTIVE;
+
+	if (num > pktio_a->capa.lso.max_segments)
+		return ODP_TEST_INACTIVE;
+
+	if (disable_restart && num_starts > 0)
+		return ODP_TEST_INACTIVE;
+
+	num_starts++;
+
+	return ODP_TEST_ACTIVE;
+}
+
+/* Per-segment-count condition wrappers for ODP_TEST_INFO_CONDITIONAL */
+static int check_lso_ipv4_segs_1(void)
+{
+	return check_lso_ipv4_segs(1);
+}
+
+static int check_lso_ipv4_segs_2(void)
+{
+	return check_lso_ipv4_segs(2);
+}
+
+static int check_lso_ipv4_segs_3(void)
+{
+	return check_lso_ipv4_segs(3);
+}
+
+/* Sanity check LSO capability values when LSO is supported at all.
+ * Fix: added the statement-terminating semicolon missing from the last
+ * CU_ASSERT (it only compiled because the macro expands to a braced
+ * block); now consistent with every other CU_ASSERT use in this file. */
+static void lso_capability(void)
+{
+	/* LSO not supported when max_profiles is zero */
+	if (pktio_a->capa.lso.max_profiles == 0 || pktio_a->capa.lso.max_profiles_per_pktio == 0)
+		return;
+
+	CU_ASSERT(pktio_a->capa.lso.max_profiles >= pktio_a->capa.lso.max_profiles_per_pktio);
+	CU_ASSERT(pktio_a->capa.lso.max_packet_segments > 0);
+	/* At least 32 bytes of payload */
+	CU_ASSERT(pktio_a->capa.lso.max_payload_len >= 32);
+	/* LSO can create at least two segments */
+	CU_ASSERT(pktio_a->capa.lso.max_segments > 1);
+	/* LSO can copy at least Ethernet header to segments */
+	CU_ASSERT(pktio_a->capa.lso.max_payload_offset >= 14);
+
+	if (pktio_a->capa.lso.proto.custom) {
+		CU_ASSERT(pktio_a->capa.lso.max_num_custom > 0);
+
+		/* At least one custom modify operation must be supported */
+		CU_ASSERT(pktio_a->capa.lso.mod_op.add_segment_num ||
+			  pktio_a->capa.lso.mod_op.add_payload_len ||
+			  pktio_a->capa.lso.mod_op.add_payload_offset);
+	}
+}
+
+/* Create and destroy an IPv4 LSO profile, verifying default parameter
+ * values from odp_lso_profile_param_init(). */
+static void lso_create_ipv4_profile(void)
+{
+	odp_lso_profile_param_t param;
+	odp_lso_profile_t profile;
+
+	odp_lso_profile_param_init(&param);
+	CU_ASSERT(param.lso_proto == ODP_LSO_PROTO_NONE);
+	CU_ASSERT(param.custom.num_custom == 0);
+
+	param.lso_proto = ODP_LSO_PROTO_IPV4;
+
+	profile = odp_lso_profile_create(pktio_a->hdl, &param);
+	CU_ASSERT_FATAL(profile != ODP_LSO_PROFILE_INVALID);
+
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile) == 0);
+}
+
+/* Create (and destroy) a minimal single-field custom LSO profile. When
+ * capabilities allow, additionally create two profiles concurrently where
+ * the second uses three custom modify operations. */
+static void lso_create_custom_profile(void)
+{
+	odp_lso_profile_param_t param_0, param_1;
+	odp_lso_profile_t profile_0, profile_1;
+
+	odp_lso_profile_param_init(&param_0);
+	CU_ASSERT(param_0.lso_proto == ODP_LSO_PROTO_NONE);
+	CU_ASSERT(param_0.custom.num_custom == 0);
+
+	param_0.lso_proto = ODP_LSO_PROTO_CUSTOM;
+	param_0.custom.num_custom = 1;
+	param_0.custom.field[0].mod_op = ODP_LSO_ADD_SEGMENT_NUM;
+	param_0.custom.field[0].offset = 16;
+	param_0.custom.field[0].size   = 2;
+
+	profile_0 = odp_lso_profile_create(pktio_a->hdl, &param_0);
+	CU_ASSERT_FATAL(profile_0 != ODP_LSO_PROFILE_INVALID);
+
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile_0) == 0);
+
+	/* Rest of the test needs two profiles and three custom fields */
+	if (pktio_a->capa.lso.max_profiles < 2 || pktio_a->capa.lso.max_num_custom < 3)
+		return;
+
+	if (pktio_a->capa.lso.mod_op.add_payload_len == 0 ||
+	    pktio_a->capa.lso.mod_op.add_payload_offset == 0)
+		return;
+
+	odp_lso_profile_param_init(&param_1);
+	param_1.lso_proto = ODP_LSO_PROTO_CUSTOM;
+	param_1.custom.num_custom = 3;
+	param_1.custom.field[0].mod_op = ODP_LSO_ADD_PAYLOAD_LEN;
+	param_1.custom.field[0].offset = 14;
+	param_1.custom.field[0].size   = 2;
+	param_1.custom.field[1].mod_op = ODP_LSO_ADD_SEGMENT_NUM;
+	param_1.custom.field[1].offset = 16;
+	param_1.custom.field[1].size   = 2;
+	param_1.custom.field[2].mod_op = ODP_LSO_ADD_PAYLOAD_OFFSET;
+	param_1.custom.field[2].offset = 18;
+	param_1.custom.field[2].size   = 2;
+
+	profile_0 = odp_lso_profile_create(pktio_a->hdl, &param_0);
+	CU_ASSERT_FATAL(profile_0 != ODP_LSO_PROFILE_INVALID);
+
+	profile_1 = odp_lso_profile_create(pktio_a->hdl, &param_1);
+	CU_ASSERT_FATAL(profile_1 != ODP_LSO_PROFILE_INVALID);
+
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile_1) == 0);
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile_0) == 0);
+}
+
+/* Verify that an LSO request set on a packet can be cleared again, that the
+ * payload offset survives the clear, and that it can be reset to
+ * ODP_PACKET_OFFSET_INVALID afterwards. */
+static void test_lso_request_clear(odp_lso_profile_t lso_profile, const uint8_t *data,
+				   uint32_t len, uint32_t hdr_len, uint32_t max_payload)
+{
+	odp_packet_t pkt;
+	odp_packet_lso_opt_t lso_opt;
+
+	memset(&lso_opt, 0, sizeof(odp_packet_lso_opt_t));
+	lso_opt.lso_profile     = lso_profile;
+	lso_opt.payload_offset  = hdr_len;
+	lso_opt.max_payload_len = max_payload;
+
+	pkt = create_packet(data, len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_lso_request(pkt) == 0);
+	CU_ASSERT(odp_packet_lso_request(pkt, &lso_opt) == 0);
+	CU_ASSERT(odp_packet_has_lso_request(pkt) != 0);
+	CU_ASSERT(odp_packet_payload_offset(pkt) == hdr_len);
+	odp_packet_lso_request_clr(pkt);
+	CU_ASSERT(odp_packet_has_lso_request(pkt) == 0);
+	CU_ASSERT(odp_packet_payload_offset(pkt) == hdr_len);
+	CU_ASSERT(odp_packet_payload_offset_set(pkt, ODP_PACKET_OFFSET_INVALID) == 0);
+	CU_ASSERT(odp_packet_payload_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
+
+	odp_packet_free(pkt);
+}
+
+/* Send 'test_packet' through a custom LSO profile (segment number written
+ * at 'segnum_offset') and verify the received segments: segment numbers are
+ * sequential, each payload is within 'max_payload', payload content matches
+ * and the total payload equals what was sent. Non-test traffic is filtered
+ * by the test Ethertype 0x88B5.
+ * Fix: payload comparison now uses the 'test_packet' parameter instead of
+ * the hard-coded test_packet_custom_eth_1 array (same bytes for the current
+ * caller, but the parameter was silently ignored). */
+static void lso_send_custom_eth(const uint8_t *test_packet, uint32_t pkt_len, uint32_t max_payload,
+				int use_opt)
+{
+	int i, ret, num;
+	odp_lso_profile_param_t param;
+	odp_lso_profile_t profile;
+	uint32_t offset, len, payload_len, payload_sum;
+	uint16_t segnum;
+	odp_packet_t pkt_out[MAX_NUM_SEG];
+	/* Ethernet 14B + custom headers 8B */
+	uint32_t hdr_len = 22;
+	/* Offset to "segment number" field */
+	uint32_t segnum_offset = 16;
+	uint32_t sent_payload = pkt_len - hdr_len;
+
+	odp_lso_profile_param_init(&param);
+	param.lso_proto = ODP_LSO_PROTO_CUSTOM;
+	param.custom.num_custom = 1;
+	param.custom.field[0].mod_op = ODP_LSO_ADD_SEGMENT_NUM;
+	param.custom.field[0].offset = segnum_offset;
+	param.custom.field[0].size   = 2;
+
+	profile = odp_lso_profile_create(pktio_a->hdl, &param);
+	CU_ASSERT_FATAL(profile != ODP_LSO_PROFILE_INVALID);
+
+	CU_ASSERT_FATAL(start_interfaces() == 0);
+
+	test_lso_request_clear(profile, test_packet, pkt_len, hdr_len, max_payload);
+
+	ret = send_packets(profile, pktio_a, pktio_b, test_packet, pkt_len, hdr_len,
+			   max_payload, 0, use_opt);
+	CU_ASSERT_FATAL(ret == 0);
+
+	ODPH_DBG("\n Sent payload length: %u bytes\n", sent_payload);
+
+	/* Wait a bit to receive all created segments. Timeout and MAX_NUM_SEG values should be
+	 * large enough to ensure that we receive all created segments. */
+	num = recv_packets(pktio_b, 100 * ODP_TIME_MSEC_IN_NS, pkt_out, MAX_NUM_SEG);
+	CU_ASSERT(num > 0);
+	CU_ASSERT(num < MAX_NUM_SEG);
+
+	offset = hdr_len;
+	payload_sum = 0;
+	segnum = 0xffff;
+	for (i = 0; i < num; i++) {
+		odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt_out[i], NULL);
+
+		/* Filter out possible non-test packets */
+		if (odp_be_to_cpu_16(eth->type) != 0x88B5)
+			continue;
+
+		len = odp_packet_len(pkt_out[i]);
+		payload_len = len - hdr_len;
+
+		ret = odp_packet_copy_to_mem(pkt_out[i], segnum_offset, 2, &segnum);
+
+		if (ret == 0) {
+			segnum = odp_be_to_cpu_16(segnum);
+			CU_ASSERT(segnum == i);
+		} else {
+			CU_FAIL("Seg num field read failed\n");
+		}
+
+		ODPH_DBG(" LSO segment[%u] payload: %u bytes\n", segnum, payload_len);
+
+		CU_ASSERT(payload_len <= max_payload);
+
+		/* Compare against the sent test packet, not a fixed array */
+		if (compare_data(pkt_out[i], hdr_len,
+				 test_packet + offset, payload_len) >= 0) {
+			ODPH_ERR(" Payload compare failed at offset %u\n", offset);
+			CU_FAIL("Payload compare failed\n");
+		}
+
+		offset      += payload_len;
+		payload_sum += payload_len;
+	}
+
+	ODPH_DBG(" Received payload length: %u bytes\n", payload_sum);
+
+	CU_ASSERT(payload_sum == sent_payload);
+
+	if (num > 0)
+		odp_packet_free_multi(pkt_out, num);
+
+	CU_ASSERT_FATAL(stop_interfaces() == 0);
+
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile) == 0);
+}
+
+/* Run the custom Ethernet LSO test with the 723 byte test packet,
+ * clamping the requested segment payload to interface capability. */
+static void lso_send_custom_eth_723(uint32_t max_payload, int use_opt)
+{
+	uint32_t pkt_len = sizeof(test_packet_custom_eth_1);
+
+	if (max_payload > pktio_a->capa.lso.max_payload_len)
+		max_payload = pktio_a->capa.lso.max_payload_len;
+
+	lso_send_custom_eth(test_packet_custom_eth_1, pkt_len, max_payload, use_opt);
+}
+
+/* Test case pairs: _pkt_meta variants attach the LSO request to packet
+ * metadata, _opt variants pass it to odp_pktout_send_lso(). */
+
+/* No segmentation needed: packet size 723 bytes, LSO segment payload 800 bytes */
+static void lso_send_custom_eth_723_800_pkt_meta(void)
+{
+	lso_send_custom_eth_723(800, 0);
+}
+
+static void lso_send_custom_eth_723_800_opt(void)
+{
+	lso_send_custom_eth_723(800, 1);
+}
+
+/* At least 2 segments: packet size 723 bytes, LSO segment payload 500 bytes */
+static void lso_send_custom_eth_723_500_pkt_meta(void)
+{
+	lso_send_custom_eth_723(500, 0);
+}
+
+static void lso_send_custom_eth_723_500_opt(void)
+{
+	lso_send_custom_eth_723(500, 1);
+}
+
+/* At least 3 segments: packet size 723 bytes, LSO segment payload 288 bytes */
+static void lso_send_custom_eth_723_288_pkt_meta(void)
+{
+	lso_send_custom_eth_723(288, 0);
+}
+
+static void lso_send_custom_eth_723_288_opt(void)
+{
+	lso_send_custom_eth_723(288, 1);
+}
+
+/* Send 'test_packet' through an IPv4 LSO profile and verify the received
+ * fragments: no packet errors, each payload within 'max_payload',
+ * fragmentation flag set when segmentation was needed, payload content and
+ * total length match what was sent. Non-test traffic is filtered by the
+ * test packet's fixed source/destination IPv4 addresses.
+ * Fix: the fragmentation check now compares the sent payload (not the whole
+ * frame length) against max_payload — IPv4 LSO segments the payload after
+ * 'hdr_len', so 'pkt_len > max_payload' could wrongly require fragmentation
+ * when hdr_len <= pkt_len - max_payload <= hdr_len + max_payload. The new
+ * condition is equivalent for all current callers. */
+static void lso_send_ipv4(const uint8_t *test_packet, uint32_t pkt_len, uint32_t max_payload,
+			  int use_opt)
+{
+	int i, ret, num;
+	odp_lso_profile_param_t param;
+	odp_lso_profile_t profile;
+	uint32_t offset, len, payload_len, payload_sum;
+	odp_packet_t packet[MAX_NUM_SEG];
+	/* Ethernet 14B + IPv4 header 20B */
+	uint32_t hdr_len = 34;
+	uint32_t sent_payload = pkt_len - hdr_len;
+
+	odp_lso_profile_param_init(&param);
+	param.lso_proto = ODP_LSO_PROTO_IPV4;
+
+	profile = odp_lso_profile_create(pktio_a->hdl, &param);
+	CU_ASSERT_FATAL(profile != ODP_LSO_PROFILE_INVALID);
+
+	CU_ASSERT_FATAL(start_interfaces() == 0);
+
+	test_lso_request_clear(profile, test_packet, pkt_len, hdr_len, max_payload);
+
+	ret = send_packets(profile, pktio_a, pktio_b, test_packet, pkt_len,
+			   hdr_len, max_payload, 14, use_opt);
+	CU_ASSERT_FATAL(ret == 0);
+
+	ODPH_DBG("\n Sent payload length: %u bytes\n", sent_payload);
+
+	/* Wait a bit to receive all created segments. Timeout and MAX_NUM_SEG values should be
+	 * large enough to ensure that we receive all created segments. */
+	num = recv_packets(pktio_b, 100 * ODP_TIME_MSEC_IN_NS, packet, MAX_NUM_SEG);
+	CU_ASSERT(num > 0);
+	CU_ASSERT(num < MAX_NUM_SEG);
+
+	offset = hdr_len;
+	payload_sum = 0;
+	for (i = 0; i < num; i++) {
+		if (!odp_packet_has_ipv4(packet[i]))
+			continue;
+
+		odph_ipv4hdr_t *ip = odp_packet_l3_ptr(packet[i], NULL);
+
+		/* Filter out possible non-test packets */
+		if (odp_be_to_cpu_32(ip->dst_addr) != 0xc0a80101 ||
+		    odp_be_to_cpu_32(ip->src_addr) != 0xc0a80102)
+			continue;
+
+		len = odp_packet_len(packet[i]);
+		payload_len = len - hdr_len;
+
+		ODPH_DBG(" LSO segment[%i] payload: %u bytes\n", i, payload_len);
+
+		CU_ASSERT(odp_packet_has_error(packet[i]) == 0);
+		CU_ASSERT(payload_len <= max_payload);
+
+		/* Payload larger than one segment implies IP fragmentation */
+		if (sent_payload > max_payload)
+			CU_ASSERT(odp_packet_has_ipfrag(packet[i]));
+
+		if (compare_data(packet[i], hdr_len, test_packet + offset, payload_len) >= 0) {
+			ODPH_ERR(" Payload compare failed at offset %u\n", offset);
+			CU_FAIL("Payload compare failed\n");
+		}
+
+		offset      += payload_len;
+		payload_sum += payload_len;
+	}
+
+	ODPH_DBG(" Received payload length: %u bytes\n", payload_sum);
+
+	CU_ASSERT(payload_sum == sent_payload);
+
+	if (num > 0)
+		odp_packet_free_multi(packet, num);
+
+	CU_ASSERT_FATAL(stop_interfaces() == 0);
+
+	CU_ASSERT_FATAL(odp_lso_profile_destroy(profile) == 0);
+}
+
+/* Run the IPv4 LSO test with the 325 byte UDP test packet, clamping the
+ * requested segment payload to interface capability. */
+static void lso_send_ipv4_udp_325(uint32_t max_payload, int use_opt)
+{
+	uint32_t pkt_len = sizeof(test_packet_ipv4_udp_325);
+
+	if (max_payload > pktio_a->capa.lso.max_payload_len)
+		max_payload = pktio_a->capa.lso.max_payload_len;
+
+	lso_send_ipv4(test_packet_ipv4_udp_325, pkt_len, max_payload, use_opt);
+}
+
+/* Run the IPv4 LSO test with the 1500 byte UDP test packet, clamping the
+ * requested segment payload to interface capability. */
+static void lso_send_ipv4_udp_1500(uint32_t max_payload, int use_opt)
+{
+	uint32_t pkt_len = sizeof(test_packet_ipv4_udp_1500);
+
+	if (max_payload > pktio_a->capa.lso.max_payload_len)
+		max_payload = pktio_a->capa.lso.max_payload_len;
+
+	lso_send_ipv4(test_packet_ipv4_udp_1500, pkt_len, max_payload, use_opt);
+}
+
+/* Test case pairs: _pkt_meta variants attach the LSO request to packet
+ * metadata, _opt variants pass it to odp_pktout_send_lso(). */
+
+/* No segmentation needed: packet size 325 bytes, LSO segment payload 700 bytes */
+static void lso_send_ipv4_325_700_pkt_meta(void)
+{
+	lso_send_ipv4_udp_325(700, 0);
+}
+
+static void lso_send_ipv4_325_700_opt(void)
+{
+	lso_send_ipv4_udp_325(700, 1);
+}
+
+/* At least 2 segments: packet size 1500 bytes, LSO segment payload 1000 bytes */
+static void lso_send_ipv4_1500_1000_pkt_meta(void)
+{
+	lso_send_ipv4_udp_1500(1000, 0);
+}
+
+static void lso_send_ipv4_1500_1000_opt(void)
+{
+	lso_send_ipv4_udp_1500(1000, 1);
+}
+
+/* At least 3 segments: packet size 1500 bytes, LSO segment payload 700 bytes */
+static void lso_send_ipv4_1500_700_pkt_meta(void)
+{
+	lso_send_ipv4_udp_1500(700, 0);
+}
+
+static void lso_send_ipv4_1500_700_opt(void)
+{
+	lso_send_ipv4_udp_1500(700, 1);
+}
+
+/* LSO test suite table. Conditional tests are skipped (INACTIVE) when the
+ * required LSO capability is missing or interface restart is disabled. */
+odp_testinfo_t lso_suite[] = {
+	ODP_TEST_INFO(lso_capability),
+	ODP_TEST_INFO_CONDITIONAL(lso_create_ipv4_profile, check_lso_ipv4),
+	ODP_TEST_INFO_CONDITIONAL(lso_create_custom_profile, check_lso_custom),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_325_700_pkt_meta, check_lso_ipv4_segs_1),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_325_700_opt, check_lso_ipv4_segs_1),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_1500_1000_pkt_meta, check_lso_ipv4_segs_2),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_1500_1000_opt, check_lso_ipv4_segs_2),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_1500_700_pkt_meta, check_lso_ipv4_segs_3),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_ipv4_1500_700_opt, check_lso_ipv4_segs_3),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_800_pkt_meta, check_lso_custom_segs_1),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_800_opt, check_lso_custom_segs_1),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_500_pkt_meta, check_lso_custom_segs_2),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_500_opt, check_lso_custom_segs_2),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_288_pkt_meta, check_lso_custom_segs_3),
+	ODP_TEST_INFO_CONDITIONAL(lso_send_custom_eth_723_288_opt, check_lso_custom_segs_3),
+	ODP_TEST_INFO_NULL
+};
diff --git a/test/validation/api/pktio/lso.h b/test/validation/api/pktio/lso.h
new file mode 100644
index 000000000..ce3dc7b64
--- /dev/null
+++ b/test/validation/api/pktio/lso.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_PKTIO_LSO_H_
+#define _ODP_TEST_PKTIO_LSO_H_
+
+#include <odp_cunit_common.h>
+
+/* test array init/term functions: */
+int lso_suite_term(void);
+int lso_suite_init(void);
+
+/* test arrays: */
+extern odp_testinfo_t lso_suite[];
+
+#endif
diff --git a/test/validation/api/pktio/parser.c b/test/validation/api/pktio/parser.c
new file mode 100644
index 000000000..7d243877c
--- /dev/null
+++ b/test/validation/api/pktio/parser.c
@@ -0,0 +1,609 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <test_packet_ipv4.h>
+#include <test_packet_ipv6.h>
+
+#include <odp/helper/odph_api.h>
+
+#include <stdlib.h>
+#include "parser.h"
+
+#define MAX_NUM_IFACES 2
+#define PKT_POOL_NUM 256
+#define PKT_POOL_BUF_LEN (2 * 1024)
+
+/**
+ * local container for pktio attributes
+ */
+typedef struct {
+	const char *name;
+	odp_pktio_t hdl;
+	odp_pktout_queue_t pktout;
+	odp_pktin_queue_t pktin;
+} pktio_info_t;
+
+/** Interface names used for testing */
+static const char *iface_name[MAX_NUM_IFACES];
+
+/** Test interfaces */
+/* NOTE(review): pktios/pktio_a/pktio_b/parser_pool are file-scope but not
+ * 'static' — presumably only parser.c uses them; confirm no other test file
+ * in this binary links against these names before making them static. */
+pktio_info_t pktios[MAX_NUM_IFACES];
+pktio_info_t *pktio_a;
+pktio_info_t *pktio_b;
+
+/** Number of interfaces being used (1=loopback, 2=pair) */
+static int num_ifaces;
+
+/** While testing real-world interfaces additional time may be needed for
+ * external network to enable link to pktio interface that just become up.
+ */
+static bool wait_for_network;
+
+/** Parser packet pool */
+odp_pool_t parser_pool = ODP_POOL_INVALID;
+
+/* Poll the interface link status for up to one second (100 x 10 ms) and
+ * return as soon as the link is up or its status is unknown. */
+static inline void wait_linkup(odp_pktio_t pktio)
+{
+	const uint64_t poll_ns = 10 * ODP_TIME_MSEC_IN_NS;
+	const int max_polls = 100;
+	int n;
+
+	for (n = 0; n < max_polls; n++) {
+		int status = odp_pktio_link_status(pktio);
+
+		if (status == ODP_PKTIO_LINK_STATUS_UNKNOWN ||
+		    status == ODP_PKTIO_LINK_STATUS_UP)
+			return;
+
+		/* Link still down, poll again after a short delay */
+		odp_time_wait_ns(poll_ns);
+	}
+}
+
+/* Create the shared parser packet pool. Returns 0 on success, -1 when pool
+ * capabilities cannot satisfy the required number/length of packets. */
+static int pkt_pool_create(void)
+{
+	odp_pool_param_t param;
+	odp_pool_capability_t pool_capa;
+
+	if (odp_pool_capability(&pool_capa)) {
+		ODPH_ERR("Unable to query pool capability\n");
+		return -1;
+	}
+
+	/* A zero capability limit means "no limit" */
+	if (pool_capa.pkt.max_num && pool_capa.pkt.max_num < PKT_POOL_NUM) {
+		ODPH_ERR("Packet pool size not supported: MAX=%" PRIu32 "\n",
+			 pool_capa.pkt.max_num);
+		return -1;
+	}
+
+	if (pool_capa.pkt.max_len && pool_capa.pkt.max_len < PKT_POOL_BUF_LEN) {
+		ODPH_ERR("Packet length not supported\n");
+		return -1;
+	}
+
+	if (pool_capa.pkt.max_seg_len && pool_capa.pkt.max_seg_len < PKT_POOL_BUF_LEN) {
+		ODPH_ERR("Segment length not supported\n");
+		return -1;
+	}
+
+	odp_pool_param_init(&param);
+	param.type = ODP_POOL_PACKET;
+	param.pkt.num = PKT_POOL_NUM;
+	param.pkt.len = PKT_POOL_BUF_LEN;
+	param.pkt.seg_len = PKT_POOL_BUF_LEN;
+
+	parser_pool = odp_pool_create("pkt_pool_default", &param);
+	if (parser_pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Packet pool create failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Open and configure one test interface in direct in/out mode with full
+ * packet parsing enabled. Returns the pktio handle, or ODP_PKTIO_INVALID on
+ * failure (the opened handle is closed on error — previously it was leaked
+ * on every error path after odp_pktio_open()). */
+static odp_pktio_t create_pktio(int iface_idx, odp_pool_t pool)
+{
+	odp_pktio_t pktio;
+	odp_pktio_config_t config;
+	odp_pktio_param_t pktio_param;
+	const char *iface = iface_name[iface_idx];
+
+	odp_pktio_param_init(&pktio_param);
+	pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+	pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+	pktio = odp_pktio_open(iface, pool, &pktio_param);
+	if (pktio == ODP_PKTIO_INVALID) {
+		ODPH_ERR("Failed to open %s\n", iface);
+		return ODP_PKTIO_INVALID;
+	}
+
+	/* Parse all layers so the parser tests can check every protocol flag */
+	odp_pktio_config_init(&config);
+	config.parser.layer = ODP_PROTO_LAYER_ALL;
+	if (odp_pktio_config(pktio, &config)) {
+		ODPH_ERR("Failed to configure %s\n", iface);
+		goto fail;
+	}
+
+	/* By default, single input and output queue is used */
+	if (odp_pktin_queue_config(pktio, NULL)) {
+		ODPH_ERR("Failed to config input queue for %s\n", iface);
+		goto fail;
+	}
+	if (odp_pktout_queue_config(pktio, NULL)) {
+		ODPH_ERR("Failed to config output queue for %s\n", iface);
+		goto fail;
+	}
+
+	if (wait_for_network)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+	return pktio;
+
+fail:
+	/* Bug fix: close the opened pktio instead of leaking the handle */
+	(void)odp_pktio_close(pktio);
+	return ODP_PKTIO_INVALID;
+}
+
+/* Allocate a packet from the parser pool and fill it with the given frame
+ * data. Returns ODP_PACKET_INVALID on allocation or copy failure. */
+static odp_packet_t create_packet(const uint8_t *data, uint32_t len)
+{
+	odp_packet_t pkt = odp_packet_alloc(parser_pool, len);
+
+	if (pkt == ODP_PACKET_INVALID)
+		return ODP_PACKET_INVALID;
+
+	if (odp_packet_copy_from_mem(pkt, 0, len, data) != 0) {
+		ODPH_ERR("Failed to copy test packet data\n");
+		odp_packet_free(pkt);
+		return ODP_PACKET_INVALID;
+	}
+
+	/* Ethernet header starts at offset zero */
+	odp_packet_l2_offset_set(pkt, 0);
+
+	return pkt;
+}
+
+/**
+ * Receive incoming packets and compare them to the original. Returns a valid
+ * packet handle only when a received packet matches the original byte for
+ * byte; non-matching packets are freed. Gives up after roughly 'ns'
+ * nanoseconds of wall-clock time.
+ */
+static odp_packet_t recv_and_cmp_packet(odp_pktin_queue_t pktin,
+					odp_packet_t orig_pkt, uint64_t ns)
+{
+	odp_packet_t result = ODP_PACKET_INVALID;
+	uint8_t *ref_data = odp_packet_data(orig_pkt);
+	uint32_t ref_len = odp_packet_len(orig_pkt);
+	odp_time_t deadline = odp_time_sum(odp_time_local(),
+					   odp_time_local_from_ns(ns));
+
+	do {
+		odp_packet_t rx_pkt;
+		int num = odp_pktin_recv(pktin, &rx_pkt, 1);
+
+		if (num < 0)
+			break;
+		if (num == 0)
+			continue;
+
+		if (odp_packet_len(rx_pkt) == ref_len &&
+		    memcmp(odp_packet_data(rx_pkt), ref_data, ref_len) == 0) {
+			result = rx_pkt;
+			break;
+		}
+
+		/* Not the packet we sent (e.g. unrelated traffic) — drop it */
+		odp_packet_free(rx_pkt);
+	} while (odp_time_cmp(deadline, odp_time_local()) > 0);
+
+	return result;
+}
+
+/* Overwrite the Ethernet source/destination addresses of 'pkt' with the MAC
+ * addresses of the 'src' and 'dst' pktio interfaces. */
+static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst)
+{
+	uint32_t l2_len;
+	odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &l2_len);
+	int mac_len;
+
+	mac_len = odp_pktio_mac_addr(src, &eth->src, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(mac_len == ODPH_ETHADDR_LEN);
+	CU_ASSERT(mac_len <= ODP_PKTIO_MACADDR_MAXSIZE);
+
+	mac_len = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(mac_len == ODPH_ETHADDR_LEN);
+	CU_ASSERT(mac_len <= ODP_PKTIO_MACADDR_MAXSIZE);
+}
+
+/**
+ * Creates a test packet from data array and loops it through the test pktio
+ * interfaces forcing packet parsing.
+ *
+ * Fixes: packets are freed *before* CU_FAIL_FATAL() — a fatal assertion
+ * leaves the test function, so cleanup placed after it never executed — and
+ * the parameters no longer shadow the file-scope pktio_a/pktio_b globals.
+ */
+static odp_packet_t loopback_packet(pktio_info_t *src, pktio_info_t *dst,
+				    const uint8_t *data, uint32_t len)
+{
+	odp_packet_t pkt;
+	odp_packet_t sent_pkt;
+
+	pkt = create_packet(data, len);
+	if (pkt == ODP_PACKET_INVALID) {
+		CU_FAIL("failed to generate test packet");
+		return ODP_PACKET_INVALID;
+	}
+
+	pktio_pkt_set_macs(pkt, src->hdl, dst->hdl);
+
+	/* Keep a reference copy for comparing against received packets */
+	sent_pkt = odp_packet_copy(pkt, parser_pool);
+	if (sent_pkt == ODP_PACKET_INVALID) {
+		odp_packet_free(pkt);
+		CU_FAIL_FATAL("failed to copy test packet");
+		return ODP_PACKET_INVALID;
+	}
+
+	while (1) {
+		int ret = odp_pktout_send(src->pktout, &pkt, 1);
+
+		if (ret < 0) {
+			odp_packet_free(pkt);
+			odp_packet_free(sent_pkt);
+			CU_FAIL_FATAL("failed to send test packet");
+			return ODP_PACKET_INVALID;
+		}
+		if (ret == 1)
+			break;
+	}
+
+	/* and wait for them to arrive back */
+	pkt = recv_and_cmp_packet(dst->pktin, sent_pkt, ODP_TIME_SEC_IN_NS);
+	odp_packet_free(sent_pkt);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_input(pkt) == dst->hdl);
+	CU_ASSERT(odp_packet_has_error(pkt) == 0);
+
+	return pkt;
+}
+
+/* Loop an ARP frame through the interfaces and check parser flags. */
+static void parser_test_arp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_arp,
+			      sizeof(test_packet_arp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_arp(pkt));
+
+	/* ARP must not be misclassified as an IP packet */
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv4/ICMP frame: ICMP flag set, other L4 flags clear. */
+static void parser_test_ipv4_icmp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv4_icmp,
+			      sizeof(test_packet_ipv4_icmp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_icmp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv4/TCP frame: TCP flag set, other L4 flags clear. */
+static void parser_test_ipv4_tcp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv4_tcp,
+			      sizeof(test_packet_ipv4_tcp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_tcp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv4/UDP frame: UDP flag set, other L4 flags clear. */
+static void parser_test_ipv4_udp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv4_udp,
+			      sizeof(test_packet_ipv4_udp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_udp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/VLAN/IPv4/UDP frame: VLAN flag set in addition to IPv4/UDP. Runs only
+ * with the 'loop' pktio (see loop_pktio predicate in parser_suite). */
+static void parser_test_vlan_ipv4_udp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_vlan_ipv4_udp,
+			      sizeof(test_packet_vlan_ipv4_udp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_vlan(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_udp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/QinQ/IPv4/UDP frame: both VLAN and QinQ flags set. Loop pktio only. */
+static void parser_test_vlan_qinq_ipv4_udp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_vlan_qinq_ipv4_udp,
+			      sizeof(test_packet_vlan_qinq_ipv4_udp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_vlan(pkt));
+	CU_ASSERT(odp_packet_has_vlan_qinq(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_udp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv4/SCTP frame: SCTP flag set, other L4 flags clear. */
+static void parser_test_ipv4_sctp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv4_sctp,
+			      sizeof(test_packet_ipv4_sctp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv4(pkt));
+	CU_ASSERT(odp_packet_has_sctp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv6(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv6/ICMPv6 frame: ICMP flag set, IPv4 and other L4 flags clear. */
+static void parser_test_ipv6_icmp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv6_icmp,
+			      sizeof(test_packet_ipv6_icmp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv6(pkt));
+	CU_ASSERT(odp_packet_has_icmp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv6/TCP frame: TCP flag set, other L4 flags clear. */
+static void parser_test_ipv6_tcp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv6_tcp,
+			      sizeof(test_packet_ipv6_tcp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv6(pkt));
+	CU_ASSERT(odp_packet_has_tcp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv6/UDP frame: UDP flag set, other L4 flags clear. */
+static void parser_test_ipv6_udp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv6_udp,
+			      sizeof(test_packet_ipv6_udp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv6(pkt));
+	CU_ASSERT(odp_packet_has_udp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/VLAN/IPv6/UDP frame: VLAN flag set in addition to IPv6/UDP. Loop pktio
+ * only. */
+static void parser_test_vlan_ipv6_udp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_vlan_ipv6_udp,
+			      sizeof(test_packet_vlan_ipv6_udp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_vlan(pkt));
+	CU_ASSERT(odp_packet_has_ipv6(pkt));
+	CU_ASSERT(odp_packet_has_udp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Eth/IPv6/SCTP frame: SCTP flag set, other L4 flags clear. */
+static void parser_test_ipv6_sctp(void)
+{
+	odp_packet_t pkt;
+
+	pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv6_sctp,
+			      sizeof(test_packet_ipv6_sctp));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_has_eth(pkt));
+	CU_ASSERT(odp_packet_has_ipv6(pkt));
+	CU_ASSERT(odp_packet_has_sctp(pkt));
+
+	CU_ASSERT(!odp_packet_has_ipv4(pkt));
+	CU_ASSERT(!odp_packet_has_tcp(pkt));
+	CU_ASSERT(!odp_packet_has_udp(pkt));
+
+	odp_packet_free(pkt);
+}
+
+/* Suite init: read interface names from the ODP_PKTIO_IF0/IF1 environment
+ * (defaulting to the 'loop' interface), create the packet pool, then open,
+ * configure and start each interface. Returns 0 on success, -1 on failure. */
+int parser_suite_init(void)
+{
+	int i;
+
+	if (getenv("ODP_WAIT_FOR_NETWORK"))
+		wait_for_network = true;
+
+	iface_name[0] = getenv("ODP_PKTIO_IF0");
+	iface_name[1] = getenv("ODP_PKTIO_IF1");
+	num_ifaces = 1;
+
+	if (!iface_name[0]) {
+		printf("No interfaces specified, using default \"loop\".\n");
+		iface_name[0] = "loop";
+	} else if (!iface_name[1]) {
+		printf("Using loopback interface: %s\n", iface_name[0]);
+	} else {
+		num_ifaces = 2;
+		printf("Using paired interfaces: %s %s\n",
+		       iface_name[0], iface_name[1]);
+	}
+
+	if (pkt_pool_create() != 0) {
+		ODPH_ERR("Failed to create parser pool\n");
+		return -1;
+	}
+
+	/* Create pktios and associate input/output queues */
+	for (i = 0; i < num_ifaces; ++i) {
+		pktio_info_t *io;
+
+		io = &pktios[i];
+		io->name = iface_name[i];
+		io->hdl = create_pktio(i, parser_pool);
+		if (io->hdl == ODP_PKTIO_INVALID) {
+			ODPH_ERR("Failed to open iface");
+			return -1;
+		}
+
+		if (odp_pktout_queue(io->hdl, &io->pktout, 1) != 1) {
+			ODPH_ERR("Failed to start iface: %s\n", io->name);
+			return -1;
+		}
+
+		if (odp_pktin_queue(io->hdl, &io->pktin, 1) != 1) {
+			ODPH_ERR("Failed to start iface: %s\n", io->name);
+			return -1;
+		}
+
+		if (odp_pktio_start(io->hdl)) {
+			ODPH_ERR("Failed to start iface: %s\n", io->name);
+			return -1;
+		}
+
+		wait_linkup(io->hdl);
+	}
+
+	/* With a single (loopback) interface both endpoints are the same */
+	pktio_a = &pktios[0];
+	pktio_b = &pktios[1];
+	if (num_ifaces == 1)
+		pktio_b = pktio_a;
+
+	return 0;
+}
+
+/* Suite teardown: stop and close every test interface, destroy the packet
+ * pool, and report inactive tests. Returns 0 on success, -1 if any step
+ * failed (all steps are still attempted). */
+int parser_suite_term(void)
+{
+	int i;
+	int status = 0;
+
+	for (i = 0; i < num_ifaces; i++) {
+		if (odp_pktio_stop(pktios[i].hdl)) {
+			ODPH_ERR("Failed to stop pktio: %s\n", pktios[i].name);
+			status = -1;
+		}
+		if (odp_pktio_close(pktios[i].hdl)) {
+			ODPH_ERR("Failed to close pktio: %s\n", pktios[i].name);
+			status = -1;
+		}
+	}
+
+	if (odp_pool_destroy(parser_pool)) {
+		ODPH_ERR("Failed to destroy packet pool\n");
+		status = -1;
+	}
+
+	if (odp_cunit_print_inactive())
+		status = -1;
+
+	return status;
+}
+
+/**
+ * Certain tests can only be run with 'loop' pktio: activate them only when
+ * the first test interface is the loopback device.
+ */
+static int loop_pktio(void)
+{
+	return strcmp(iface_name[0], "loop") ? ODP_TEST_INACTIVE : ODP_TEST_ACTIVE;
+}
+
+/* Parser test suite. VLAN cases depend on loop_pktio() because only the
+ * loopback interface is guaranteed to carry the tagged test frames. */
+odp_testinfo_t parser_suite[] = {
+	ODP_TEST_INFO(parser_test_arp),
+	ODP_TEST_INFO(parser_test_ipv4_icmp),
+	ODP_TEST_INFO(parser_test_ipv4_tcp),
+	ODP_TEST_INFO(parser_test_ipv4_udp),
+	ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_ipv4_udp, loop_pktio),
+	ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_qinq_ipv4_udp, loop_pktio),
+	ODP_TEST_INFO(parser_test_ipv4_sctp),
+	ODP_TEST_INFO(parser_test_ipv6_icmp),
+	ODP_TEST_INFO(parser_test_ipv6_tcp),
+	ODP_TEST_INFO(parser_test_ipv6_udp),
+	ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_ipv6_udp, loop_pktio),
+	ODP_TEST_INFO(parser_test_ipv6_sctp),
+	ODP_TEST_INFO_NULL
+};
diff --git a/test/validation/api/pktio/parser.h b/test/validation/api/pktio/parser.h
new file mode 100644
index 000000000..4424737fd
--- /dev/null
+++ b/test/validation/api/pktio/parser.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_PARSER_H_
+#define _ODP_TEST_PARSER_H_
+
+#include <odp_cunit_common.h>
+
+/* test array init/term functions: */
+int parser_suite_term(void);
+int parser_suite_init(void);
+
+/* test arrays: */
+extern odp_testinfo_t parser_suite[];
+
+#endif
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
new file mode 100644
index 000000000..deef4895a
--- /dev/null
+++ b/test/validation/api/pktio/pktio.c
@@ -0,0 +1,5517 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020-2024, Nokia
+ * Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#include <odp/helper/odph_api.h>
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include "parser.h"
+#include "lso.h"
+
+#define PKT_BUF_NUM 128
+#define PKT_BUF_SIZE (9 * 1024)
+#define PKT_LEN_NORMAL 64
+#define PKT_LEN_MAX (PKT_BUF_SIZE - ODPH_ETHHDR_LEN - \
+ ODPH_IPV4HDR_LEN - ODPH_UDPHDR_LEN)
+
+#define USE_MTU 0
+#define MAX_NUM_IFACES 2
+#define TEST_SEQ_INVALID ((uint32_t)~0)
+#define TEST_SEQ_MAGIC 0x92749451
+#define TX_BATCH_LEN 4
+#define PKTV_TX_BATCH_LEN 32
+#define PKTV_DEFAULT_SIZE 8
+#define MAX_QUEUES 128
+
+#define PKTIO_TS_INTERVAL (50 * ODP_TIME_MSEC_IN_NS)
+#define PKTIO_TS_MIN_RES 1000
+#define PKTIO_TS_MAX_RES 10000000000
+
+#define PKTIO_SRC_MAC {1, 2, 3, 4, 5, 6}
+#define PKTIO_DST_MAC {6, 5, 4, 3, 2, 1}
+#undef DEBUG_STATS
+
+/** interface names used for testing */
+static const char *iface_name[MAX_NUM_IFACES];
+
+/** number of interfaces being used (1=loopback, 2=pair) */
+static int num_ifaces;
+
+/** while testing real-world interfaces additional time may be
+ needed for external network to enable link to pktio
+ interface that just become up.*/
+static bool wait_for_network;
+
+/* Dummy global variable to avoid compiler optimizing out API calls */
+static volatile uint64_t test_pktio_dummy_u64;
+
+/** local container for pktio attributes */
+typedef struct {
+ const char *name;
+ odp_pktio_t id;
+ odp_pktout_queue_t pktout;
+ odp_queue_t queue_out;
+ odp_queue_t inq;
+ odp_pktin_mode_t in_mode;
+} pktio_info_t;
+
+/** magic number and sequence at start of UDP payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+ odp_u32be_t seq;
+} pkt_head_t;
+
+/** magic number at end of UDP payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+} pkt_tail_t;
+
+/** Run mode */
+typedef enum {
+ PKT_POOL_UNSEGMENTED,
+ PKT_POOL_SEGMENTED,
+} pkt_segmented_e;
+
+typedef enum {
+ TXRX_MODE_SINGLE,
+ TXRX_MODE_MULTI,
+ TXRX_MODE_MULTI_EVENT
+} txrx_mode_e;
+
+typedef enum {
+ RECV_TMO,
+ RECV_MQ_TMO,
+ RECV_MQ_TMO_NO_IDX,
+} recv_tmo_mode_e;
+
+typedef enum {
+ ETH_UNICAST,
+ ETH_BROADCAST,
+} eth_addr_type_e;
+
+/** size of transmitted packets */
+static uint32_t packet_len = PKT_LEN_NORMAL;
+
+/** default packet pool */
+odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
+
+/** default packet vector pool */
+odp_pool_t default_pktv_pool = ODP_POOL_INVALID;
+
+/** sequence number of IP packets */
+odp_atomic_u32_t ip_seq;
+
+/** Type of pool segmentation */
+pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
+
+odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+
+odp_pool_t pktv_pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+
+/* Poll link status for up to one second (100 x 10 ms). When the
+ * implementation reports a known link state, assert that the link is up.
+ * Fix: use the ODP_PKTIO_LINK_STATUS_* constants instead of the magic
+ * values -1 and 1 (the loop above already uses the named constants). */
+static inline void _pktio_wait_linkup(odp_pktio_t pktio)
+{
+	/* wait 1 second for link up */
+	uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+	int wait_num = 100;
+	int i;
+	int ret = ODP_PKTIO_LINK_STATUS_UNKNOWN;
+
+	for (i = 0; i < wait_num; i++) {
+		ret = odp_pktio_link_status(pktio);
+		if (ret == ODP_PKTIO_LINK_STATUS_UNKNOWN || ret == ODP_PKTIO_LINK_STATUS_UP)
+			break;
+		/* link is down, call status again after delay */
+		odp_time_wait_ns(wait_ns);
+	}
+
+	if (ret != ODP_PKTIO_LINK_STATUS_UNKNOWN) {
+		/* assert only if link state supported and
+		 * it's down. */
+		CU_ASSERT_FATAL(ret == ODP_PKTIO_LINK_STATUS_UP);
+	}
+}
+
+/* Clamp packet/segment lengths to pool capability limits (0 capability means
+ * "no limit") and apply the global pool_segmentation mode. */
+static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa)
+{
+	uint32_t max_len = PKT_BUF_SIZE;
+	uint32_t max_seg = PKT_BUF_SIZE;
+
+	if (capa->pkt.max_len && capa->pkt.max_len < max_len)
+		max_len = capa->pkt.max_len;
+	if (capa->pkt.max_seg_len && capa->pkt.max_seg_len < max_seg)
+		max_seg = capa->pkt.max_seg_len;
+
+	params->pkt.len = max_len;
+
+	if (pool_segmentation == PKT_POOL_SEGMENTED) {
+		/* Force segment to minimum size */
+		params->pkt.seg_len = 0;
+	} else {
+		/* PKT_POOL_UNSEGMENTED and any other value */
+		params->pkt.seg_len = max_seg;
+	}
+}
+
+/* Fill the Ethernet header MACs: source from 'src' pktio; destination either
+ * the 'dst' pktio address (ETH_UNICAST) or the broadcast address. */
+static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst,
+			       eth_addr_type_e dst_addr_type)
+{
+	uint32_t l2_len;
+	odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &l2_len);
+	int num;
+
+	num = odp_pktio_mac_addr(src, &eth->src, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(num == ODPH_ETHADDR_LEN);
+	CU_ASSERT(num <= ODP_PKTIO_MACADDR_MAXSIZE);
+
+	if (dst_addr_type != ETH_UNICAST) {
+		CU_ASSERT(odph_eth_addr_parse(&eth->dst, "ff:ff:ff:ff:ff:ff") == 0);
+		return;
+	}
+
+	num = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(num == ODPH_ETHADDR_LEN);
+	CU_ASSERT(num <= ODP_PKTIO_MACADDR_MAXSIZE);
+}
+
+/* Write a magic+sequence header after the L4 header and a magic tail at the
+ * end of the payload. Returns the sequence number used, or TEST_SEQ_INVALID
+ * on failure.
+ * NOTE(review): 'tstseq' is a non-atomic static counter — assumes packets are
+ * created from a single thread; confirm against callers. */
+static uint32_t pktio_pkt_set_seq(odp_packet_t pkt, size_t l4_hdr_len)
+{
+	static uint32_t tstseq;
+	size_t off;
+	pkt_head_t head;
+	pkt_tail_t tail;
+
+	off = odp_packet_l4_offset(pkt);
+	if (off == ODP_PACKET_OFFSET_INVALID) {
+		CU_FAIL("packet L4 offset not set");
+		return TEST_SEQ_INVALID;
+	}
+
+	head.magic = TEST_SEQ_MAGIC;
+	head.seq = tstseq;
+
+	/* Sequence header goes right after the L4 header */
+	off += l4_hdr_len;
+	if (odp_packet_copy_from_mem(pkt, off, sizeof(head), &head) != 0)
+		return TEST_SEQ_INVALID;
+
+	tail.magic = TEST_SEQ_MAGIC;
+	off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
+	if (odp_packet_copy_from_mem(pkt, off, sizeof(tail), &tail) != 0)
+		return TEST_SEQ_INVALID;
+
+	tstseq++;
+
+	return head.seq;
+}
+
+/* Validate the magic/sequence markers written by pktio_pkt_set_seq() and
+ * return the sequence number, or TEST_SEQ_INVALID when the packet is invalid,
+ * truncated, or carries wrong magic values. */
+static uint32_t pktio_pkt_seq_hdr(odp_packet_t pkt, size_t l4_hdr_len)
+{
+	size_t off;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_head_t head;
+	pkt_tail_t tail;
+
+	if (pkt == ODP_PACKET_INVALID) {
+		ODPH_ERR("pkt invalid\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	off = odp_packet_l4_offset(pkt);
+	if (off == ODP_PACKET_OFFSET_INVALID) {
+		ODPH_ERR("offset invalid\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	off += l4_hdr_len;
+	if (odp_packet_copy_to_mem(pkt, off, sizeof(head), &head) != 0) {
+		ODPH_ERR("header copy failed\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	if (head.magic != TEST_SEQ_MAGIC) {
+		ODPH_ERR("header magic invalid 0x%" PRIx32 "\n", head.magic);
+		odp_packet_print(pkt);
+		return TEST_SEQ_INVALID;
+	}
+
+	/* Tail magic is checked only when the packet kept its full length */
+	if (odp_packet_len(pkt) == packet_len) {
+		off = packet_len - sizeof(tail);
+		if (odp_packet_copy_to_mem(pkt, off, sizeof(tail),
+					   &tail) != 0) {
+			ODPH_ERR("header copy failed\n");
+			return TEST_SEQ_INVALID;
+		}
+
+		if (tail.magic == TEST_SEQ_MAGIC) {
+			seq = head.seq;
+			CU_ASSERT(seq != TEST_SEQ_INVALID);
+		} else {
+			ODPH_ERR("tail magic invalid 0x%" PRIx32 "\n", tail.magic);
+		}
+	} else {
+		ODPH_ERR("packet length invalid: %" PRIu32 "(%" PRIu32 ")\n",
+			 odp_packet_len(pkt), packet_len);
+	}
+
+	return seq;
+}
+
+/* Convenience wrapper for the common UDP case. */
+static uint32_t pktio_pkt_seq(odp_packet_t pkt)
+{
+	return pktio_pkt_seq_hdr(pkt, ODPH_UDPHDR_LEN);
+}
+
+/* Build Ethernet and IPv4 headers in place (L2/L3 offsets set, IPv4 checksum
+ * updated). 'proto' selects the IPv4 payload protocol.
+ * NOTE(review): writes via odp_packet_data() — assumes headers fit in the
+ * first segment; confirm for segmented pools. */
+static void pktio_init_packet_eth_ipv4(odp_packet_t pkt, uint8_t proto)
+{
+	odph_ethhdr_t *eth;
+	odph_ipv4hdr_t *ip;
+	char *buf;
+	uint16_t seq;
+	uint8_t src_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_SRC_MAC;
+	uint8_t dst_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_DST_MAC;
+	int pkt_len = odp_packet_len(pkt);
+
+	buf = odp_packet_data(pkt);
+
+	/* Ethernet */
+	odp_packet_l2_offset_set(pkt, 0);
+	eth = (odph_ethhdr_t *)buf;
+	memcpy(eth->src.addr, src_mac, ODPH_ETHADDR_LEN);
+	memcpy(eth->dst.addr, dst_mac, ODPH_ETHADDR_LEN);
+	eth->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+	/* IP */
+	odp_packet_l3_offset_set(pkt, ODPH_ETHHDR_LEN);
+	ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);
+	ip->dst_addr = odp_cpu_to_be_32(0x0a000064);
+	ip->src_addr = odp_cpu_to_be_32(0x0a000001);
+	ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
+	ip->tot_len = odp_cpu_to_be_16(pkt_len - ODPH_ETHHDR_LEN);
+	ip->ttl = 128;
+	ip->proto = proto;
+	/* IP ID doubles as a per-packet sequence number */
+	seq = odp_atomic_fetch_inc_u32(&ip_seq);
+	ip->id = odp_cpu_to_be_16(seq);
+	ip->chksum = 0;
+	ip->frag_offset = 0;
+	ip->tos = 0;
+	odph_ipv4_csum_update(pkt);
+}
+
+/* Build an Eth/IPv4/UDP test packet and stamp the payload sequence markers.
+ * Returns the sequence number or TEST_SEQ_INVALID. */
+static uint32_t pktio_init_packet_udp(odp_packet_t pkt)
+{
+	odph_udphdr_t *udp;
+	char *buf;
+	int pkt_len = odp_packet_len(pkt);
+
+	buf = odp_packet_data(pkt);
+
+	pktio_init_packet_eth_ipv4(pkt, ODPH_IPPROTO_UDP);
+
+	/* UDP */
+	odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	udp = (odph_udphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	udp->src_port = odp_cpu_to_be_16(12049);
+	udp->dst_port = odp_cpu_to_be_16(12050);
+	udp->length = odp_cpu_to_be_16(pkt_len -
+				       ODPH_ETHHDR_LEN - ODPH_IPV4HDR_LEN);
+	udp->chksum = 0;
+
+	return pktio_pkt_set_seq(pkt, ODPH_UDPHDR_LEN);
+}
+
+/* Build an Eth/IPv4/SCTP test packet and stamp the payload sequence markers.
+ * Returns the sequence number or TEST_SEQ_INVALID. */
+static uint32_t pktio_init_packet_sctp(odp_packet_t pkt)
+{
+	odph_sctphdr_t *sctp;
+	char *buf;
+
+	buf = odp_packet_data(pkt);
+
+	pktio_init_packet_eth_ipv4(pkt, ODPH_IPPROTO_SCTP);
+
+	/* SCTP */
+	odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	sctp = (odph_sctphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	sctp->src_port = odp_cpu_to_be_16(12049);
+	sctp->dst_port = odp_cpu_to_be_16(12050);
+	sctp->tag = 0;
+	sctp->chksum = 0;
+
+	return pktio_pkt_set_seq(pkt, ODPH_SCTPHDR_LEN);
+}
+
+/* Clear the IPv4 header checksum and the L4 (UDP or SCTP) checksum fields.
+ * Returns 0 on success, -1 (with a CU failure) for an unexpected protocol. */
+static int pktio_zero_checksums(odp_packet_t pkt)
+{
+	uint32_t seg_len;
+	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, &seg_len);
+
+	ip->chksum = 0;
+
+	switch (ip->proto) {
+	case ODPH_IPPROTO_UDP: {
+		odph_udphdr_t *udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, &seg_len);
+
+		udp->chksum = 0;
+		break;
+	}
+	case ODPH_IPPROTO_SCTP: {
+		odph_sctphdr_t *sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, &seg_len);
+
+		sctp->chksum = 0;
+		break;
+	}
+	default:
+		CU_FAIL("unexpected L4 protocol");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Recompute the IPv4 and L4 checksums of a test packet. Returns 0 on
+ * success, -1 for an unexpected L4 protocol.
+ * Fix: the return value of pktio_zero_checksums() was ignored; on an
+ * unexpected protocol the function kept going and reported the same failure
+ * a second time. Bail out immediately instead. */
+static int pktio_fixup_checksums(odp_packet_t pkt)
+{
+	odph_ipv4hdr_t *ip;
+
+	if (pktio_zero_checksums(pkt) != 0)
+		return -1;
+
+	odph_ipv4_csum_update(pkt);
+
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+	if (ip->proto == ODPH_IPPROTO_UDP) {
+		odph_udphdr_t *udp;
+
+		udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+		udp->chksum = odph_ipv4_udp_chksum(pkt);
+	} else {
+		/* pktio_zero_checksums() succeeded, so this must be SCTP */
+		odph_sctp_chksum_set(pkt);
+	}
+
+	return 0;
+}
+
+/* Create the default packet pool (size adjusted to pool capabilities and the
+ * global segmentation mode). Fails if the pool already exists. */
+static int default_pool_create(void)
+{
+	odp_pool_param_t params;
+	odp_pool_capability_t pool_capa;
+	char pool_name[ODP_POOL_NAME_LEN];
+
+	if (odp_pool_capability(&pool_capa) != 0)
+		return -1;
+
+	if (default_pkt_pool != ODP_POOL_INVALID)
+		return -1;
+
+	odp_pool_param_init(&params);
+	set_pool_len(&params, &pool_capa);
+	params.pkt.num = PKT_BUF_NUM;
+	params.type = ODP_POOL_PACKET;
+
+	/* Name includes the segmentation mode so re-runs get distinct pools */
+	snprintf(pool_name, sizeof(pool_name),
+		 "pkt_pool_default_%d", pool_segmentation);
+	default_pkt_pool = odp_pool_create(pool_name, &params);
+	if (default_pkt_pool == ODP_POOL_INVALID)
+		return -1;
+
+	return 0;
+}
+
+/* Create the default packet vector pool. Fails when vector pools cannot hold
+ * PKT_BUF_NUM vectors or when the pool already exists. */
+static int default_pktv_pool_create(void)
+{
+	char pool_name[ODP_POOL_NAME_LEN];
+	odp_pool_capability_t pool_capa;
+	odp_pool_param_t params;
+
+	if (odp_pool_capability(&pool_capa) != 0)
+		return -1;
+
+	if (pool_capa.vector.max_num < PKT_BUF_NUM)
+		return -1;
+
+	if (default_pktv_pool != ODP_POOL_INVALID)
+		return -1;
+
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_VECTOR;
+	params.vector.num = PKT_BUF_NUM;
+	params.vector.max_size = pool_capa.vector.max_size;
+
+	snprintf(pool_name, sizeof(pool_name),
+		 "pktv_pool_default_%d", pool_segmentation);
+	default_pktv_pool = odp_pool_create(pool_name, &params);
+	if (default_pktv_pool == ODP_POOL_INVALID)
+		return -1;
+
+	return 0;
+}
+
+/* Open a test interface in the given in/out modes and apply the default
+ * single-queue input/output configuration (tests may reconfigure before
+ * starting the interface). Fatal CU assert on open failure. */
+static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
+				odp_pktout_mode_t omode)
+{
+	odp_pktio_t pktio;
+	odp_pktio_param_t pktio_param;
+	odp_pktin_queue_param_t pktin_param;
+	const char *iface = iface_name[iface_idx];
+
+	odp_pktio_param_init(&pktio_param);
+
+	pktio_param.in_mode = imode;
+	pktio_param.out_mode = omode;
+
+	pktio = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	CU_ASSERT(odp_pktio_to_u64(pktio) !=
+		  odp_pktio_to_u64(ODP_PKTIO_INVALID));
+
+	odp_pktin_queue_param_init(&pktin_param);
+
+	/* Atomic queue when in scheduled mode */
+	pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+	/* By default, single input and output queue in all modes. Config can
+	 * be overridden before starting the interface. */
+	CU_ASSERT(odp_pktin_queue_config(pktio, &pktin_param) == 0);
+	CU_ASSERT(odp_pktout_queue_config(pktio, NULL) == 0);
+
+	if (wait_for_network)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+	return pktio;
+}
+
+/* Open a test interface with packet vector input enabled. Returns
+ * ODP_PKTIO_INVALID when vector mode is unsupported (test is skipped).
+ * Fixes: the opened pktio is now closed before returning ODP_PKTIO_INVALID
+ * (it was leaked), and 'capa' is no longer read when the capability query
+ * fails (it was used uninitialized). */
+static odp_pktio_t create_pktv_pktio(int iface_idx, odp_pktin_mode_t imode,
+				     odp_pktout_mode_t omode, odp_schedule_sync_t sync_mode)
+{
+	const char *iface = iface_name[iface_idx];
+	odp_pktout_queue_param_t pktout_param;
+	odp_pktin_queue_param_t pktin_param;
+	odp_pktio_param_t pktio_param;
+	odp_pktio_capability_t capa;
+	odp_pktio_t pktio;
+
+	odp_pktio_param_init(&pktio_param);
+
+	pktio_param.in_mode = imode;
+	pktio_param.out_mode = omode;
+
+	pktio = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+	if (odp_pktio_capability(pktio, &capa) != 0) {
+		CU_FAIL("pktio capability query failed");
+		(void)odp_pktio_close(pktio);
+		return ODP_PKTIO_INVALID;
+	}
+
+	if (!capa.vector.supported) {
+		printf("Vector mode is not supported. Test Skipped.\n");
+		(void)odp_pktio_close(pktio);
+		return ODP_PKTIO_INVALID;
+	}
+
+	odp_pktin_queue_param_init(&pktin_param);
+
+	if (imode == ODP_PKTIN_MODE_SCHED) {
+		pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
+		pktin_param.queue_param.sched.sync = sync_mode;
+		pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+	}
+
+	pktin_param.hash_enable = 0;
+	pktin_param.num_queues = 1;
+	pktin_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+	pktin_param.vector.enable = 1;
+	pktin_param.vector.pool = pktv_pool[iface_idx];
+	/* Clamp the requested vector size to what the interface supports */
+	pktin_param.vector.max_size = capa.vector.max_size < PKTV_DEFAULT_SIZE ?
+		capa.vector.max_size : PKTV_DEFAULT_SIZE;
+	pktin_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+	CU_ASSERT(odp_pktin_queue_config(pktio, &pktin_param) == 0);
+
+	odp_pktout_queue_param_init(&pktout_param);
+	pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+	pktout_param.num_queues = 1;
+	CU_ASSERT(odp_pktout_queue_config(pktio, &pktout_param) == 0);
+
+	if (wait_for_network)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+	return pktio;
+}
+
+/* Drain all events pending on the pktio input: from its event queue in QUEUE
+ * mode, from the scheduler in SCHED mode. No-op in DIRECT mode. */
+static int flush_input_queue(odp_pktio_t pktio, odp_pktin_mode_t imode)
+{
+	odp_queue_t queue = ODP_QUEUE_INVALID;
+	odp_event_t ev;
+
+	if (imode == ODP_PKTIN_MODE_DIRECT)
+		return 0;
+
+	if (imode == ODP_PKTIN_MODE_QUEUE)
+		CU_ASSERT_FATAL(odp_pktin_event_queue(pktio, &queue, 1) == 1);
+
+	/* flush any pending events */
+	for (;;) {
+		ev = (queue != ODP_QUEUE_INVALID) ?
+			odp_queue_deq(queue) :
+			odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+		if (ev == ODP_EVENT_INVALID)
+			break;
+
+		odp_event_free(ev);
+	}
+
+	return 0;
+}
+
+/* Allocate and initialize up to 'num' UDP test packets.
+ *
+ * Each packet gets a sequence number (stored in pkt_seq[]), MAC addresses
+ * for the src/dst interfaces (dst_addr_type selects unicast/multicast/
+ * broadcast addressing) and either corrected (fix_cs true) or zeroed
+ * checksums. A non-NULL user pointer is set so the receive side can check
+ * it was reset. Stops at the first allocation/init failure and returns the
+ * number of packets successfully created.
+ */
+static int create_packets_udp(odp_packet_t pkt_tbl[],
+ uint32_t pkt_seq[],
+ int num,
+ odp_pktio_t pktio_src,
+ odp_pktio_t pktio_dst,
+ odp_bool_t fix_cs,
+ eth_addr_type_e dst_addr_type)
+{
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID)
+ break;
+
+ pkt_seq[i] = pktio_init_packet_udp(pkt_tbl[i]);
+ if (pkt_seq[i] == TEST_SEQ_INVALID) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, dst_addr_type);
+
+ /* Set user pointer. It should be NULL on receive side. */
+ odp_packet_user_ptr_set(pkt_tbl[i], (void *)1);
+
+ if (fix_cs)
+ ret = pktio_fixup_checksums(pkt_tbl[i]);
+ else
+ ret = pktio_zero_checksums(pkt_tbl[i]);
+ if (ret != 0) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+ }
+
+ return i;
+}
+
+/* Allocate and initialize up to 'num' SCTP test packets.
+ *
+ * Like create_packets_udp() but with SCTP payload, always unicast
+ * addressing and always zeroed checksums. Returns the number of packets
+ * successfully created (stops at the first failure).
+ */
+static int create_packets_sctp(odp_packet_t pkt_tbl[],
+ uint32_t pkt_seq[],
+ int num,
+ odp_pktio_t pktio_src,
+ odp_pktio_t pktio_dst)
+{
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID)
+ break;
+
+ pkt_seq[i] = pktio_init_packet_sctp(pkt_tbl[i]);
+ if (pkt_seq[i] == TEST_SEQ_INVALID) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, ETH_UNICAST);
+
+ ret = pktio_zero_checksums(pkt_tbl[i]);
+ if (ret != 0) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+ }
+
+ return i;
+}
+
+/* Default test packet generator: unicast UDP packets with valid checksums. */
+static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
+ odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
+{
+ const odp_bool_t fix_checksums = true;
+
+ return create_packets_udp(pkt_tbl, pkt_seq, num, pktio_src,
+ pktio_dst, fix_checksums, ETH_UNICAST);
+}
+
+/* Receive up to 'num' packets from 'pktio_rx' without blocking.
+ *
+ * DIRECT mode polls the first pktin queue; QUEUE mode dequeues plain
+ * events; otherwise events come from the scheduler. TXRX_MODE_MULTI uses
+ * the multi-event dequeue variants, other modes fetch one event at a time.
+ * In vector_mode, packet vector events are unpacked into pkt_tbl (excess
+ * packets beyond 'num' are freed). Non-packet events are freed.
+ * Returns the number of packets stored in pkt_tbl, or negative on error.
+ */
+static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+ int num, txrx_mode_e mode, odp_bool_t vector_mode)
+{
+ odp_event_t evt_tbl[num];
+ int num_evts = 0;
+ int num_pkts = 0;
+ int i, ret;
+
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_DIRECT) {
+ odp_pktin_queue_t pktin;
+
+ ret = odp_pktin_queue(pktio_rx->id, &pktin, 1);
+
+ if (ret != 1) {
+ CU_FAIL_FATAL("No pktin queues");
+ return -1;
+ }
+
+ return odp_pktin_recv(pktin, pkt_tbl, num);
+ }
+
+ if (mode == TXRX_MODE_MULTI) {
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
+ num_evts = odp_queue_deq_multi(pktio_rx->inq, evt_tbl,
+ num);
+ else
+ num_evts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
+ evt_tbl, num);
+ } else {
+ odp_event_t evt_tmp = ODP_EVENT_INVALID;
+
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
+ evt_tmp = odp_queue_deq(pktio_rx->inq);
+ else
+ evt_tmp = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (evt_tmp != ODP_EVENT_INVALID)
+ evt_tbl[num_evts++] = evt_tmp;
+ }
+
+ /* convert events to packets, discarding any non-packet events */
+ for (i = 0; i < num_evts; ++i) {
+ if (odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET) {
+ pkt_tbl[num_pkts++] = odp_packet_from_event(evt_tbl[i]);
+ } else if (vector_mode && odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET_VECTOR &&
+ num_pkts < num) {
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkts;
+ int pktv_len;
+
+ pktv = odp_packet_vector_from_event(evt_tbl[i]);
+ pktv_len = odp_packet_vector_tbl(pktv, &pkts);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+
+ /* Make sure too many packets are not received */
+ if (num_pkts + pktv_len > num) {
+ int new_pkts = num - num_pkts;
+
+ memcpy(&pkt_tbl[num_pkts], pkts, new_pkts * sizeof(odp_packet_t));
+ odp_packet_free_multi(&pkts[new_pkts], pktv_len - new_pkts);
+ num_pkts += new_pkts;
+
+ } else {
+ memcpy(&pkt_tbl[num_pkts], pkts, pktv_len * sizeof(odp_packet_t));
+ num_pkts += pktv_len;
+ }
+ /* Packets copied out above; only the vector itself is freed */
+ odp_packet_vector_free(pktv);
+ } else {
+ odp_event_free(evt_tbl[i]);
+ }
+ }
+
+ return num_pkts;
+}
+
+/* Poll 'pktio_rx' for up to 'ns' nanoseconds, collecting packets whose
+ * sequence number (read past an L4 header of 'l4_hdr_len' bytes) matches
+ * the next expected entry of seq_tbl[]. Packets that do not match the
+ * in-order expected sequence are freed. Returns the number of matching
+ * packets stored in pkt_tbl (may be less than 'num' on timeout).
+ */
+static int wait_for_packets_hdr(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+ uint32_t seq_tbl[], int num, txrx_mode_e mode,
+ uint64_t ns, size_t l4_hdr_len, odp_bool_t vector_mode)
+{
+ odp_time_t wait_time, end, start;
+ int num_rx = 0;
+ int i;
+ odp_packet_t pkt_tmp[num];
+
+ wait_time = odp_time_local_from_ns(ns);
+ start = odp_time_local();
+ end = odp_time_sum(start, wait_time);
+
+ while (num_rx < num && odp_time_cmp(end, odp_time_local()) > 0) {
+ int n = get_packets(pktio_rx, pkt_tmp, num - num_rx, mode, vector_mode);
+
+ if (n < 0)
+ break;
+
+ if (n == 0)
+ continue;
+
+ for (i = 0; i < n; ++i) {
+ if (pktio_pkt_seq_hdr(pkt_tmp[i], l4_hdr_len) ==
+ seq_tbl[num_rx])
+ pkt_tbl[num_rx++] = pkt_tmp[i];
+ else
+ odp_packet_free(pkt_tmp[i]);
+ }
+ }
+
+ return num_rx;
+}
+
+/* Wait for test packets assuming the default UDP L4 header length. */
+static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+ uint32_t seq_tbl[], int num, txrx_mode_e mode,
+ uint64_t ns, odp_bool_t vector_mode)
+{
+ const size_t l4_hdr_len = ODPH_UDPHDR_LEN;
+
+ return wait_for_packets_hdr(pktio_rx, pkt_tbl, seq_tbl, num, mode,
+ ns, l4_hdr_len, vector_mode);
+}
+
+/* Receive expected packets with odp_pktin_recv_tmo() (RECV_TMO) or
+ * odp_pktin_recv_mq_tmo() (RECV_MQ_TMO / RECV_MQ_TMO_NO_IDX; the queue
+ * index output is requested only in RECV_MQ_TMO mode).
+ *
+ * Non-matching packets are freed; with 'no_pkt' set all packets are
+ * dropped and the call is retried so the timeout itself can be measured.
+ * If fewer than 'num' packets arrive, the last call's duration must be at
+ * least 'ns'. Returns the number of expected packets received.
+ */
+static int recv_packets_tmo(odp_pktio_t pktio, odp_packet_t pkt_tbl[],
+ uint32_t seq_tbl[], int num, recv_tmo_mode_e mode,
+ uint64_t tmo, uint64_t ns, int no_pkt)
+{
+ odp_packet_t pkt_tmp[num];
+ odp_pktin_queue_t pktin[MAX_QUEUES];
+ odp_time_t ts1, ts2;
+ int num_rx = 0;
+ int num_q;
+ int i;
+ int n;
+ uint32_t from_val = 0;
+ uint32_t *from = NULL;
+
+ if (mode == RECV_MQ_TMO)
+ from = &from_val;
+
+ num_q = odp_pktin_queue(pktio, pktin, MAX_QUEUES);
+ CU_ASSERT_FATAL(num_q > 0);
+
+ /** Multiple odp_pktin_recv_tmo()/odp_pktin_recv_mq_tmo() calls may be
+ * required to discard possible non-test packets. */
+ do {
+ ts1 = odp_time_global();
+ if (mode == RECV_TMO)
+ n = odp_pktin_recv_tmo(pktin[0], pkt_tmp, num - num_rx,
+ tmo);
+ else
+ n = odp_pktin_recv_mq_tmo(pktin, (uint32_t)num_q, from, pkt_tmp,
+ num - num_rx, tmo);
+ ts2 = odp_time_global();
+
+ CU_ASSERT(n >= 0);
+
+ if (n <= 0)
+ break;
+
+ /* When we don't expect any packets, drop all packets and
+ * retry timeout test. */
+ if (no_pkt) {
+ printf(" drop %i dummy packets\n", n);
+ odp_packet_free_multi(pkt_tmp, n);
+ continue;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
+ pkt_tbl[num_rx++] = pkt_tmp[i];
+ else
+ odp_packet_free(pkt_tmp[i]);
+ }
+ if (mode == RECV_MQ_TMO)
+ CU_ASSERT(from_val < (uint32_t)num_q);
+ } while (num_rx < num);
+
+ if (num_rx < num) {
+ /* The last receive call timed out: it must have blocked for
+ * at least the requested 'ns' */
+ uint64_t diff = odp_time_diff_ns(ts2, ts1);
+
+ if (diff < ns)
+ printf(" diff %" PRIu64 ", ns %" PRIu64 "\n",
+ diff, ns);
+
+ CU_ASSERT(diff >= ns);
+ }
+
+ return num_rx;
+}
+
+/* Transmit all 'pkts' packets on 'pktout', looping over partial sends.
+ * Returns 0 on success, -1 (after a fatal CUnit failure) on send error. */
+static int send_packets(odp_pktout_queue_t pktout,
+ odp_packet_t *pkt_tbl, unsigned pkts)
+{
+ unsigned num_sent;
+
+ for (num_sent = 0; num_sent < pkts;) {
+ int n = odp_pktout_send(pktout, &pkt_tbl[num_sent],
+ pkts - num_sent);
+
+ if (n < 0) {
+ CU_FAIL_FATAL("failed to send test packet");
+ return -1;
+ }
+
+ num_sent += n;
+ }
+
+ return 0;
+}
+
+/* Convert packets to events and enqueue all of them to 'queue', looping
+ * over partial enqueues. Returns 0 on success, -1 on enqueue error. */
+static int send_packet_events(odp_queue_t queue,
+ odp_packet_t *pkt_tbl, unsigned pkts)
+{
+ odp_event_t events[pkts];
+ unsigned idx;
+ unsigned num_enq = 0;
+
+ for (idx = 0; idx < pkts; idx++)
+ events[idx] = odp_packet_to_event(pkt_tbl[idx]);
+
+ while (num_enq < pkts) {
+ int n = odp_queue_enq_multi(queue, &events[num_enq],
+ pkts - num_enq);
+
+ if (n < 0) {
+ CU_FAIL_FATAL("failed to send test packet as events");
+ return -1;
+ }
+
+ num_enq += n;
+ }
+
+ return 0;
+}
+
+/* Query the parser capability of 'pktio' and report which protocol layers
+ * (L2/L3/L4) its parser covers. All flags stay 0 if the capability query
+ * fails or no layer is supported. */
+static void check_parser_capa(odp_pktio_t pktio, int *l2, int *l3, int *l4)
+{
+ int ret;
+ odp_pktio_capability_t capa;
+
+ *l2 = 0;
+ *l3 = 0;
+ *l4 = 0;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ CU_ASSERT(ret == 0);
+
+ if (ret < 0)
+ return;
+
+ if (capa.config.parser.layer == ODP_PROTO_LAYER_ALL ||
+ capa.config.parser.layer == ODP_PROTO_LAYER_L4) {
+ /* L4 parsing implies L2 and L3 */
+ *l2 = 1;
+ *l3 = 1;
+ *l4 = 1;
+ } else if (capa.config.parser.layer == ODP_PROTO_LAYER_L3) {
+ *l2 = 1;
+ *l3 = 1;
+ } else if (capa.config.parser.layer == ODP_PROTO_LAYER_L2) {
+ *l2 = 1;
+ }
+}
+
+/* Send 'num_pkts' test packets from interface A and verify they arrive on
+ * interface B with the expected parse flags and metadata.
+ *
+ * When the global packet_len is USE_MTU, packets are sized to the smaller
+ * of the two interfaces' output maxlen (capped by PKT_LEN_MAX and the pool
+ * capability). Send path depends on 'mode': per-packet send, multi send,
+ * or event-queue enqueue. Received packets are checked for input pktio,
+ * parser flags (per RX capability), cleared user metadata, and
+ * odp_packet_input_set() round-trip, then freed.
+ */
+static void pktio_txrx_multi(pktio_info_t *pktio_info_a,
+ pktio_info_t *pktio_info_b,
+ int num_pkts, txrx_mode_e mode,
+ odp_bool_t vector_mode)
+{
+ odp_packet_t tx_pkt[num_pkts];
+ odp_packet_t rx_pkt[num_pkts];
+ uint32_t tx_seq[num_pkts];
+ int i, ret, num_rx;
+ int parser_l2, parser_l3, parser_l4;
+ odp_pktio_t pktio_a = pktio_info_a->id;
+ odp_pktio_t pktio_b = pktio_info_b->id;
+ int pktio_index_b = odp_pktio_index(pktio_b);
+
+ /* Check RX interface parser capability */
+ check_parser_capa(pktio_b, &parser_l2, &parser_l3, &parser_l4);
+
+ if (packet_len == USE_MTU) {
+ odp_pool_capability_t pool_capa;
+ uint32_t maxlen;
+
+ maxlen = odp_pktout_maxlen(pktio_a);
+ if (odp_pktout_maxlen(pktio_b) < maxlen)
+ maxlen = odp_pktout_maxlen(pktio_b);
+ CU_ASSERT_FATAL(maxlen > 0);
+ packet_len = maxlen;
+ if (packet_len > PKT_LEN_MAX)
+ packet_len = PKT_LEN_MAX;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+ if (pool_capa.pkt.max_len &&
+ packet_len > pool_capa.pkt.max_len)
+ packet_len = pool_capa.pkt.max_len;
+ }
+
+ /* generate test packets to send */
+ ret = create_packets(tx_pkt, tx_seq, num_pkts, pktio_a, pktio_b);
+ if (ret != num_pkts) {
+ CU_FAIL("failed to generate test packets");
+ return;
+ }
+
+ /* send packet(s) out */
+ if (mode == TXRX_MODE_SINGLE) {
+ for (i = 0; i < num_pkts; ++i) {
+ ret = odp_pktout_send(pktio_info_a->pktout,
+ &tx_pkt[i], 1);
+ if (ret != 1) {
+ CU_FAIL_FATAL("failed to send test packet");
+ odp_packet_free(tx_pkt[i]);
+ return;
+ }
+ }
+ } else if (mode == TXRX_MODE_MULTI) {
+ send_packets(pktio_info_a->pktout, tx_pkt, num_pkts);
+ } else {
+ send_packet_events(pktio_info_a->queue_out, tx_pkt, num_pkts);
+ }
+
+ /* and wait for them to arrive back */
+ num_rx = wait_for_packets(pktio_info_b, rx_pkt, tx_seq, num_pkts, mode,
+ ODP_TIME_SEC_IN_NS, vector_mode);
+ CU_ASSERT(num_rx == num_pkts);
+ if (num_rx != num_pkts)
+ ODPH_ERR("received %i, out of %i packets\n", num_rx, num_pkts);
+
+ for (i = 0; i < num_rx; ++i) {
+ odp_packet_data_range_t range;
+ uint16_t sum;
+ odp_packet_t pkt = rx_pkt[i];
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_input(pkt) == pktio_b);
+ CU_ASSERT(odp_packet_input_index(pkt) == pktio_index_b);
+ CU_ASSERT(odp_packet_has_error(pkt) == 0);
+ if (parser_l2) {
+ CU_ASSERT(odp_packet_has_l2(pkt));
+ CU_ASSERT(odp_packet_has_eth(pkt));
+ }
+ if (parser_l3) {
+ CU_ASSERT(odp_packet_has_l3(pkt));
+ CU_ASSERT(odp_packet_has_ipv4(pkt));
+ }
+ if (parser_l4) {
+ CU_ASSERT(odp_packet_has_l4(pkt));
+ CU_ASSERT(odp_packet_has_udp(pkt));
+ }
+
+ CU_ASSERT(odp_packet_user_flag(pkt) == 0);
+ CU_ASSERT(odp_packet_user_ptr(pkt) == NULL);
+ CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
+
+ odp_packet_input_set(pkt, ODP_PKTIO_INVALID);
+ CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
+ CU_ASSERT(odp_packet_input_index(pkt) < 0);
+
+ odp_packet_input_set(pkt, pktio_b);
+ CU_ASSERT(odp_packet_input(pkt) == pktio_b);
+ CU_ASSERT(odp_packet_input_index(pkt) == pktio_index_b);
+
+ /* Dummy read to ones complement in case pktio has set it */
+ sum = odp_packet_ones_comp(pkt, &range);
+ if (range.length > 0)
+ test_pktio_dummy_u64 += sum;
+
+ /* Dummy read to flow hash in case pktio has set it */
+ if (odp_packet_has_flow_hash(pkt))
+ test_pktio_dummy_u64 += odp_packet_flow_hash(pkt);
+
+ odp_packet_free(pkt);
+ }
+}
+
+/* Core tx/rx test driver.
+ *
+ * Opens and starts every test interface in the requested pktin mode
+ * (packet-vector variant when vector_mode is set, pktout event queues for
+ * TXRX_MODE_MULTI_EVENT), runs pktio_txrx_multi() from the first interface
+ * to the second (or loopback with a single interface), then stops, flushes
+ * and closes all interfaces.
+ */
+static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
+ txrx_mode_e mode, odp_schedule_sync_t sync_mode,
+ odp_bool_t vector_mode)
+{
+ int ret, i, if_b;
+ pktio_info_t pktios[MAX_NUM_IFACES];
+ pktio_info_t *io;
+
+ /* create pktios and associate input/output queues */
+ for (i = 0; i < num_ifaces; ++i) {
+ odp_pktout_queue_t pktout;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+ odp_pktout_mode_t out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ if (mode == TXRX_MODE_MULTI_EVENT)
+ out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+ io = &pktios[i];
+
+ io->name = iface_name[i];
+ if (vector_mode)
+ io->id = create_pktv_pktio(i, in_mode, out_mode, sync_mode);
+ else
+ io->id = create_pktio(i, in_mode, out_mode);
+ if (io->id == ODP_PKTIO_INVALID) {
+ CU_FAIL("failed to open iface");
+ return;
+ }
+
+ if (mode == TXRX_MODE_MULTI_EVENT) {
+ CU_ASSERT_FATAL(odp_pktout_event_queue(io->id,
+ &queue, 1) == 1);
+ } else {
+ CU_ASSERT_FATAL(odp_pktout_queue(io->id,
+ &pktout, 1) == 1);
+ io->pktout = pktout;
+ }
+
+ io->queue_out = queue;
+ io->in_mode = in_mode;
+
+ if (in_mode == ODP_PKTIN_MODE_QUEUE) {
+ CU_ASSERT_FATAL(odp_pktin_event_queue(io->id, &queue, 1)
+ == 1);
+ io->inq = queue;
+ } else {
+ io->inq = ODP_QUEUE_INVALID;
+ }
+
+ ret = odp_pktio_start(io->id);
+ CU_ASSERT(ret == 0);
+
+ _pktio_wait_linkup(io->id);
+ }
+
+ /* if we have two interfaces then send through one and receive on
+ * another but if there's only one assume it's a loopback */
+ if_b = (num_ifaces == 1) ? 0 : 1;
+ pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts, mode, vector_mode);
+
+ for (i = 0; i < num_ifaces; ++i) {
+ ret = odp_pktio_stop(pktios[i].id);
+ CU_ASSERT_FATAL(ret == 0);
+ flush_input_queue(pktios[i].id, in_mode);
+ ret = odp_pktio_close(pktios[i].id);
+ CU_ASSERT(ret == 0);
+ }
+}
+
+/* Plain queue input, one packet at a time: single then batch sized run. */
+static void pktio_test_plain_queue(void)
+{
+ const int counts[2] = {1, TX_BATCH_LEN};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_QUEUE, counts[i], TXRX_MODE_SINGLE, 0, false);
+}
+
+/* Plain queue input with multi-packet receive: batch then single run. */
+static void pktio_test_plain_multi(void)
+{
+ const int counts[2] = {TX_BATCH_LEN, 1};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_QUEUE, counts[i], TXRX_MODE_MULTI, 0, false);
+}
+
+/* Plain queue input with event-queue output: single then batch sized run. */
+static void pktio_test_plain_multi_event(void)
+{
+ const int counts[2] = {1, TX_BATCH_LEN};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_QUEUE, counts[i], TXRX_MODE_MULTI_EVENT, 0, false);
+}
+
+/* Scheduled input, one packet at a time: single then batch sized run. */
+static void pktio_test_sched_queue(void)
+{
+ const int counts[2] = {1, TX_BATCH_LEN};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_SCHED, counts[i], TXRX_MODE_SINGLE, 0, false);
+}
+
+/* Scheduled input with multi-packet receive: batch then single run. */
+static void pktio_test_sched_multi(void)
+{
+ const int counts[2] = {TX_BATCH_LEN, 1};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_SCHED, counts[i], TXRX_MODE_MULTI, 0, false);
+}
+
+/* Scheduled input with event-queue output: single then batch sized run. */
+static void pktio_test_sched_multi_event(void)
+{
+ const int counts[2] = {1, TX_BATCH_LEN};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_SCHED, counts[i], TXRX_MODE_MULTI_EVENT, 0, false);
+}
+
+/* Direct input, single packet, single-packet receive. */
+static void pktio_test_recv(void)
+{
+ const int num_pkts = 1;
+
+ test_txrx(ODP_PKTIN_MODE_DIRECT, num_pkts, TXRX_MODE_SINGLE, 0, false);
+}
+
+/* Direct input, batch of packets, multi-packet receive. */
+static void pktio_test_recv_multi(void)
+{
+ const int num_pkts = TX_BATCH_LEN;
+
+ test_txrx(ODP_PKTIN_MODE_DIRECT, num_pkts, TXRX_MODE_MULTI, 0, false);
+}
+
+/* Direct input with event-queue output: single then batch sized run. */
+static void pktio_test_recv_multi_event(void)
+{
+ const int counts[2] = {1, TX_BATCH_LEN};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_txrx(ODP_PKTIN_MODE_DIRECT, counts[i], TXRX_MODE_MULTI_EVENT, 0, false);
+}
+
+/* Configure the maximum number of input/output queues (with hashing when
+ * more than one input queue is available), send a batch from the last
+ * output queue and collect it by polling every input queue until all
+ * packets arrive or one second elapses.
+ */
+static void pktio_test_recv_queue(void)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktout_queue_t pktout_queue[MAX_QUEUES];
+ odp_pktin_queue_t pktin_queue[MAX_QUEUES];
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_packet_t tmp_pkt[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_time_t wait_time, end;
+ int num_rx = 0;
+ int num_queues;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ num_queues = capa.max_input_queues;
+ in_queue_param.num_queues = num_queues;
+ in_queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_pktout_queue_param_init(&out_queue_param);
+ out_queue_param.num_queues = capa.max_output_queues;
+
+ ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; ++i)
+ _pktio_wait_linkup(pktio[i]);
+
+ /* Single interface acts as its own loopback peer */
+ pktio_tx = pktio[0];
+ if (num_ifaces > 1)
+ pktio_rx = pktio[1];
+ else
+ pktio_rx = pktio_tx;
+
+ /* Allocate and initialize test packets */
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ if (ret != TX_BATCH_LEN) {
+ CU_FAIL("Failed to generate test packets");
+ return;
+ }
+
+ /* Send packets */
+ num_queues = odp_pktout_queue(pktio_tx, pktout_queue, MAX_QUEUES);
+ CU_ASSERT_FATAL(num_queues > 0);
+ if (num_queues > MAX_QUEUES)
+ num_queues = MAX_QUEUES;
+
+ ret = odp_pktout_send(pktout_queue[num_queues - 1], pkt_tbl,
+ TX_BATCH_LEN);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ /* Receive packets */
+ num_queues = odp_pktin_queue(pktio_rx, pktin_queue, MAX_QUEUES);
+ CU_ASSERT_FATAL(num_queues > 0);
+ if (num_queues > MAX_QUEUES)
+ num_queues = MAX_QUEUES;
+
+ wait_time = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
+ end = odp_time_sum(odp_time_local(), wait_time);
+ do {
+ int n = 0;
+
+ /* Poll each input queue until one returns packets */
+ for (i = 0; i < num_queues; i++) {
+ n = odp_pktin_recv(pktin_queue[i], tmp_pkt,
+ TX_BATCH_LEN);
+ if (n != 0)
+ break;
+ }
+ if (n < 0)
+ break;
+ for (i = 0; i < n; i++) {
+ if (pktio_pkt_seq(tmp_pkt[i]) == pkt_seq[num_rx])
+ pkt_tbl[num_rx++] = tmp_pkt[i];
+ else
+ odp_packet_free(tmp_pkt[i]);
+ }
+ } while (num_rx < TX_BATCH_LEN &&
+ odp_time_cmp(end, odp_time_local()) > 0);
+
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Exercise the timed receive calls in the given mode.
+ *
+ * RECV_TMO configures a single input queue for odp_pktin_recv_tmo();
+ * the MQ modes configure up to MAX_QUEUES hashed input queues for
+ * odp_pktin_recv_mq_tmo(). Six test packets are sent, then received with
+ * several wait settings: a long wait, no wait, zero wait, and a 100 ms
+ * timeout whose duration is verified by recv_packets_tmo().
+ */
+static void test_recv_tmo(recv_tmo_mode_e mode)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_t pktout_queue;
+ int test_pkt_count = 6;
+ odp_packet_t pkt_tbl[test_pkt_count];
+ uint32_t pkt_seq[test_pkt_count];
+ uint64_t ns;
+ uint32_t num_q;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ if (mode == RECV_TMO)
+ num_q = 1;
+ else
+ num_q = (capa.max_input_queues < MAX_QUEUES) ?
+ capa.max_input_queues : MAX_QUEUES;
+ in_queue_param.num_queues = num_q;
+ in_queue_param.hash_enable = (num_q > 1) ? 1 : 0;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ memset(pkt_seq, 0, sizeof(pkt_seq));
+
+ /* Timeout to verify: 100 ms */
+ ns = 100 * ODP_TIME_MSEC_IN_NS;
+
+ ret = create_packets(pkt_tbl, pkt_seq, test_pkt_count, pktio_tx,
+ pktio_rx);
+ CU_ASSERT_FATAL(ret == test_pkt_count);
+
+ ret = odp_pktout_send(pktout_queue, pkt_tbl, test_pkt_count);
+ CU_ASSERT_FATAL(ret == test_pkt_count);
+
+ ret = recv_packets_tmo(pktio_rx, &pkt_tbl[0], &pkt_seq[0], 1, mode,
+ odp_pktin_wait_time(10 * ODP_TIME_SEC_IN_NS),
+ 0, 0);
+ CU_ASSERT_FATAL(ret == 1);
+
+ ret = recv_packets_tmo(pktio_rx, &pkt_tbl[1], &pkt_seq[1], 1, mode,
+ ODP_PKTIN_NO_WAIT, 0, 0);
+ CU_ASSERT_FATAL(ret == 1);
+
+ ret = recv_packets_tmo(pktio_rx, &pkt_tbl[2], &pkt_seq[2], 1, mode,
+ odp_pktin_wait_time(0), 0, 0);
+ CU_ASSERT_FATAL(ret == 1);
+
+ ret = recv_packets_tmo(pktio_rx, &pkt_tbl[3], &pkt_seq[3], 3, mode,
+ odp_pktin_wait_time(ns), ns, 0);
+ CU_ASSERT_FATAL(ret == 3);
+
+ for (i = 0; i < test_pkt_count; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Single input queue odp_pktin_recv_tmo() timeout test. */
+static void pktio_test_recv_tmo(void)
+{
+ test_recv_tmo(RECV_TMO);
+}
+
+/* Multi-queue odp_pktin_recv_mq_tmo() test, with and without the source
+ * queue index output. */
+static void pktio_test_recv_mq_tmo(void)
+{
+ const recv_tmo_mode_e modes[2] = {RECV_MQ_TMO, RECV_MQ_TMO_NO_IDX};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_recv_tmo(modes[i]);
+}
+
+/* Run the scheduled multi-receive test with maximum size packets. */
+static void pktio_test_recv_mtu(void)
+{
+ /* USE_MTU makes pktio_txrx_multi() size packets to the interfaces'
+ * output maxlen; restore the normal length afterwards */
+ packet_len = USE_MTU;
+ pktio_test_sched_multi();
+ packet_len = PKT_LEN_NORMAL;
+}
+
+/* Verify that input and output maxlen queries return nonzero values. */
+static void pktio_test_maxlen(void)
+{
+ odp_pktio_t pktio;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktout_maxlen(pktio) > 0);
+ CU_ASSERT(odp_pktin_maxlen(pktio) > 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+/* Test-activation check: the maxlen_set test is active only when the
+ * interface reports the capa.set_op.op.maxlen capability. */
+static int pktio_check_maxlen_set(void)
+{
+ odp_pktio_param_t param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int rc;
+
+ odp_pktio_param_init(&param);
+ param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ rc = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ return (rc >= 0 && capa.set_op.op.maxlen) ?
+ ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
+/* Exercise odp_pktio_maxlen_set() across the capability matrix: equal
+ * in/out lengths, input-only, output-only, and independent in/out, setting
+ * min and max supported values and finally restoring the original lengths.
+ */
+static void pktio_test_maxlen_set(void)
+{
+ odp_pktio_capability_t capa;
+ int ret;
+ uint32_t maxlen, input_orig, output_orig;
+
+ odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(!odp_pktio_capability(pktio, &capa));
+
+ /* Remember original lengths so they can be restored at the end */
+ input_orig = odp_pktin_maxlen(pktio);
+ CU_ASSERT(input_orig > 0);
+
+ output_orig = odp_pktout_maxlen(pktio);
+ CU_ASSERT(output_orig > 0);
+
+ if (capa.maxlen.equal) { /* Input and output values have to be equal */
+ CU_ASSERT(capa.maxlen.min_input == capa.maxlen.min_output);
+ CU_ASSERT(capa.maxlen.max_input == capa.maxlen.max_output);
+ CU_ASSERT(capa.maxlen.max_input > capa.maxlen.min_input);
+
+ maxlen = capa.maxlen.min_input;
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, maxlen, maxlen));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == maxlen);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == maxlen);
+
+ maxlen = capa.maxlen.max_input;
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, maxlen, maxlen));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == maxlen);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == maxlen);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, input_orig, input_orig));
+ } else {
+ CU_ASSERT(capa.maxlen.max_input || capa.maxlen.max_output);
+ if (capa.maxlen.max_output == 0) { /* Only input supported */
+ CU_ASSERT(capa.maxlen.min_output == 0);
+ CU_ASSERT(capa.maxlen.min_input < capa.maxlen.max_input);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.min_input, 0));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.min_input);
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.max_input, 0));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.max_input);
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, input_orig, 0));
+ } else if (capa.maxlen.max_input == 0) { /* Only output supported */
+ CU_ASSERT(capa.maxlen.min_input == 0);
+ CU_ASSERT(capa.maxlen.min_output < capa.maxlen.max_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, 0, capa.maxlen.min_output));
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.min_output);
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, 0, capa.maxlen.max_output));
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.max_output);
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, 0, output_orig));
+ } else { /* Both directions supported */
+ CU_ASSERT(capa.maxlen.min_input < capa.maxlen.max_input);
+ CU_ASSERT(capa.maxlen.min_output < capa.maxlen.max_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.min_input,
+ capa.maxlen.min_output));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.min_input);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.min_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.max_input,
+ capa.maxlen.max_output));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.max_input);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.max_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.max_input,
+ capa.maxlen.min_output));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.max_input);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.min_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, capa.maxlen.min_input,
+ capa.maxlen.max_output));
+ CU_ASSERT(odp_pktin_maxlen(pktio) == capa.maxlen.min_input);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == capa.maxlen.max_output);
+
+ CU_ASSERT(!odp_pktio_maxlen_set(pktio, input_orig, output_orig));
+ }
+ }
+ /* Original lengths must be back in effect */
+ CU_ASSERT(odp_pktin_maxlen(pktio) == input_orig);
+ CU_ASSERT(odp_pktout_maxlen(pktio) == output_orig);
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT(ret == 0);
+}
+
+/* Check promiscuous mode query/set: default off, then (when the set
+ * operation is supported) enable and disable, verifying each state. */
+static void pktio_test_promisc(void)
+{
+ int ret;
+ odp_pktio_capability_t capa;
+
+ odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
+
+ ret = odp_pktio_promisc_mode(pktio);
+ CU_ASSERT(ret >= 0);
+ CU_ASSERT(ret == 0 || ret == 1);
+
+ if (capa.set_op.op.promisc_mode) {
+ /* Disabled by default */
+ CU_ASSERT(ret == 0);
+ }
+
+ if (!capa.set_op.op.promisc_mode) {
+ printf("promiscuous mode not supported\n");
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT(ret == 0);
+ return;
+ }
+
+ ret = odp_pktio_promisc_mode_set(pktio, 1);
+ CU_ASSERT(0 == ret);
+
+ /* Verify that promisc mode set */
+ ret = odp_pktio_promisc_mode(pktio);
+ CU_ASSERT(1 == ret);
+
+ ret = odp_pktio_promisc_mode_set(pktio, 0);
+ CU_ASSERT(0 == ret);
+
+ /* Verify that promisc mode is not set */
+ ret = odp_pktio_promisc_mode(pktio);
+ CU_ASSERT(0 == ret);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT(ret == 0);
+}
+
+/* Check MAC address query and (when supported) set: correct length,
+ * failure on a too-small buffer, and set/read-back round trip. */
+static void pktio_test_mac(void)
+{
+ unsigned char mac_addr[ODP_PKTIO_MACADDR_MAXSIZE];
+ unsigned char mac_addr_ref[ODP_PKTIO_MACADDR_MAXSIZE] = {
+ 0xA0, 0xB0, 0xC0, 0xD0, 0xE0, 0xF0};
+ int mac_len;
+ int ret;
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ printf("testing mac for %s\n", iface_name[0]);
+
+ mac_len = odp_pktio_mac_addr(pktio, mac_addr,
+ ODP_PKTIO_MACADDR_MAXSIZE);
+ CU_ASSERT(ODPH_ETHADDR_LEN == mac_len);
+ CU_ASSERT(ODP_PKTIO_MACADDR_MAXSIZE >= mac_len);
+
+ printf(" %X:%X:%X:%X:%X:%X ",
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ /* Fail case: wrong addr_size. Expected <0. */
+ mac_len = odp_pktio_mac_addr(pktio, mac_addr, 2);
+ CU_ASSERT(mac_len < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
+ if (capa.set_op.op.mac_addr) {
+ /* Fail case: wrong addr_size. Expected <0. */
+ ret = odp_pktio_mac_addr_set(pktio, mac_addr_ref, 2);
+ CU_ASSERT_FATAL(ret < 0);
+
+ ret = odp_pktio_mac_addr_set(pktio, mac_addr_ref,
+ ODPH_ETHADDR_LEN);
+ CU_ASSERT_FATAL(ret == 0);
+
+ mac_len = odp_pktio_mac_addr(pktio, mac_addr,
+ ODPH_ETHADDR_LEN);
+ CU_ASSERT(ODPH_ETHADDR_LEN == mac_len);
+
+ CU_ASSERT(odp_memcmp(mac_addr_ref, mac_addr,
+ ODPH_ETHADDR_LEN) == 0);
+ } else
+ printf("\n mac address set not supported for %s!\n",
+ iface_name[0]);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT(0 == ret);
+}
+
+/* Verify that the pktio parameter/config init functions reset every field
+ * to its documented default, regardless of the byte pattern ('fill') the
+ * structs held beforehand.
+ */
+static void test_defaults(uint8_t fill)
+{
+ odp_pktio_param_t pktio_p;
+ odp_pktin_queue_param_t qp_in;
+ odp_pktout_queue_param_t qp_out;
+ odp_pktio_config_t pktio_conf;
+
+ memset(&pktio_p, fill, sizeof(pktio_p));
+ odp_pktio_param_init(&pktio_p);
+ CU_ASSERT_EQUAL(pktio_p.in_mode, ODP_PKTIN_MODE_DIRECT);
+ CU_ASSERT_EQUAL(pktio_p.out_mode, ODP_PKTOUT_MODE_DIRECT);
+
+ memset(&qp_in, fill, sizeof(qp_in));
+ odp_pktin_queue_param_init(&qp_in);
+ CU_ASSERT_EQUAL(qp_in.op_mode, ODP_PKTIO_OP_MT);
+ CU_ASSERT_EQUAL(qp_in.classifier_enable, 0);
+ CU_ASSERT_EQUAL(qp_in.hash_enable, 0);
+ CU_ASSERT_EQUAL(qp_in.hash_proto.all_bits, 0);
+ CU_ASSERT_EQUAL(qp_in.num_queues, 1);
+ CU_ASSERT_EQUAL(qp_in.queue_size[0], 0);
+ CU_ASSERT_EQUAL(qp_in.queue_param.enq_mode, ODP_QUEUE_OP_MT);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.prio, odp_schedule_default_prio());
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.sync, ODP_SCHED_SYNC_PARALLEL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.group, ODP_SCHED_GROUP_ALL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.lock_count, 0);
+ CU_ASSERT_EQUAL(qp_in.queue_param.order, ODP_QUEUE_ORDER_KEEP);
+ CU_ASSERT_EQUAL(qp_in.queue_param.nonblocking, ODP_BLOCKING);
+ CU_ASSERT_EQUAL(qp_in.queue_param.context, NULL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.context_len, 0);
+ CU_ASSERT_EQUAL(qp_in.queue_param_ovr, NULL);
+ CU_ASSERT_EQUAL(qp_in.vector.enable, false);
+
+ memset(&qp_out, fill, sizeof(qp_out));
+ odp_pktout_queue_param_init(&qp_out);
+ CU_ASSERT_EQUAL(qp_out.op_mode, ODP_PKTIO_OP_MT);
+ CU_ASSERT_EQUAL(qp_out.num_queues, 1);
+ CU_ASSERT_EQUAL(qp_out.queue_size[0], 0);
+
+ memset(&pktio_conf, fill, sizeof(pktio_conf));
+ odp_pktio_config_init(&pktio_conf);
+ CU_ASSERT_EQUAL(pktio_conf.pktin.all_bits, 0);
+ CU_ASSERT_EQUAL(pktio_conf.pktout.all_bits, 0);
+ CU_ASSERT_EQUAL(pktio_conf.parser.layer, ODP_PROTO_LAYER_ALL);
+ CU_ASSERT_EQUAL(pktio_conf.enable_loop, false);
+ CU_ASSERT_EQUAL(pktio_conf.inbound_ipsec, false);
+ CU_ASSERT_EQUAL(pktio_conf.outbound_ipsec, false);
+ CU_ASSERT_EQUAL(pktio_conf.enable_lso, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv4, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv6, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.max_wait_time, 0);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.max_num_frags, 2);
+}
+
+/* Run the defaults check against both all-zero and all-ones fill
+ * patterns. */
+static void pktio_test_default_values(void)
+{
+ const uint8_t fill_patterns[2] = {0x00, 0xff};
+ int i;
+
+ for (i = 0; i < 2; i++)
+ test_defaults(fill_patterns[i]);
+}
+
+/* Check that an interface can be opened and closed repeatedly, and that
+ * opening a nonexistent interface fails. */
+static void pktio_test_open(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_param_t pktio_param;
+ int i;
+
+ /* test the sequence open->close->open->close() */
+ for (i = 0; i < 2; ++i) {
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open("nothere", default_pkt_pool, &pktio_param);
+ CU_ASSERT(pktio == ODP_PKTIO_INVALID);
+}
+
+/* Check odp_pktio_lookup(): finds an open interface by name, a second
+ * open of the same name fails, and lookup fails after close. */
+static void pktio_test_lookup(void)
+{
+ odp_pktio_t pktio, pktio_inval;
+ odp_pktio_param_t pktio_param;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
+ CU_ASSERT(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktio_lookup(iface_name[0]) == pktio);
+
+ /* Interface is already open: a second open must fail */
+ pktio_inval = odp_pktio_open(iface_name[0], default_pkt_pool,
+ &pktio_param);
+ CU_ASSERT(pktio_inval == ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+
+ CU_ASSERT(odp_pktio_lookup(iface_name[0]) == ODP_PKTIO_INVALID);
+}
+
+/* Check odp_pktio_index() returns a valid index and that
+ * ODP_PKTIO_MAX_INDEX bounds odp_pktio_max_index(). */
+static void pktio_test_index(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_param_t pktio_param;
+ int ndx;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
+ CU_ASSERT(pktio != ODP_PKTIO_INVALID);
+
+ ndx = odp_pktio_index(pktio);
+ CU_ASSERT(ndx >= 0);
+
+ CU_ASSERT(ODP_PKTIO_MAX_INDEX >= odp_pktio_max_index());
+ CU_ASSERT(ODP_PKTIO_MAX_INDEX >= 0 && ODP_PKTIO_MAX_INDEX <= 1024);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+/* Smoke test: call odp_pktio_print() on every test interface. */
+static void pktio_test_print(void)
+{
+ odp_pktio_t pktio;
+ int i;
+
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ /* Print pktio debug info and test that the
+ * odp_pktio_print() function is implemented. */
+ odp_pktio_print(pktio);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+}
+
+/* Check odp_pktio_config(): default config values, NULL config accepted,
+ * and applying the full capability-reported config (minus inbound IPsec,
+ * which would require IPsec setup). */
+static void pktio_test_pktio_config(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+ const char *iface = iface_name[0];
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ memset(&config, 0xff, sizeof(config));
+ odp_pktio_config_init(&config);
+
+ /* Check default values */
+ CU_ASSERT(config.pktin.all_bits == 0);
+ CU_ASSERT(config.pktout.all_bits == 0);
+ CU_ASSERT(config.parser.layer == ODP_PROTO_LAYER_ALL);
+ CU_ASSERT(!config.enable_loop);
+ CU_ASSERT(!config.inbound_ipsec);
+ CU_ASSERT(!config.outbound_ipsec);
+ CU_ASSERT(!config.enable_lso);
+ CU_ASSERT(!config.reassembly.en_ipv4);
+ CU_ASSERT(!config.reassembly.en_ipv6);
+ CU_ASSERT(config.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.reassembly.max_num_frags == 2);
+ CU_ASSERT(config.flow_control.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF);
+ CU_ASSERT(config.flow_control.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF);
+
+ /* Indicate packet refs might be used */
+ config.pktout.bit.no_packet_refs = 0;
+
+ CU_ASSERT(odp_pktio_config(pktio, NULL) == 0);
+
+ CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
+
+ /* Loop interface supports loopback mode by definition */
+ if (!strcmp(iface, "loop"))
+ CU_ASSERT(capa.config.enable_loop);
+
+ config = capa.config;
+
+ /* Disable inbound_ipsec as it requires IPsec config to be done */
+ config.inbound_ipsec = 0;
+
+ CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+/* Verify odp_pktio_info() reports the open parameters (name, pool, in/out
+ * modes) for each test interface, and fails for an invalid handle. */
+static void pktio_test_info(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_info_t pktio_info;
+ int i;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_info(pktio, &pktio_info) == 0);
+
+ printf("pktio %d\n name %s\n driver %s\n", i,
+ pktio_info.name, pktio_info.drv_name);
+
+ /* Info must match what the interface was opened with */
+ CU_ASSERT(strcmp(pktio_info.name, iface_name[i]) == 0);
+ CU_ASSERT(pktio_info.pool == pool[i]);
+ CU_ASSERT(pktio_info.param.in_mode == ODP_PKTIN_MODE_QUEUE);
+ CU_ASSERT(pktio_info.param.out_mode == ODP_PKTOUT_MODE_DIRECT);
+
+ /* Negative test: invalid handle must be rejected */
+ CU_ASSERT(odp_pktio_info(ODP_PKTIO_INVALID, &pktio_info) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+}
+
+/* Verify odp_pktio_link_info() returns only valid enumeration values for
+ * every field, a non-NULL media string, and fails for an invalid handle. */
+static void pktio_test_link_info(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_link_info_t link_info;
+ int i;
+
+ for (i = 0; i < num_ifaces; i++) {
+ memset(&link_info, 0, sizeof(link_info));
+
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_link_info(pktio, &link_info) == 0);
+
+ /* Each field must hold one of its defined enum values */
+ CU_ASSERT(link_info.autoneg == ODP_PKTIO_LINK_AUTONEG_UNKNOWN ||
+ link_info.autoneg == ODP_PKTIO_LINK_AUTONEG_ON ||
+ link_info.autoneg == ODP_PKTIO_LINK_AUTONEG_OFF);
+ CU_ASSERT(link_info.duplex == ODP_PKTIO_LINK_DUPLEX_UNKNOWN ||
+ link_info.duplex == ODP_PKTIO_LINK_DUPLEX_HALF ||
+ link_info.duplex == ODP_PKTIO_LINK_DUPLEX_FULL);
+ CU_ASSERT(link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_UNKNOWN ||
+ link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_OFF ||
+ link_info.pause_rx == ODP_PKTIO_LINK_PAUSE_ON ||
+ link_info.pause_rx == ODP_PKTIO_LINK_PFC_ON);
+ CU_ASSERT(link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_UNKNOWN ||
+ link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_OFF ||
+ link_info.pause_tx == ODP_PKTIO_LINK_PAUSE_ON ||
+ link_info.pause_tx == ODP_PKTIO_LINK_PFC_ON);
+ CU_ASSERT(link_info.status == ODP_PKTIO_LINK_STATUS_UNKNOWN ||
+ link_info.status == ODP_PKTIO_LINK_STATUS_UP ||
+ link_info.status == ODP_PKTIO_LINK_STATUS_DOWN);
+ CU_ASSERT(link_info.media != NULL);
+
+ /* Negative test: invalid handle must be rejected */
+ CU_ASSERT(odp_pktio_link_info(ODP_PKTIO_INVALID, &link_info) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+}
+
+/* Common capability gate for the flow control tests.
+ *
+ * pfc: 0 = 802.3x pause frames, 1 = PFC frames
+ * rx:  1 = reception capability, 0 = transmission capability
+ *
+ * Returns ODP_TEST_ACTIVE when the first test interface advertises the
+ * selected capability, ODP_TEST_INACTIVE otherwise.
+ */
+static int pktio_check_flow_control(int pfc, int rx)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+ uint32_t supported = 0;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0)
+ return ODP_TEST_INACTIVE;
+
+ /* Select the capability bit matching the (pfc, rx) combination */
+ if (pfc == 0 && rx == 1)
+ supported = capa.flow_control.pause_rx;
+ else if (pfc == 1 && rx == 1)
+ supported = capa.flow_control.pfc_rx;
+ else if (pfc == 0 && rx == 0)
+ supported = capa.flow_control.pause_tx;
+ else if (pfc == 1 && rx == 0)
+ supported = capa.flow_control.pfc_tx;
+
+ return (supported == 1) ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
+/* Pause frame reception supported? */
+static int pktio_check_pause_rx(void)
+{
+ return pktio_check_flow_control(0, 1);
+}
+
+/* Pause frame transmission supported? */
+static int pktio_check_pause_tx(void)
+{
+ return pktio_check_flow_control(0, 0);
+}
+
+/* Both pause frame reception and transmission supported? */
+static int pktio_check_pause_both(void)
+{
+ if (pktio_check_pause_rx() != ODP_TEST_ACTIVE)
+ return ODP_TEST_INACTIVE;
+
+ return pktio_check_pause_tx();
+}
+
+/* PFC frame reception supported? */
+static int pktio_check_pfc_rx(void)
+{
+ return pktio_check_flow_control(1, 1);
+}
+
+/* PFC frame transmission supported? */
+static int pktio_check_pfc_tx(void)
+{
+ return pktio_check_flow_control(1, 0);
+}
+
+/* Both PFC frame reception and transmission supported? */
+static int pktio_check_pfc_both(void)
+{
+ if (pktio_check_pfc_rx() != ODP_TEST_ACTIVE)
+ return ODP_TEST_INACTIVE;
+
+ return pktio_check_pfc_tx();
+}
+
+/* Create a default class-of-service backed by the given queue and pool[0],
+ * and install it as the pktio's default CoS. The returned CoS handle is
+ * owned by the caller, who must destroy it after clearing the default. */
+static odp_cos_t set_default_cos(odp_pktio_t pktio, odp_queue_t queue)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_cos_t cos;
+ int ret;
+
+ odp_cls_cos_param_init(&cos_param);
+ cos_param.queue = queue;
+ cos_param.pool = pool[0];
+
+ cos = odp_cls_cos_create("Default CoS", &cos_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ ret = odp_pktio_default_cos_set(pktio, cos);
+ CU_ASSERT_FATAL(ret == 0);
+
+ return cos;
+}
+
+/* Create a CoS with backpressure enabled for PFC level 1 and attach it to
+ * default_cos with a VLAN PCP match rule. The created PMR handle is
+ * returned through pmr_out; caller owns both the CoS and the PMR. */
+static odp_cos_t create_pfc_cos(odp_cos_t default_cos, odp_queue_t queue, odp_pmr_t *pmr_out)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_cos_t cos;
+ odp_pmr_param_t pmr_param;
+ odp_pmr_t pmr;
+ uint8_t pcp = 1;
+ uint8_t mask = 0x7;
+
+ /* Setup a CoS to control generation of PFC frame generation. PFC for the VLAN
+ * priority level is generated when queue/pool resource usage gets above 80%. */
+ odp_cls_cos_param_init(&cos_param);
+ cos_param.queue = queue;
+ cos_param.pool = pool[0];
+ cos_param.bp.enable = 1;
+ cos_param.bp.threshold.type = ODP_THRESHOLD_PERCENT;
+ cos_param.bp.threshold.percent.max = 80;
+ cos_param.bp.pfc_level = pcp;
+
+ cos = odp_cls_cos_create("PFC CoS", &cos_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ /* Route VLAN PCP 1 packets from the default CoS into the PFC CoS */
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_PCP_0;
+ pmr_param.match.value = &pcp;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = 1;
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
+
+ *pmr_out = pmr;
+
+ return cos;
+}
+
+/* Configure and start an interface with the requested flow control modes.
+ *
+ * pfc: 0 = 802.3x pause frames, 1 = PFC frames
+ * rx/tx: enable flow control in the respective direction
+ *
+ * For PFC transmission the classifier must be enabled and a backpressure
+ * CoS/PMR set up; those resources are created after start and torn down
+ * in reverse order before the interface is stopped and closed. */
+static void pktio_config_flow_control(int pfc, int rx, int tx)
+{
+ odp_pktio_t pktio;
+ odp_pktio_config_t config;
+ int ret;
+ odp_cos_t default_cos = ODP_COS_INVALID;
+ odp_cos_t cos = ODP_COS_INVALID;
+ odp_pmr_t pmr = ODP_PMR_INVALID;
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+ odp_pktio_link_pause_t mode = ODP_PKTIO_LINK_PAUSE_ON;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ odp_pktio_config_init(&config);
+
+ if (pfc)
+ mode = ODP_PKTIO_LINK_PFC_ON;
+
+ if (rx)
+ config.flow_control.pause_rx = mode;
+
+ if (tx)
+ config.flow_control.pause_tx = mode;
+
+ ret = odp_pktio_config(pktio, &config);
+ CU_ASSERT_FATAL(ret == 0);
+
+ if (pfc && tx) {
+ /* Enable classifier for PFC backpressure configuration. Overrides previous
+ * pktin queue config. */
+ odp_pktin_queue_param_t pktin_param;
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ pktin_param.classifier_enable = 1;
+
+ ret = odp_pktin_queue_config(pktio, &pktin_param);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT(ret == 0);
+
+ if (pfc && tx) {
+ odp_queue_param_t qparam;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue = odp_queue_create("CoS queue", &qparam);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ default_cos = set_default_cos(pktio, queue);
+
+ cos = create_pfc_cos(default_cos, queue, &pmr);
+ }
+
+ /* Teardown in reverse dependency order: PMR, CoS, default CoS, queue */
+ if (pmr != ODP_PMR_INVALID)
+ odp_cls_pmr_destroy(pmr);
+
+ if (cos != ODP_COS_INVALID)
+ odp_cos_destroy(cos);
+
+ if (default_cos != ODP_COS_INVALID) {
+ odp_pktio_default_cos_set(pktio, ODP_COS_INVALID);
+ odp_cos_destroy(default_cos);
+ }
+
+ if (queue != ODP_QUEUE_INVALID)
+ odp_queue_destroy(queue);
+
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+/* Flow control test cases: thin wrappers selecting the
+ * pktio_config_flow_control(pfc, rx, tx) combination under test. */
+static void pktio_test_enable_pause_rx(void)
+{
+ pktio_config_flow_control(0, 1, 0);
+}
+
+static void pktio_test_enable_pause_tx(void)
+{
+ pktio_config_flow_control(0, 0, 1);
+}
+
+static void pktio_test_enable_pause_both(void)
+{
+ pktio_config_flow_control(0, 1, 1);
+}
+
+static void pktio_test_enable_pfc_rx(void)
+{
+ pktio_config_flow_control(1, 1, 0);
+}
+
+static void pktio_test_enable_pfc_tx(void)
+{
+ pktio_config_flow_control(1, 0, 1);
+}
+
+static void pktio_test_enable_pfc_both(void)
+{
+ pktio_config_flow_control(1, 1, 1);
+}
+
+/* Configure pktin queues in DIRECT mode: max number of hashed queues, then
+ * a single MT-unsafe queue; verify event queue query fails in direct mode
+ * and that invalid handle / too many queues are rejected. */
+static void pktio_test_pktin_queue_config_direct(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ /* Negative test: capability query with an invalid handle */
+ CU_ASSERT(odp_pktio_capability(ODP_PKTIO_INVALID, &capa) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES)
+ == num_queues);
+ /* Event queues are not available in direct mode */
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) < 0);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ /* Requesting more queues than the capability allows must fail */
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+/* Configure pktin queues in SCHED mode: max number of hashed, atomically
+ * scheduled queues, then one queue; verify direct queue query fails in
+ * sched mode and that exceeding the capability is rejected. */
+static void pktio_test_pktin_queue_config_sched(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+ /* Direct queues are not available in sched mode */
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ /* Requesting more queues than the capability allows must fail */
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+/* Configure pktin queues in SCHED mode using a per-queue parameter override
+ * array (queue_param_ovr), one entry per queue, and verify the configured
+ * event queue count. */
+static void pktio_test_pktin_queue_config_multi_sched(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_queue_t in_queues[MAX_QUEUES];
+ odp_pktin_queue_param_ovr_t queue_param_ovr[MAX_QUEUES];
+ int num_queues, i;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ /* Clamp to the size of the local override array */
+ num_queues = (capa.max_input_queues < MAX_QUEUES) ?
+ capa.max_input_queues : MAX_QUEUES;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = 0;
+ queue_param.num_queues = num_queues;
+ queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ /* Per-queue override: every queue in the default scheduling group */
+ for (i = 0; i < num_queues; i++)
+ queue_param_ovr[i].group = ODP_SCHED_GROUP_ALL;
+ queue_param.queue_param_ovr = queue_param_ovr;
+
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+/* Configure pktin queue sizes: max queues at maximum queue size, then one
+ * queue at minimum size; verify size capability sanity and rejection of a
+ * too-large queue count. */
+static void pktio_test_pktin_queue_config_queue(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+ CU_ASSERT_FATAL(num_queues <= ODP_PKTIN_MAX_QUEUES);
+
+ CU_ASSERT(capa.min_input_queue_size <= capa.max_input_queue_size);
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ for (int i = 0; i < num_queues; i++)
+ queue_param.queue_size[i] = capa.max_input_queue_size;
+
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) == num_queues);
+
+ queue_param.num_queues = 1;
+ queue_param.queue_size[0] = capa.min_input_queue_size;
+
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ /* Requesting more queues than the capability allows must fail */
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+/* Configure pktout queues: max MT-unsafe queues at maximum size, then one
+ * MT queue at minimum size; verify invalid handle and oversized requests
+ * are rejected. */
+static void pktio_test_pktout_queue_config(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktout_queue_param_t queue_param;
+ odp_pktout_queue_t pktout_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_output_queues > 0);
+ num_queues = capa.max_output_queues;
+ CU_ASSERT_FATAL(num_queues <= ODP_PKTOUT_MAX_QUEUES);
+
+ CU_ASSERT(capa.min_output_queue_size <= capa.max_output_queue_size);
+
+ odp_pktout_queue_param_init(&queue_param);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = num_queues;
+ for (int i = 0; i < num_queues; i++)
+ queue_param.queue_size[i] = capa.max_output_queue_size;
+
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue(pktio, pktout_queues, MAX_QUEUES)
+ == num_queues);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT;
+ queue_param.num_queues = 1;
+ queue_param.queue_size[0] = capa.min_output_queue_size;
+
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ /* Requesting more queues than the capability allows must fail */
+ queue_param.num_queues = capa.max_output_queues + 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+#ifdef DEBUG_STATS
+/* Debug helper: dump all pktio level counters of 's' for interface 'name'.
+ * Compiled only when DEBUG_STATS is defined. */
+static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
+{
+ ODPH_ERR("\n%s:\n"
+ " in_octets %" PRIu64 "\n"
+ " in_packets %" PRIu64 "\n"
+ " in_ucast_pkts %" PRIu64 "\n"
+ " in_mcast_pkts %" PRIu64 "\n"
+ " in_bcast_pkts %" PRIu64 "\n"
+ " in_discards %" PRIu64 "\n"
+ " in_errors %" PRIu64 "\n"
+ " out_octets %" PRIu64 "\n"
+ " out_packets %" PRIu64 "\n"
+ " out_ucast_pkts %" PRIu64 "\n"
+ " out_mcast_pkts %" PRIu64 "\n"
+ " out_bcast_pkts %" PRIu64 "\n"
+ " out_discards %" PRIu64 "\n"
+ " out_errors %" PRIu64 "\n",
+ name,
+ s->in_octets,
+ s->in_packets,
+ s->in_ucast_pkts,
+ s->in_mcast_pkts,
+ s->in_bcast_pkts,
+ s->in_discards,
+ s->in_errors,
+ s->out_octets,
+ s->out_packets,
+ s->out_ucast_pkts,
+ s->out_mcast_pkts,
+ s->out_bcast_pkts,
+ s->out_discards,
+ s->out_errors);
+}
+#endif
+
+/* Enable the statistics counter test only when the first test interface
+ * reports at least one supported pktio level counter. */
+static int pktio_check_statistics_counters(void)
+{
+ odp_pktio_param_t param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t hdl;
+ int rc;
+
+ odp_pktio_param_init(&param);
+ param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ hdl = odp_pktio_open(iface_name[0], pool[0], &param);
+ if (hdl == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ rc = odp_pktio_capability(hdl, &capa);
+ (void)odp_pktio_close(hdl);
+
+ if (rc < 0)
+ return ODP_TEST_INACTIVE;
+
+ return capa.stats.pktio.all_counters ? ODP_TEST_ACTIVE :
+ ODP_TEST_INACTIVE;
+}
+
+/* Send packets from pktio_tx to pktio_rx (same interface when only one is
+ * available) and verify pktio level statistics: each counter the capability
+ * reports as supported must have advanced at least by the packet/byte count,
+ * and every unsupported counter must remain zero. */
+static void pktio_test_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+ odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ ret = odp_pktio_start(pktio_tx);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_start(pktio_rx);
+ CU_ASSERT(ret == 0);
+ }
+
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ /* Reset counters so the checks below start from zero */
+ ret = odp_pktio_stats_reset(pktio_tx);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_stats_reset(pktio_rx);
+ CU_ASSERT(ret == 0);
+ }
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ /* Count only the test's own sequence-tagged packets */
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ ret = odp_pktio_stats(pktio_tx, &stats[0]);
+ CU_ASSERT(ret == 0);
+ tx_stats = &stats[0];
+
+ /* Validate a counter only when its capability bit is set */
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
+ (tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
+ (tx_stats->out_packets >= (uint64_t)pkts));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_ucast_pkts == 0) ||
+ (tx_stats->out_ucast_pkts >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats->out_discards == 0);
+ CU_ASSERT(tx_stats->out_errors == 0);
+
+ rx_stats = &stats[0];
+ if (num_ifaces > 1) {
+ rx_stats = &stats[1];
+ ret = odp_pktio_stats(pktio_rx, rx_stats);
+ CU_ASSERT(ret == 0);
+ }
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
+ (rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
+ (rx_stats->in_packets >= (uint64_t)pkts));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_ucast_pkts == 0) ||
+ (rx_stats->in_ucast_pkts >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats->in_discards == 0);
+ CU_ASSERT(rx_stats->in_errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktio.counter.in_octets)
+ CU_ASSERT(rx_stats->in_octets == 0);
+ if (!rx_capa.stats.pktio.counter.in_packets)
+ CU_ASSERT(rx_stats->in_packets == 0);
+ if (!rx_capa.stats.pktio.counter.in_ucast_pkts)
+ CU_ASSERT(rx_stats->in_ucast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_mcast_pkts)
+ CU_ASSERT(rx_stats->in_mcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_bcast_pkts)
+ CU_ASSERT(rx_stats->in_bcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_discards)
+ CU_ASSERT(rx_stats->in_discards == 0);
+ if (!rx_capa.stats.pktio.counter.in_errors)
+ CU_ASSERT(rx_stats->in_errors == 0);
+
+ if (!tx_capa.stats.pktio.counter.out_octets)
+ CU_ASSERT(tx_stats->out_octets == 0);
+ if (!tx_capa.stats.pktio.counter.out_packets)
+ CU_ASSERT(tx_stats->out_packets == 0);
+ if (!tx_capa.stats.pktio.counter.out_ucast_pkts)
+ CU_ASSERT(tx_stats->out_ucast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_mcast_pkts)
+ CU_ASSERT(tx_stats->out_mcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_bcast_pkts)
+ CU_ASSERT(tx_stats->out_bcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_discards)
+ CU_ASSERT(tx_stats->out_discards == 0);
+ if (!tx_capa.stats.pktio.counter.out_errors)
+ CU_ASSERT(tx_stats->out_errors == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Enable the broadcast counter test only when the first test interface
+ * supports the in_bcast_pkts or out_bcast_pkts counter. */
+static int pktio_check_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktio.counter.in_bcast_pkts == 0 &&
+ capa.stats.pktio.counter.out_bcast_pkts == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Send broadcast UDP packets from pktio_tx to pktio_rx and verify that the
+ * supported broadcast/octet/packet counters advance accordingly. Counters
+ * whose capability bit is clear are not validated here. */
+static void pktio_test_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+ odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ /* Broadcast destination MAC so the bcast counters are exercised */
+ alloc = create_packets_udp(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx,
+ true, ETH_BROADCAST);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ /* Count only the test's own sequence-tagged packets */
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT(odp_pktio_stats(pktio_tx, &stats[0]) == 0);
+ tx_stats = &stats[0];
+
+ /* Validate a counter only when its capability bit is set */
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_bcast_pkts == 0) ||
+ (tx_stats->out_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
+ (tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
+ (tx_stats->out_packets >= (uint64_t)pkts));
+
+ rx_stats = &stats[0];
+ if (num_ifaces > 1) {
+ rx_stats = &stats[1];
+ CU_ASSERT(odp_pktio_stats(pktio_rx, rx_stats) == 0);
+ }
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_bcast_pkts == 0) ||
+ (rx_stats->in_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
+ (rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
+ (rx_stats->in_packets >= (uint64_t)pkts));
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Enable the per-queue counter test only when the first test interface
+ * supports at least one pktin or pktout queue counter in direct mode. */
+static int pktio_check_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Send packets over direct pktin/pktout queues and verify the per-queue
+ * statistics (odp_pktin_queue_stats / odp_pktout_queue_stats): supported
+ * counters must advance, unsupported ones must stay zero. */
+static void pktio_test_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ odp_pktin_queue_t pktin;
+ uint64_t wait = odp_pktin_wait_time(ODP_TIME_SEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ /* Reset counters so the checks below start from zero */
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* Receive directly; count only sequence-tagged test packets */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ odp_packet_t pkt;
+
+ if (odp_pktin_recv_tmo(pktin, &pkt, 1, wait) != 1)
+ break;
+
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+
+ odp_packet_free(pkt);
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ /* Validate a counter only when its capability bit is set */
+ CU_ASSERT_FATAL(odp_pktout_queue_stats(pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue_stats(pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Enable the event queue counter test only when the first test interface
+ * supports at least one pktin or pktout queue counter in sched/queue mode. */
+static int pktio_check_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Send packets via pktout event queues and receive via the scheduler, then
+ * verify the per-event-queue statistics (odp_pktin_event_queue_stats /
+ * odp_pktout_event_queue_stats): supported counters must advance and
+ * unsupported ones must stay zero. */
+static void pktio_test_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts;
+ odp_queue_t pktout;
+ odp_queue_t pktin;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_QUEUE);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_event_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ tx_pkts = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ /* Reset counters so the checks below start from zero */
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ CU_ASSERT_FATAL(send_packet_events(pktout, tx_pkt, tx_pkts) == 0);
+
+ /* Receive */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ /* Count only the test's own sequence-tagged packets */
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+ CU_ASSERT(pkts == tx_pkts);
+
+ /* Validate a counter only when its capability bit is set */
+ CU_ASSERT_FATAL(odp_pktout_event_queue_stats(pktio_tx, pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue_stats(pktio_rx, pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/*
+ * Query and print implementation-specific extra statistics of a pktio.
+ * Verifies that info and counter queries report matching counts and that
+ * each individual counter can be read via odp_pktio_extra_stat_counter().
+ * Skips silently (after clean stop/close) when the implementation exposes
+ * no extra statistics.
+ */
+static void pktio_test_extra_stats(void)
+{
+ odp_pktio_t pktio;
+ int num_info, num_stats, i, ret;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT_FATAL(odp_pktio_start(pktio) == 0);
+
+ /* A NULL table with count 0 queries only the number of counters */
+ num_info = odp_pktio_extra_stat_info(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_info >= 0);
+
+ num_stats = odp_pktio_extra_stats(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_stats >= 0);
+
+ /* Info entries and counter values must be reported in equal numbers */
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ /* No extra statistics supported */
+ if (num_stats == 0) {
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ return;
+ }
+
+ odp_pktio_extra_stat_info_t stats_info[num_stats];
+ uint64_t extra_stats[num_stats];
+
+ ret = odp_pktio_extra_stat_info(pktio, stats_info, num_stats);
+ CU_ASSERT(ret == num_stats);
+ num_info = ret;
+
+ ret = odp_pktio_extra_stats(pktio, extra_stats, num_stats);
+ CU_ASSERT(ret == num_stats);
+ CU_ASSERT_FATAL(ret <= num_stats);
+ num_stats = ret;
+
+ /* Counts must still agree after reading the actual tables */
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ printf("\nPktio extra statistics\n----------------------\n");
+ for (i = 0; i < num_stats; i++)
+ printf(" %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
+
+ /* Every counter index must also be readable individually */
+ for (i = 0; i < num_stats; i++) {
+ uint64_t stat = 0;
+
+ CU_ASSERT(odp_pktio_extra_stat_counter(pktio, i, &stat) == 0);
+ }
+
+ odp_pktio_extra_stats_print(pktio);
+
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+/*
+ * Precondition check for the protocol-statistics test: the test is run only
+ * when a pktio can be opened in scheduled input mode and its protocol stats
+ * capability reports at least one supported Tx counter.
+ * Returns ODP_TEST_ACTIVE or ODP_TEST_INACTIVE for the CUnit runner.
+ */
+static int pktio_check_proto_statistics_counters(void)
+{
+ odp_proto_stats_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_proto_stats_capability(pktio, &capa);
+ /* Capability query result is checked below; close errors are ignored */
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || capa.tx.counters.all_bits == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Read back a protocol statistics object and validate its counters against
+ * the expected packet count. For each counter the check applies only when
+ * the capability bit says the counter is supported:
+ *  - drop counters must be zero (no drops are expected in the test),
+ *  - tx_pkts must equal the number of packets sent on the flow,
+ *  - octet counters must equal packet length (plus the per-flow adjustment
+ *    when the implementation supports octet count adjustment) times pkts.
+ */
+static void validate_proto_stats(odp_proto_stats_t stat, odp_packet_proto_stats_opt_t opt,
+ odp_proto_stats_capability_t capa, uint64_t pkts)
+{
+ odp_proto_stats_data_t data;
+ int ret;
+
+ ret = odp_proto_stats(stat, &data);
+ CU_ASSERT(ret == 0);
+
+ /* "!(supported && bad_value)": assert only when counter is supported */
+ CU_ASSERT(!(capa.tx.counters.bit.tx_pkt_drops && (data.tx_pkt_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_oct_count0_drops && (data.tx_oct_count0_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_oct_count1_drops && (data.tx_oct_count1_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_pkts && (data.tx_pkts != pkts)));
+
+ if (capa.tx.counters.bit.tx_oct_count0) {
+ int64_t counted_bytes = PKT_LEN_NORMAL;
+
+ if (capa.tx.oct_count0_adj)
+ counted_bytes += opt.oct_count0_adj;
+ CU_ASSERT(data.tx_oct_count0 == counted_bytes * pkts);
+ }
+
+ if (capa.tx.counters.bit.tx_oct_count1) {
+ int64_t counted_bytes = PKT_LEN_NORMAL;
+
+ if (capa.tx.oct_count1_adj)
+ counted_bytes += opt.oct_count1_adj;
+ CU_ASSERT(data.tx_oct_count1 == counted_bytes * pkts);
+ }
+}
+
+/*
+ * Exercise per-flow protocol statistics on packet output. Two statistics
+ * objects ("flow0"/"flow1") with different octet-count adjustments are
+ * attached to alternating packets of one Tx batch; after the packets have
+ * looped back through the Rx interface, each object's counters are checked
+ * with validate_proto_stats(). Uses one or two interfaces (loopback when
+ * only one is available).
+ */
+static void pktio_test_proto_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ const uint32_t num_pkts = 10;
+ odp_packet_t tx_pkt[num_pkts];
+ uint32_t pkt_seq[num_pkts];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ uint64_t flow0_pkts = 0, flow1_pkts = 0;
+ odp_proto_stats_capability_t capa;
+ odp_packet_proto_stats_opt_t opt0;
+ odp_packet_proto_stats_opt_t opt1;
+ odp_proto_stats_param_t param;
+ odp_pktio_config_t config;
+ odp_proto_stats_t stat0;
+ odp_proto_stats_t stat1;
+
+ memset(&pktout, 0, sizeof(pktout));
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ /* Enable protocol stats on Tx interface */
+ odp_pktio_config_init(&config);
+ config.pktout.bit.proto_stats_ena = 1;
+ ret = odp_pktio_config(pktio_tx, &config);
+ CU_ASSERT(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ ret = odp_pktio_start(pktio_tx);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_start(pktio_rx);
+ CU_ASSERT(ret == 0);
+ }
+
+ odp_proto_stats_param_init(&param);
+ odp_proto_stats_capability(pktio_tx, &capa);
+ CU_ASSERT(capa.tx.counters.all_bits != 0);
+ param.counters.all_bits = capa.tx.counters.all_bits;
+ /* Create statistics object with all supported counters */
+ stat0 = odp_proto_stats_create("flow0_stat", &param);
+ CU_ASSERT_FATAL(stat0 != ODP_PROTO_STATS_INVALID);
+ stat1 = odp_proto_stats_create("flow1_stat", &param);
+ CU_ASSERT_FATAL(stat1 != ODP_PROTO_STATS_INVALID);
+
+ /* Flow-0 options */
+ opt0.stat = stat0;
+ opt0.oct_count0_adj = 0;
+ /* oct1 contains byte count of packets excluding Ethernet header */
+ opt0.oct_count1_adj = -14;
+
+ /* Flow-1 options */
+ opt1.stat = stat1;
+ opt1.oct_count0_adj = -8;
+ opt1.oct_count1_adj = 14;
+
+ alloc = create_packets(tx_pkt, pkt_seq, num_pkts, pktio_tx, pktio_rx);
+
+ /* Attach statistics object to all Tx packets */
+ for (pkts = 0; pkts < alloc; pkts++) {
+ if ((pkts % 2) == 0) {
+ odp_packet_proto_stats_request(tx_pkt[pkts], &opt0);
+ flow0_pkts++;
+ } else {
+ odp_packet_proto_stats_request(tx_pkt[pkts], &opt1);
+ flow1_pkts++;
+ }
+ }
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < (int)num_pkts && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ /* Count only packets carrying this test's sequence magic */
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ /* Validate Flow-0 packet statistics */
+ validate_proto_stats(stat0, opt0, capa, flow0_pkts);
+
+ /* Validate Flow-1 packet statistics */
+ validate_proto_stats(stat1, opt1, capa, flow1_pkts);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ /* Destroy proto statistics object */
+ CU_ASSERT(odp_proto_stats_destroy(stat0) == 0);
+ CU_ASSERT(odp_proto_stats_destroy(stat1) == 0);
+}
+
+/*
+ * Precondition check: the start/stop test can be disabled via the
+ * ODP_PKTIO_TEST_DISABLE_START_STOP environment variable.
+ */
+static int pktio_check_start_stop(void)
+{
+ if (getenv("ODP_PKTIO_TEST_DISABLE_START_STOP"))
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Validate pktio start/stop semantics:
+ *  - stopping an already-stopped interface and starting an already-started
+ *    interface must both fail,
+ *  - packets sent while the receiving interface is stopped must not be
+ *    delivered,
+ *  - after starting both interfaces, a full send/receive round trip works,
+ *  - a schedule call after stop/close returns no events.
+ * With a single interface the Rx-while-stopped part is skipped.
+ */
+static void pktio_test_start_stop(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio_in;
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio[0], &pktout, 1) == 1);
+
+ /* Interfaces are stopped by default,
+ * Check that stop when stopped generates an error */
+ ret = odp_pktio_stop(pktio[0]);
+ CU_ASSERT(ret < 0);
+
+ /* start first */
+ ret = odp_pktio_start(pktio[0]);
+ CU_ASSERT(ret == 0);
+ /* Check that start when started generates an error */
+ ret = odp_pktio_start(pktio[0]);
+ CU_ASSERT(ret < 0);
+
+ _pktio_wait_linkup(pktio[0]);
+
+ /* Test Rx on a stopped interface. Only works if there are 2 */
+ if (num_ifaces > 1) {
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0],
+ pktio[1]);
+
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts],
+ alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to enqueue packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ /* check that packets did not arrive */
+ for (i = 0, pkts = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ if (pkts)
+ CU_FAIL("pktio stopped, received unexpected events");
+
+ /* start both, send and get packets */
+ /* 0 already started */
+ ret = odp_pktio_start(pktio[1]);
+ CU_ASSERT(ret == 0);
+
+ _pktio_wait_linkup(pktio[1]);
+
+ /* flush packets with magic number in pipes */
+ for (i = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ }
+ }
+
+ if (num_ifaces > 1)
+ pktio_in = pktio[1];
+ else
+ pktio_in = pktio[0];
+
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0], pktio_in);
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to enqueue packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+ CU_ASSERT(pkts == alloc);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ /* Verify that a schedule call after stop and close does not generate
+ errors. */
+ ev = odp_schedule(NULL, wait);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+}
+
+/*
+ * Open a write-only interface (input disabled) and verify that requesting
+ * input queues yields none (returns 0), while start/stop/close still work.
+ */
+static void pktio_test_recv_on_wonly(void)
+{
+ odp_pktio_t pktio;
+ int ret;
+ odp_pktin_queue_t pktin;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DISABLED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ CU_FAIL("failed to open pktio");
+ return;
+ }
+
+ /* Input disabled: zero input queues must be reported */
+ CU_ASSERT(odp_pktin_queue(pktio, &pktin, 1) == 0);
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio);
+
+ ret = odp_pktio_stop(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+/*
+ * Open a read-only interface (output disabled) and verify that requesting
+ * output queues yields none (returns 0), while start/stop/close still work.
+ */
+static void pktio_test_send_on_ronly(void)
+{
+ odp_pktio_t pktio;
+ int ret;
+ odp_pktout_queue_t pktout;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DISABLED);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ CU_FAIL("failed to open pktio");
+ return;
+ }
+
+ /* Output disabled: zero output queues must be reported */
+ CU_ASSERT(odp_pktout_queue(pktio, &pktout, 1) == 0);
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio);
+
+ ret = odp_pktio_stop(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+/*
+ * Precondition check for the input-timestamp test: active only when the
+ * interface capability reports support for timestamping all received
+ * packets (pktin.bit.ts_all).
+ */
+static int pktio_check_pktin_ts(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.config.pktin.bit.ts_all)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Validate packet input timestamping:
+ *  - odp_pktio_ts_res() returns a resolution within the expected range and
+ *    odp_pktio_ts_from_ns() round-trips a nanosecond value within one
+ *    resolution step,
+ *  - each received packet carries a timestamp that does not exceed the
+ *    current pktio time and lags it by at most 100 ms,
+ *  - timestamps of packets sent PKTIO_TS_INTERVAL apart are strictly
+ *    increasing.
+ */
+static void pktio_test_pktin_ts(void)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ pktio_info_t pktio_rx_info;
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+ odp_pktout_queue_t pktout_queue;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ uint64_t ns1, ns2;
+ uint64_t res, res_ns, input_delay;
+ odp_time_t ts_prev;
+ odp_time_t ts;
+ int num_rx = 0;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.ts_all);
+
+ /* Enable timestamping of all received packets */
+ odp_pktio_config_init(&config);
+ config.pktin.bit.ts_all = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ /* Test odp_pktio_ts_res() and odp_pktio_ts_from_ns() */
+ res = odp_pktio_ts_res(pktio_tx);
+ CU_ASSERT(res > PKTIO_TS_MIN_RES);
+ CU_ASSERT(res < PKTIO_TS_MAX_RES);
+ ns1 = 100;
+ ts = odp_pktio_ts_from_ns(pktio_tx, ns1);
+ ns2 = odp_time_to_ns(ts);
+ CU_ASSERT_FATAL(res != 0);
+ /* ns per resolution step, rounded up */
+ res_ns = ODP_TIME_SEC_IN_NS / res;
+ if (ODP_TIME_SEC_IN_NS % res)
+ res_ns++;
+ /* Allow some arithmetic tolerance */
+ CU_ASSERT((ns2 <= (ns1 + res_ns)) && (ns2 >= (ns1 - res_ns)));
+
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ /* Send packets one at a time and add delay between the packets */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
+ &pkt_tbl[i], 1) == 1);
+ ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
+ 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS, false);
+ if (ret != 1)
+ break;
+
+ /* Compare to packet IO time to input timestamp */
+ ts = odp_pktio_time(pktio_rx_info.id, NULL);
+ CU_ASSERT_FATAL(odp_packet_has_ts(pkt_tbl[i]));
+ ts_prev = odp_packet_ts(pkt_tbl[i]);
+ /* Input timestamp must not be in the future of pktio time */
+ CU_ASSERT(odp_time_cmp(ts, ts_prev) >= 0);
+ input_delay = odp_time_diff_ns(ts, ts_prev);
+ if (input_delay > 100 * ODP_TIME_MSEC_IN_NS) {
+ printf(" Test packet %d input delay: %" PRIu64 "ns\n", i, input_delay);
+ CU_FAIL("Packet input delay too long");
+ }
+
+ odp_time_wait_ns(PKTIO_TS_INTERVAL);
+ }
+ num_rx = i;
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+
+ /* Timestamps must be strictly increasing across the batch */
+ ts_prev = ODP_TIME_NULL;
+ for (i = 0; i < num_rx; i++) {
+ ts = odp_packet_ts(pkt_tbl[i]);
+
+ CU_ASSERT(odp_time_cmp(ts, ts_prev) > 0);
+
+ ts_prev = ts;
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/*
+ * Precondition check for the output-timestamp test: active only when the
+ * interface capability reports Tx timestamping support (pktout.bit.ts_ena).
+ */
+static int pktio_check_pktout_ts(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.config.pktout.bit.ts_ena)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Validate packet output timestamping: each packet is sent with a
+ * per-packet timestamp request (odp_packet_ts_request()), and after its
+ * loopback reception the Tx timestamp read via odp_pktout_ts_read() must
+ * be strictly greater than the previous one. Packets are spaced by
+ * PKTIO_TS_INTERVAL to make the ordering observable.
+ */
+static void pktio_test_pktout_ts(void)
+{
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ odp_pktout_queue_t pktout_queue;
+ odp_pktio_t pktio_tx, pktio_rx;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ pktio_info_t pktio_rx_info;
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+ odp_time_t ts_prev;
+ odp_time_t ts;
+ int num_rx = 0;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.ts_all);
+
+ /* Enable Tx timestamping */
+ odp_pktio_config_init(&config);
+ config.pktout.bit.ts_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ /* Start with current pktio time */
+ ts_prev = odp_pktio_time(pktio_tx, NULL);
+
+ odp_time_wait_ns(PKTIO_TS_INTERVAL);
+
+ /* Send packets one at a time and add delay between the packets */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ /* Enable ts capture on this pkt */
+ odp_packet_ts_request(pkt_tbl[i], 1);
+
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
+ &pkt_tbl[i], 1) == 1);
+ ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
+ 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS,
+ false);
+ if (ret != 1)
+ break;
+
+ /* Since we got packet back, check for sent ts */
+ CU_ASSERT_FATAL(odp_pktout_ts_read(pktio_tx, &ts) == 0);
+
+ /* Each Tx timestamp must advance past the previous one */
+ CU_ASSERT(odp_time_cmp(ts, ts_prev) > 0);
+ ts_prev = ts;
+
+ odp_time_wait_ns(PKTIO_TS_INTERVAL);
+ }
+ num_rx = i;
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/*
+ * Validate event-mode Tx completion. One completion queue is created per
+ * packet in the batch (plain or scheduled, selected by use_plain_queue).
+ * The test first checks that completion requests can be enabled and
+ * disabled per packet, then sends a batch with one completion queue per
+ * packet and verifies that each queue delivers exactly one valid
+ * ODP_EVENT_PACKET_TX_COMPL event carrying the packet's user pointer.
+ * With scheduled queues, event-to-queue mapping is matched via the user
+ * pointer since delivery order is not guaranteed.
+ */
+static void pktio_test_pktout_compl_event(bool use_plain_queue)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ odp_queue_t compl_queue[TX_BATCH_LEN];
+ odp_schedule_capability_t sched_capa;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ char queuename[ODP_QUEUE_NAME_LEN];
+ odp_pktio_capability_t pktio_capa;
+ odp_queue_capability_t queue_capa;
+ uint16_t seq_found[TX_BATCH_LEN];
+ odp_pktout_queue_t pktout_queue;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_packet_tx_compl_t tx_compl;
+ odp_packet_tx_compl_opt_t opt;
+ pktio_info_t pktio_rx_info;
+ odp_pktio_config_t config;
+ odp_queue_param_t qparam;
+ int flag, ret, i, num_rx = 0;
+ odp_event_t ev;
+ uint64_t wait;
+
+ /* Create queues to receive PKTIO Tx completion events */
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+ CU_ASSERT_FATAL(!odp_queue_capability(&queue_capa));
+
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ sprintf(queuename, "TxComplQueue%u", i);
+ odp_queue_param_init(&qparam);
+
+ if (use_plain_queue) {
+ qparam.type = ODP_QUEUE_TYPE_PLAIN;
+ } else {
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = odp_schedule_default_prio();
+ qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+ compl_queue[i] = odp_queue_create(queuename, &qparam);
+ CU_ASSERT_FATAL(compl_queue[i] != ODP_QUEUE_INVALID);
+ }
+
+ memset(&pktout_queue, 0, sizeof(pktout_queue));
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &pktio_capa) == 0);
+
+ /* Configure Tx completion offload for PKTIO Tx */
+ if (i == 0) {
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.mode_event == 1);
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.mode_all ==
+ pktio_capa.tx_compl.mode_event);
+ if (use_plain_queue) {
+ /* CU_ASSERT needs these extra braces */
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.queue_type_plain != 0);
+ } else {
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.queue_type_sched != 0);
+ }
+
+ odp_pktio_config_init(&config);
+ config.tx_compl.mode_event = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx, pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ memset(&opt, 0, sizeof(opt));
+
+ /* Disabled by default */
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) == 0);
+
+ /* Check that disable works. Also COMPL_ALL should be still supported. */
+ opt.queue = compl_queue[0];
+ opt.mode = ODP_PACKET_TX_COMPL_ALL;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) != 0);
+ opt.mode = ODP_PACKET_TX_COMPL_DISABLED;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) == 0);
+ opt.queue = compl_queue[0];
+ opt.mode = ODP_PACKET_TX_COMPL_EVENT;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) != 0);
+ opt.mode = ODP_PACKET_TX_COMPL_DISABLED;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) == 0);
+
+ /* Prepare batch of pkts with different tx completion queues */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[i]) == 0);
+ opt.queue = compl_queue[i];
+ opt.mode = ODP_PACKET_TX_COMPL_EVENT;
+ odp_packet_tx_compl_request(pkt_tbl[i], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[i]) != 0);
+ /* Set pkt sequence number as its user ptr */
+ odp_packet_user_ptr_set(pkt_tbl[i], (const void *)&pkt_seq[i]);
+ }
+
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN) == TX_BATCH_LEN);
+
+ num_rx = wait_for_packets(&pktio_rx_info, pkt_tbl, pkt_seq, TX_BATCH_LEN, TXRX_MODE_SINGLE,
+ ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ wait = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
+ memset(seq_found, 0, sizeof(seq_found));
+
+ /* Receive Packet Tx completion events for all sent/dropped pkts */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ if (use_plain_queue) {
+ /* Plain queue: dequeue directly from the i:th queue */
+ ev = odp_queue_deq(compl_queue[i]);
+
+ /* Event validation */
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT_FATAL(odp_event_is_valid(ev) == 1);
+ CU_ASSERT_FATAL(odp_event_type(ev) == ODP_EVENT_PACKET_TX_COMPL);
+ CU_ASSERT_FATAL(odp_packet_tx_compl_from_event(ev) !=
+ ODP_PACKET_TX_COMPL_INVALID);
+
+ tx_compl = odp_packet_tx_compl_from_event(ev);
+ CU_ASSERT_FATAL(odp_packet_tx_compl_to_event(tx_compl) == ev);
+
+ /* User ptr should be same as packet's user ptr */
+ CU_ASSERT(odp_packet_tx_compl_user_ptr(tx_compl) ==
+ (const void *)&pkt_seq[i]);
+
+ /* No user area or source pool for TX completion events */
+ CU_ASSERT(odp_event_user_area(ev) == NULL);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
+ CU_ASSERT(flag < 0);
+
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
+ /* Alternatively call event free / compl free */
+ if (i % 2)
+ odp_packet_tx_compl_free(tx_compl);
+ else
+ odp_event_free(ev);
+ } else {
+ odp_queue_t rcv_queue;
+ int j;
+
+ /* Scheduled queue: events may arrive in any order */
+ ev = odp_schedule(&rcv_queue, wait);
+
+ /* Event validation */
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT_FATAL(odp_event_is_valid(ev) == 1);
+ CU_ASSERT_FATAL(odp_event_type(ev) == ODP_EVENT_PACKET_TX_COMPL);
+ CU_ASSERT_FATAL(odp_packet_tx_compl_from_event(ev) !=
+ ODP_PACKET_TX_COMPL_INVALID);
+
+ tx_compl = odp_packet_tx_compl_from_event(ev);
+ CU_ASSERT_FATAL(odp_packet_tx_compl_to_event(tx_compl) == ev);
+
+ /* User ptr should be same as packet's user ptr i.e seq array ptr */
+ for (j = 0; j < TX_BATCH_LEN; j++) {
+ if (!seq_found[j] &&
+ ((const void *)&pkt_seq[j] ==
+ odp_packet_tx_compl_user_ptr(tx_compl))) {
+ /* Mark that sequence number is found */
+ seq_found[j] = 1;
+
+ /* Receive queue validation */
+ CU_ASSERT(rcv_queue == compl_queue[j]);
+ break;
+ }
+ }
+
+ /* No user area or source pool for TX completion events */
+ CU_ASSERT(odp_event_user_area(ev) == NULL);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
+ CU_ASSERT(flag < 0);
+
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
+ /* Check that sequence number is found */
+ CU_ASSERT(j < TX_BATCH_LEN);
+
+ /* Alternatively call event free / compl free */
+ if (i % 2)
+ odp_packet_tx_compl_free(tx_compl);
+ else
+ odp_event_free(ev);
+ }
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ /* Drain any leftover events before destroying the queues */
+ odp_schedule_pause();
+
+ while (1) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ odp_schedule_resume();
+
+ for (i = 0; i < TX_BATCH_LEN; i++)
+ odp_queue_destroy(compl_queue[i]);
+}
+
+/*
+ * Validate poll-mode Tx completion. Each packet in the batch is assigned a
+ * distinct completion identifier; odp_packet_tx_compl_done() must report 0
+ * before transmission and a positive value (repeatedly, without clearing)
+ * after the packet has been received back on the Rx interface.
+ */
+static void pktio_test_pktout_compl_poll(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_pktio_capability_t pktio_capa;
+ odp_pktout_queue_t pktout_queue;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_packet_tx_compl_opt_t opt;
+ pktio_info_t pktio_rx_info;
+ odp_pktio_config_t config;
+ int ret, i, num_rx = 0;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &pktio_capa) == 0);
+
+ /* Configure Tx completion offload for PKTIO Tx */
+ if (i == 0) {
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.mode_poll == 1);
+ CU_ASSERT_FATAL(pktio_capa.tx_compl.max_compl_id >= (TX_BATCH_LEN - 1));
+
+ odp_pktio_config_init(&config);
+ config.tx_compl.mode_poll = 1;
+ config.tx_compl.max_compl_id = TX_BATCH_LEN - 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ /* Completion status is initially zero */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) == 0);
+ }
+
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx, pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ memset(&opt, 0, sizeof(opt));
+
+ /* Disabled by default */
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) == 0);
+
+ /* Check that disable works */
+ opt.compl_id = 0;
+ opt.mode = ODP_PACKET_TX_COMPL_POLL;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) != 0);
+ opt.mode = ODP_PACKET_TX_COMPL_DISABLED;
+ odp_packet_tx_compl_request(pkt_tbl[0], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[0]) == 0);
+
+ /* Prepare batch of pkts with different tx completion identifiers */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[i]) == 0);
+ opt.compl_id = i;
+ opt.mode = ODP_PACKET_TX_COMPL_POLL;
+ odp_packet_tx_compl_request(pkt_tbl[i], &opt);
+ CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[i]) != 0);
+ /* Set pkt sequence number as its user ptr */
+ odp_packet_user_ptr_set(pkt_tbl[i], (const void *)&pkt_seq[i]);
+
+ /* Completion status should be still zero after odp_packet_tx_compl_request() */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN) == TX_BATCH_LEN);
+
+ num_rx = wait_for_packets(&pktio_rx_info, pkt_tbl, pkt_seq, TX_BATCH_LEN, TXRX_MODE_SINGLE,
+ ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_rx; i++) {
+ /* Transmits should be complete since we received the packets already */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) > 0);
+
+ /* Check that the previous call did not clear the status */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) > 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/*
+ * Precondition check for event-mode Tx completion tests: active only when
+ * the interface supports event mode and the requested completion queue
+ * type (plain or scheduled).
+ */
+static int pktio_check_pktout_compl_event(bool plain)
+{
+ odp_pktio_param_t pktio_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.tx_compl.mode_event ||
+ (plain && !capa.tx_compl.queue_type_plain) ||
+ (!plain && !capa.tx_compl.queue_type_sched))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Precondition check for poll-mode Tx completion tests: active only when
+ * poll mode is supported and enough completion identifiers exist for a
+ * full TX_BATCH_LEN batch.
+ */
+static int pktio_check_pktout_compl_poll(void)
+{
+ odp_pktio_param_t pktio_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || capa.tx_compl.mode_poll == 0 ||
+ capa.tx_compl.max_compl_id < (TX_BATCH_LEN - 1))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* CUnit wrapper: event-mode Tx completion check with a plain queue */
+static int pktio_check_pktout_compl_event_plain_queue(void)
+{
+ return pktio_check_pktout_compl_event(true);
+}
+
+/* CUnit wrapper: event-mode Tx completion check with a scheduled queue */
+static int pktio_check_pktout_compl_event_sched_queue(void)
+{
+ return pktio_check_pktout_compl_event(false);
+}
+
+/* CUnit wrapper: event-mode Tx completion test with a plain queue */
+static void pktio_test_pktout_compl_event_plain_queue(void)
+{
+ pktio_test_pktout_compl_event(true);
+}
+
+/* CUnit wrapper: event-mode Tx completion test with a scheduled queue */
+static void pktio_test_pktout_compl_event_sched_queue(void)
+{
+ pktio_test_pktout_compl_event(false);
+}
+
+/*
+ * Validate the ODP_PACKET_FREE_CTRL_DONT_FREE flag: a packet marked with
+ * it must remain valid (owned by the application) after odp_pktout_send(),
+ * so the very same packet handle can be retransmitted repeatedly. The
+ * packet is sent five times, each time waiting for the loopback copy and
+ * comparing its length to the original before freeing the received copy.
+ */
+static void pktio_test_pktout_dont_free(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ odp_packet_t pkt, rx_pkt;
+ odp_pktio_capability_t pktio_capa;
+ odp_pktout_queue_t pktout_queue;
+ odp_pktio_t pktio_tx, pktio_rx;
+ pktio_info_t pktio_rx_info;
+ uint32_t pkt_seq;
+ int ret, i;
+ const int num_pkt = 1;
+ int transmits = 5;
+ int num_rx = 0;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ pktio_tx = pktio[0];
+ /* Loop back on the same interface when only one is configured */
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ /* Check TX interface capa */
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &pktio_capa) == 0);
+ CU_ASSERT_FATAL(pktio_capa.free_ctrl.dont_free == 1);
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ ret = create_packets(&pkt, &pkt_seq, num_pkt, pktio_tx, pktio_rx);
+ CU_ASSERT_FATAL(ret == num_pkt);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ /* Set don't free flag */
+ CU_ASSERT(odp_packet_free_ctrl(pkt) == ODP_PACKET_FREE_CTRL_DISABLED);
+ odp_packet_free_ctrl_set(pkt, ODP_PACKET_FREE_CTRL_DONT_FREE);
+ CU_ASSERT_FATAL(odp_packet_free_ctrl(pkt) == ODP_PACKET_FREE_CTRL_DONT_FREE);
+
+ while (transmits--) {
+ /* Retransmit the same packet after it has been received from the RX interface */
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue, &pkt, num_pkt) == num_pkt);
+
+ num_rx = wait_for_packets(&pktio_rx_info, &rx_pkt, &pkt_seq, num_pkt,
+ TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT(num_rx == num_pkt);
+
+ if (num_rx != num_pkt)
+ break;
+
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(rx_pkt));
+ odp_packet_free(rx_pkt);
+ }
+
+ /* The original packet is still owned by the application: free it here */
+ odp_packet_free(pkt);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* CUnit precondition: run pktio_test_pktout_dont_free() only when the first
+ * interface reports the free_ctrl.dont_free capability. */
+static int pktio_check_pktout_dont_free(void)
+{
+ odp_pktio_param_t pktio_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret == 0 && capa.free_ctrl.dont_free == 1)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Generic driver for the UDP/IPv4 checksum offload tests. Opens the
+ * interfaces, lets config_fn enable the checksum feature under test,
+ * optionally runs prep_fn on each TX packet (e.g. to pre-fill or clear a
+ * checksum), transmits TX_BATCH_LEN packets and runs test_fn on each
+ * packet received back. */
+static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
+ void (*prep_fn)(odp_packet_t pkt),
+ void (*test_fn)(odp_packet_t pkt))
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ pktio_info_t pktio_rx_info;
+ odp_pktout_queue_t pktout_queue;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ int ret;
+ int i, num_rx;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ /* Checksum config must happen before start */
+ config_fn(pktio_tx, pktio_rx);
+
+ for (i = 0; i < num_ifaces; ++i) {
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ _pktio_wait_linkup(pktio[i]);
+ }
+
+ ret = create_packets_udp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx, false, ETH_UNICAST);
+ CU_ASSERT(ret == TX_BATCH_LEN);
+ if (ret != TX_BATCH_LEN) {
+ /* Bail out but still release the interfaces */
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+ return;
+ }
+
+ /* Provide L3 and L4 proto for pktout HW checksum generation */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ odp_packet_has_ipv4_set(pkt_tbl[i], true);
+ odp_packet_has_udp_set(pkt_tbl[i], true);
+ }
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ for (i = 0; i < TX_BATCH_LEN; i++)
+ if (prep_fn)
+ prep_fn(pkt_tbl[i]);
+
+ send_packets(pktout_queue, pkt_tbl, TX_BATCH_LEN);
+ num_rx = wait_for_packets(&pktio_rx_info, pkt_tbl, pkt_seq,
+ TX_BATCH_LEN, TXRX_MODE_MULTI,
+ ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++) {
+ test_fn(pkt_tbl[i]);
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* SCTP variant of pktio_test_chksum(): same flow, but generates SCTP test
+ * packets and compares received headers up to ODPH_SCTPHDR_LEN.
+ * NOTE(review): largely duplicates the UDP driver; could share a helper. */
+static void pktio_test_chksum_sctp(void (*config_fn)(odp_pktio_t, odp_pktio_t),
+ void (*prep_fn)(odp_packet_t pkt),
+ void (*test_fn)(odp_packet_t pkt))
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ pktio_info_t pktio_rx_info;
+ odp_pktout_queue_t pktout_queue;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ int ret;
+ int i, num_rx;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ /* Checksum config must happen before start */
+ config_fn(pktio_tx, pktio_rx);
+
+ for (i = 0; i < num_ifaces; ++i) {
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ _pktio_wait_linkup(pktio[i]);
+ }
+
+ ret = create_packets_sctp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT(ret == TX_BATCH_LEN);
+ if (ret != TX_BATCH_LEN) {
+ /* Bail out but still release the interfaces */
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+ return;
+ }
+
+ /* Provide L3 and L4 proto for pktout HW checksum generation */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ odp_packet_has_ipv4_set(pkt_tbl[i], true);
+ odp_packet_has_sctp_set(pkt_tbl[i], true);
+ }
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ for (i = 0; i < TX_BATCH_LEN; i++)
+ if (prep_fn)
+ prep_fn(pkt_tbl[i]);
+
+ send_packets(pktout_queue, pkt_tbl, TX_BATCH_LEN);
+ num_rx = wait_for_packets_hdr(&pktio_rx_info, pkt_tbl, pkt_seq,
+ TX_BATCH_LEN, TXRX_MODE_MULTI,
+ ODP_TIME_SEC_IN_NS, ODPH_SCTPHDR_LEN, false);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++) {
+ test_fn(pkt_tbl[i]);
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/* Precondition: RX-side interface (index 1 when two interfaces are used)
+ * must support IPv4 checksum validation on packet input. */
+static int pktio_check_chksum_in_ipv4(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int idx = (num_ifaces == 1) ? 0 : 1;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[idx], pool[idx], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktin.bit.ipv4_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable IPv4 checksum validation on the RX interface. */
+static void pktio_test_chksum_in_ipv4_config(odp_pktio_t pktio_tx ODP_UNUSED,
+ odp_pktio_t pktio_rx)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.ipv4_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktin.bit.ipv4_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_rx, &config) == 0);
+}
+
+/* Write a correct IPv4 header checksum so RX validation should report OK. */
+static void pktio_test_chksum_in_ipv4_prep(odp_packet_t pkt)
+{
+ odph_ipv4_csum_update(pkt);
+}
+
+static void pktio_test_chksum_in_ipv4_test(odp_packet_t pkt)
+{
+ CU_ASSERT(odp_packet_l3_chksum_status(pkt) == ODP_PACKET_CHKSUM_OK);
+}
+
+static void pktio_test_chksum_in_ipv4(void)
+{
+ pktio_test_chksum(pktio_test_chksum_in_ipv4_config,
+ pktio_test_chksum_in_ipv4_prep,
+ pktio_test_chksum_in_ipv4_test);
+}
+
+/* Precondition: RX-side interface must support UDP checksum validation on
+ * packet input. */
+static int pktio_check_chksum_in_udp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int idx = (num_ifaces == 1) ? 0 : 1;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[idx], pool[idx], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktin.bit.udp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable UDP checksum validation on the RX interface. */
+static void pktio_test_chksum_in_udp_config(odp_pktio_t pktio_tx ODP_UNUSED,
+ odp_pktio_t pktio_rx)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.udp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktin.bit.udp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_rx, &config) == 0);
+}
+
+/* Compute valid IPv4 and UDP checksums so RX validation should report OK. */
+static void pktio_test_chksum_in_udp_prep(odp_packet_t pkt)
+{
+ odp_packet_has_ipv4_set(pkt, 1);
+ odp_packet_has_udp_set(pkt, 1);
+ odph_ipv4_csum_update(pkt);
+ odph_udp_chksum_set(pkt);
+}
+
+static void pktio_test_chksum_in_udp_test(odp_packet_t pkt)
+{
+ CU_ASSERT(odp_packet_l4_chksum_status(pkt) == ODP_PACKET_CHKSUM_OK);
+}
+
+static void pktio_test_chksum_in_udp(void)
+{
+ pktio_test_chksum(pktio_test_chksum_in_udp_config,
+ pktio_test_chksum_in_udp_prep,
+ pktio_test_chksum_in_udp_test);
+}
+
+/* Precondition: RX-side interface must support SCTP checksum validation on
+ * packet input. */
+static int pktio_check_chksum_in_sctp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int idx = (num_ifaces == 1) ? 0 : 1;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[idx], pool[idx], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktin.bit.sctp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable SCTP checksum validation on the RX interface. */
+static void pktio_test_chksum_in_sctp_config(odp_pktio_t pktio_tx ODP_UNUSED,
+ odp_pktio_t pktio_rx)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktin.bit.sctp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_rx, &config) == 0);
+}
+
+/* Compute valid IPv4 and SCTP checksums so RX validation should report OK. */
+static void pktio_test_chksum_in_sctp_prep(odp_packet_t pkt)
+{
+ odp_packet_has_ipv4_set(pkt, 1);
+ odp_packet_has_sctp_set(pkt, 1);
+ odph_ipv4_csum_update(pkt);
+ odph_sctp_chksum_set(pkt);
+}
+
+static void pktio_test_chksum_in_sctp_test(odp_packet_t pkt)
+{
+ CU_ASSERT(odp_packet_l4_chksum_status(pkt) == ODP_PACKET_CHKSUM_OK);
+}
+
+static void pktio_test_chksum_in_sctp(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_in_sctp_config,
+ pktio_test_chksum_in_sctp_prep,
+ pktio_test_chksum_in_sctp_test);
+}
+
+/* Precondition: TX-side interface must support IPv4 checksum insertion on
+ * packet output (both the enable and per-packet override capability bits). */
+static int pktio_check_chksum_out_ipv4(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktout.bit.ipv4_chksum_ena ||
+ !capa.config.pktout.bit.ipv4_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable IPv4 checksum insertion offload on the TX interface (per-packet
+ * control; default insertion is left disabled). */
+static void pktio_test_chksum_out_ipv4_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.ipv4_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.ipv4_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.ipv4_chksum_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+/* A non-zero header checksum implies HW inserted one on TX. */
+static void pktio_test_chksum_out_ipv4_test(odp_packet_t pkt)
+{
+ odph_ipv4hdr_t *ip = odp_packet_l3_ptr(pkt, NULL);
+
+ CU_ASSERT(ip != NULL);
+ if (ip != NULL)
+ CU_ASSERT(ip->chksum != 0);
+}
+
+/* Per-packet insertion disabled: checksum must stay zero on the wire. */
+static void pktio_test_chksum_out_ipv4_no_ovr_prep(odp_packet_t pkt)
+{
+ odp_packet_l3_chksum_insert(pkt, false);
+}
+
+static void pktio_test_chksum_out_ipv4_no_ovr_test(odp_packet_t pkt)
+{
+ odph_ipv4hdr_t *ip = odp_packet_l3_ptr(pkt, NULL);
+
+ CU_ASSERT(ip != NULL);
+ if (ip != NULL)
+ CU_ASSERT(ip->chksum == 0);
+}
+
+static void pktio_test_chksum_out_ipv4_no_ovr(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_ipv4_config,
+ pktio_test_chksum_out_ipv4_no_ovr_prep,
+ pktio_test_chksum_out_ipv4_no_ovr_test);
+}
+
+/* Per-packet insertion enabled: checksum must be generated. */
+static void pktio_test_chksum_out_ipv4_ovr_prep(odp_packet_t pkt)
+{
+ odp_packet_l3_chksum_insert(pkt, true);
+}
+
+static void pktio_test_chksum_out_ipv4_ovr_test(odp_packet_t pkt)
+{
+ odph_ipv4hdr_t *ip = odp_packet_l3_ptr(pkt, NULL);
+
+ CU_ASSERT(ip != NULL);
+ if (ip != NULL)
+ CU_ASSERT(ip->chksum != 0);
+}
+
+static void pktio_test_chksum_out_ipv4_ovr(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_ipv4_config,
+ pktio_test_chksum_out_ipv4_ovr_prep,
+ pktio_test_chksum_out_ipv4_ovr_test);
+}
+
+/* Interface-level default insertion enabled; no per-packet prep needed. */
+static void pktio_test_chksum_out_ipv4_pktio_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx
+ ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.ipv4_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.ipv4_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.ipv4_chksum_ena = 1;
+ config.pktout.bit.ipv4_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+static void pktio_test_chksum_out_ipv4_pktio(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_ipv4_pktio_config,
+ NULL,
+ pktio_test_chksum_out_ipv4_test);
+}
+
+/* Precondition: TX-side interface must support UDP checksum insertion on
+ * packet output (both the enable and per-packet override capability bits). */
+static int pktio_check_chksum_out_udp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktout.bit.udp_chksum_ena ||
+ !capa.config.pktout.bit.udp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable UDP checksum insertion offload on the TX interface (per-packet
+ * control; default insertion is left disabled). */
+static void pktio_test_chksum_out_udp_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.udp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.udp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.udp_chksum_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+/* Checksum must be present and verify clean (software re-check). */
+static void pktio_test_chksum_out_udp_test(odp_packet_t pkt)
+{
+ odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(udp != NULL);
+ if (udp != NULL) {
+ CU_ASSERT(udp->chksum != 0);
+ CU_ASSERT(!odph_udp_chksum_verify(pkt));
+ }
+}
+
+/* Per-packet insertion disabled: UDP checksum must stay zero on the wire. */
+static void pktio_test_chksum_out_udp_no_ovr_prep(odp_packet_t pkt)
+{
+ odph_ipv4_csum_update(pkt);
+ odp_packet_l4_chksum_insert(pkt, false);
+}
+
+static void pktio_test_chksum_out_udp_no_ovr_test(odp_packet_t pkt)
+{
+ odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(udp != NULL);
+ if (udp != NULL)
+ CU_ASSERT(udp->chksum == 0);
+}
+
+static void pktio_test_chksum_out_udp_no_ovr(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_udp_config,
+ pktio_test_chksum_out_udp_no_ovr_prep,
+ pktio_test_chksum_out_udp_no_ovr_test);
+}
+
+/* Per-packet insertion enabled: checksum must be generated. */
+static void pktio_test_chksum_out_udp_ovr_prep(odp_packet_t pkt)
+{
+ odp_packet_l4_chksum_insert(pkt, true);
+}
+
+static void pktio_test_chksum_out_udp_ovr_test(odp_packet_t pkt)
+{
+ odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(udp != NULL);
+ if (udp != NULL) {
+ CU_ASSERT(udp->chksum != 0);
+ CU_ASSERT(!odph_udp_chksum_verify(pkt));
+ }
+}
+
+static void pktio_test_chksum_out_udp_ovr(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_udp_config,
+ pktio_test_chksum_out_udp_ovr_prep,
+ pktio_test_chksum_out_udp_ovr_test);
+}
+
+/* Interface-level default insertion enabled; no per-packet prep needed. */
+static void pktio_test_chksum_out_udp_pktio_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx
+ ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.udp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.udp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.udp_chksum_ena = 1;
+ config.pktout.bit.udp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+static void pktio_test_chksum_out_udp_pktio(void)
+{
+ pktio_test_chksum(pktio_test_chksum_out_udp_pktio_config,
+ NULL,
+ pktio_test_chksum_out_udp_test);
+}
+
+/* Precondition: TX-side interface must support SCTP checksum insertion on
+ * packet output (both the enable and per-packet override capability bits). */
+static int pktio_check_chksum_out_sctp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktout.bit.sctp_chksum_ena ||
+ !capa.config.pktout.bit.sctp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Enable SCTP checksum insertion offload on the TX interface (per-packet
+ * control; default insertion is left disabled). */
+static void pktio_test_chksum_out_sctp_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.sctp_chksum_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+/* Checksum must be present and verify clean (software re-check). */
+static void pktio_test_chksum_out_sctp_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL) {
+ CU_ASSERT(sctp->chksum != 0);
+ CU_ASSERT(!odph_sctp_chksum_verify(pkt));
+ }
+}
+
+/* Per-packet insertion disabled: SCTP checksum must stay zero on the wire. */
+static void pktio_test_chksum_out_sctp_no_ovr_prep(odp_packet_t pkt)
+{
+ odph_ipv4_csum_update(pkt);
+ odp_packet_l4_chksum_insert(pkt, false);
+}
+
+static void pktio_test_chksum_out_sctp_no_ovr_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL)
+ CU_ASSERT(sctp->chksum == 0);
+}
+
+static void pktio_test_chksum_out_sctp_no_ovr(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_config,
+ pktio_test_chksum_out_sctp_no_ovr_prep,
+ pktio_test_chksum_out_sctp_no_ovr_test);
+}
+
+/* Per-packet insertion enabled: checksum must be generated. */
+static void pktio_test_chksum_out_sctp_ovr_prep(odp_packet_t pkt)
+{
+ odp_packet_l4_chksum_insert(pkt, true);
+}
+
+static void pktio_test_chksum_out_sctp_ovr_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL) {
+ CU_ASSERT(sctp->chksum != 0);
+ CU_ASSERT(!odph_sctp_chksum_verify(pkt));
+ }
+}
+
+static void pktio_test_chksum_out_sctp_ovr(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_config,
+ pktio_test_chksum_out_sctp_ovr_prep,
+ pktio_test_chksum_out_sctp_ovr_test);
+}
+
+/* Interface-level default insertion enabled; no per-packet prep needed. */
+static void pktio_test_chksum_out_sctp_pktio_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx
+ ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.sctp_chksum_ena = 1;
+ config.pktout.bit.sctp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+static void pktio_test_chksum_out_sctp_pktio(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_pktio_config,
+ NULL,
+ pktio_test_chksum_out_sctp_test);
+}
+
+/* Create the packet pool for interface index 'num'; the handle is stored in
+ * the global pool[] array. Returns 0 on success, -1 on failure. */
+static int create_pool(const char *iface, int num)
+{
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_param_t params;
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
+
+ odp_pool_param_init(&params);
+ set_pool_len(&params, &pool_capa);
+ /* Allocate enough buffers taking into consideration core starvation
+ * due to caching */
+ params.pkt.num = PKT_BUF_NUM + params.pkt.cache_size;
+ params.type = ODP_POOL_PACKET;
+
+ /* Pool name encodes the interface and segmentation mode for debugging */
+ snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s_%d",
+ iface, pool_segmentation);
+
+ pool[num] = odp_pool_create(pool_name, &params);
+ if (ODP_POOL_INVALID == pool[num]) {
+ ODPH_ERR("failed to create pool: %s\n", pool_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Create the packet vector pool for interface index 'num'; the handle is
+ * stored in the global pktv_pool[] array. Returns 0 on success, -1 on
+ * failure (including when the platform's vector pool is too small). */
+static int create_pktv_pool(const char *iface, int num)
+{
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t params;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
+
+ if (pool_capa.vector.max_num < PKT_BUF_NUM)
+ return -1;
+
+ odp_pool_param_init(&params);
+ set_pool_len(&params, &pool_capa);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = PKT_BUF_NUM;
+ params.vector.max_size = pool_capa.vector.max_size;
+
+ /* Pool name encodes the interface and segmentation mode for debugging */
+ snprintf(pool_name, sizeof(pool_name), "pktv_pool_%s_%d",
+ iface, pool_segmentation);
+
+ pktv_pool[num] = odp_pool_create(pool_name, &params);
+ if (ODP_POOL_INVALID == pktv_pool[num]) {
+ ODPH_ERR("failed to create pool: %s\n", pool_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Precondition helper: packet vector tests are active only when the first
+ * interface supports vectors in the given pktin mode. */
+static int pktio_check_pktv(odp_pktin_mode_t in_mode)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = in_mode;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.vector.supported)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Packet vector check/test wrappers for the supported pktin modes and
+ * scheduler sync types. */
+static int pktio_check_pktv_queue(void)
+{
+ return pktio_check_pktv(ODP_PKTIN_MODE_QUEUE);
+}
+
+static int pktio_check_pktv_sched(void)
+{
+ return pktio_check_pktv(ODP_PKTIN_MODE_SCHED);
+}
+
+static void pktio_test_pktv_recv_plain(void)
+{
+ test_txrx(ODP_PKTIN_MODE_QUEUE, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT, 0, true);
+}
+
+static void pktio_test_pktv_recv_parallel(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_PARALLEL, true);
+}
+
+static void pktio_test_pktv_recv_ordered(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_ORDERED, true);
+}
+
+static void pktio_test_pktv_recv_atomic(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_ATOMIC, true);
+}
+
+/* Exercise odp_pktin_queue_config() vector parameter validation: sizes at
+ * the capability limits must be accepted and values one past them rejected.
+ * First tested with the default pktv pool, then per configured interface.
+ * Fix: close the interface before the "vector not supported" early return
+ * so the opened pktio is not leaked. */
+static void pktio_test_pktv_pktin_queue_config(odp_pktin_mode_t in_mode)
+{
+ odp_pktin_queue_param_t queue_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int num_queues;
+ int i;
+
+ pktio = create_pktio(0, in_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ queue_param.vector.enable = 1;
+ queue_param.vector.pool = default_pktv_pool;
+ queue_param.vector.max_size = capa.vector.min_size;
+ queue_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.vector.max_size = capa.vector.max_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ if (capa.vector.max_size != capa.vector.min_size) {
+ queue_param.vector.max_size = capa.vector.max_size - capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+ }
+
+ /* Out-of-range sizes must be rejected */
+ queue_param.vector.max_size = capa.vector.min_size - 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ queue_param.vector.max_size = capa.vector.max_size + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio = create_pktio(i, in_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
+
+ if (!capa.vector.supported) {
+ printf("Vector mode is not supported. Test Skipped\n");
+ /* Close the interface before bailing out to avoid a
+ * pktio leak */
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+ return;
+ }
+
+ queue_param.vector.enable = 1;
+ queue_param.vector.pool = pktv_pool[i];
+ queue_param.vector.max_size = capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.vector.max_size = capa.vector.max_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ if (capa.vector.max_size != capa.vector.min_size) {
+ queue_param.vector.max_size = capa.vector.max_size - capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+ }
+
+ /* Out-of-range sizes must be rejected */
+ queue_param.vector.max_size = capa.vector.min_size - 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ queue_param.vector.max_size = capa.vector.max_size + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+ }
+}
+
+/* Wrappers running the vector queue-config test in each pktin mode. */
+static void pktio_test_pktv_pktin_queue_config_queue(void)
+{
+ pktio_test_pktv_pktin_queue_config(ODP_PKTIN_MODE_QUEUE);
+}
+
+static void pktio_test_pktv_pktin_queue_config_sched(void)
+{
+ pktio_test_pktv_pktin_queue_config(ODP_PKTIN_MODE_SCHED);
+}
+
+/* Set the maximum supported input/output frame length on every interface
+ * and verify that packets of that length pass through. Temporarily raises
+ * the global 'packet_len' and restores it before returning.
+ * Fix: initialize the handle array with ODP_PKTIO_INVALID instead of {0}
+ * for consistency with the other tests — 0 is not guaranteed to be the
+ * invalid value for an ODP abstract handle type. */
+static void pktio_test_recv_maxlen_set(void)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ pktio_info_t pktio_rx_info;
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+ odp_pktout_queue_t pktout_queue;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ uint32_t max_len = PKT_LEN_MAX;
+ int num_rx = 0;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; i++) {
+ uint32_t maxlen_tmp;
+
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(!odp_pktio_capability(pktio[i], &capa));
+ CU_ASSERT_FATAL(capa.set_op.op.maxlen);
+
+ odp_pktio_config_init(&config);
+ CU_ASSERT_FATAL(!odp_pktio_config(pktio[i], &config));
+
+ /* Track the smallest supported length over all interfaces;
+ * a zero capability means maxlen is not adjustable in that
+ * direction, so fall back to the current value. */
+ maxlen_tmp = capa.maxlen.max_input;
+ if (maxlen_tmp == 0)
+ maxlen_tmp = odp_pktin_maxlen(pktio[i]);
+ if (maxlen_tmp < max_len)
+ max_len = maxlen_tmp;
+
+ maxlen_tmp = capa.maxlen.max_output;
+ if (maxlen_tmp == 0)
+ maxlen_tmp = odp_pktout_maxlen(pktio[i]);
+ if (maxlen_tmp < max_len)
+ max_len = maxlen_tmp;
+
+ CU_ASSERT_FATAL(!odp_pktio_maxlen_set(pktio[i], capa.maxlen.max_input,
+ capa.maxlen.max_output));
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ /* Generate maximum length test packets */
+ packet_len = max_len;
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ /* Send packets one at a time and add delay between the packets */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
+ &pkt_tbl[i], 1) == 1);
+ ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
+ 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS, false);
+ if (ret != 1)
+ break;
+ }
+ num_rx = i;
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+
+ if (num_rx)
+ odp_packet_free_multi(pkt_tbl, num_rx);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(!odp_pktio_stop(pktio[i]));
+ CU_ASSERT_FATAL(!odp_pktio_close(pktio[i]));
+ }
+
+ /* Restore global variable */
+ packet_len = PKT_LEN_NORMAL;
+}
+
+/* Precondition: TX aging test is active only when the first interface
+ * reports a non-zero max_tx_aging_tmo_ns capability. */
+static int pktio_check_pktout_aging_tmo(void)
+{
+ odp_pktio_param_t pktio_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.max_tx_aging_tmo_ns)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Exercise the packet TX aging timeout API: verify the per-packet timeout
+ * getter/setter ordering relative to the interface capability, then send
+ * with the maximum timeout and expect all packets to be received (they
+ * should not age out within a generous timeout). */
+static void pktio_test_pktout_aging_tmo(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_pktio_capability_t pktio_capa;
+ odp_pktout_queue_t pktout_queue;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_pktio_t pktio_tx, pktio_rx;
+ pktio_info_t pktio_rx_info;
+ odp_pktio_config_t config;
+ int ret, i, num_rx = 0;
+ uint64_t tmo_0, tmo_1;
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &pktio_capa) == 0);
+
+ /* Configure Tx aging for PKTIO Tx */
+ if (i == 0) {
+ CU_ASSERT_FATAL(pktio_capa.max_tx_aging_tmo_ns > 0);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.aging_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; i++)
+ _pktio_wait_linkup(pktio[i]);
+
+ /* NOTE: pktio_capa below is the capability of the last opened
+ * interface; with two interfaces that is the RX side. */
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ /* Prepare packets with aging */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ /* Aging disabled by default */
+ CU_ASSERT(odp_packet_aging_tmo(pkt_tbl[i]) == 0);
+
+ /* Test tmo set relatively since we don't know about supported resolution */
+ odp_packet_aging_tmo_set(pkt_tbl[i], pktio_capa.max_tx_aging_tmo_ns - 1);
+ tmo_0 = odp_packet_aging_tmo(pkt_tbl[i]);
+
+ odp_packet_aging_tmo_set(pkt_tbl[i], pktio_capa.max_tx_aging_tmo_ns / 2);
+ tmo_1 = odp_packet_aging_tmo(pkt_tbl[i]);
+ CU_ASSERT(tmo_0 > tmo_1);
+
+ /* Set max before transmitting */
+ odp_packet_aging_tmo_set(pkt_tbl[i], pktio_capa.max_tx_aging_tmo_ns);
+ CU_ASSERT(odp_packet_aging_tmo(pkt_tbl[i]) != 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN) == TX_BATCH_LEN);
+
+ num_rx = wait_for_packets(&pktio_rx_info, pkt_tbl, pkt_seq, TX_BATCH_LEN, TXRX_MODE_SINGLE,
+ ODP_TIME_SEC_IN_NS, false);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+/*
+ * Verify that a pktin event queue delivers both received packets and events
+ * enqueued directly by the application (here a buffer event), each with the
+ * correct event type. Runs in ODP_PKTIN_MODE_SCHED (events pulled with
+ * odp_schedule()) or plain queue mode (events pulled with odp_queue_deq()).
+ */
+static void pktio_test_pktin_event_queue(odp_pktin_mode_t pktin_mode)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktout_queue_t pktout_queue;
+ odp_queue_t queue, from;
+ odp_pool_t buf_pool;
+ odp_pool_param_t pool_param;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_packet_t pkt;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ int ret, i;
+ odp_time_t t1, t2;
+ int inactive = 0;
+ int num_pkt = 0;
+ int num_buf = 0;
+ int num_bad = 0;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ uint64_t wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Small buffer pool for the non-packet event */
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = 2 * TX_BATCH_LEN;
+ pool_param.buf.size = 100;
+
+ buf_pool = odp_pool_create("buffer pool", &pool_param);
+ CU_ASSERT_FATAL(buf_pool != ODP_POOL_INVALID);
+
+ buf = odp_buffer_alloc(buf_pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ in_queue_param.num_queues = 1;
+ in_queue_param.hash_enable = 0;
+ in_queue_param.classifier_enable = 0;
+
+ if (pktin_mode == ODP_PKTIN_MODE_SCHED) {
+ in_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ in_queue_param.queue_param.sched.prio = odp_schedule_default_prio();
+ in_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ in_queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+
+ odp_pktout_queue_param_init(&out_queue_param);
+ out_queue_param.num_queues = 1;
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, pktin_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; ++i)
+ _pktio_wait_linkup(pktio[i]);
+
+ /* TX on the first interface, RX on the second when two are available,
+ * otherwise loop back on the same interface */
+ pktio_tx = pktio[0];
+ if (num_ifaces > 1)
+ pktio_rx = pktio[1];
+ else
+ pktio_rx = pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue(pktio_rx, &queue, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout_queue, 1) == 1);
+
+ /* Allocate and initialize test packets */
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx, pktio_rx);
+ if (ret != TX_BATCH_LEN) {
+ CU_FAIL("Failed to generate test packets");
+ return;
+ }
+
+ /* Send packets */
+ ret = odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ /* Send buffer event */
+ ret = odp_queue_enq(queue, odp_buffer_to_event(buf));
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Receive events */
+ while (1) {
+ /* Break after a period of inactivity */
+ if (pktin_mode == ODP_PKTIN_MODE_SCHED) {
+ ev = odp_schedule(&from, wait_time);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ } else {
+ /* Plain queue mode: poll and stop after one second
+ * without events (t1 marks start of idle period) */
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ if (inactive == 0) {
+ inactive = 1;
+ t1 = odp_time_local();
+ continue;
+ } else {
+ t2 = odp_time_local();
+ if (odp_time_diff_ns(t2, t1) > ODP_TIME_SEC_IN_NS)
+ break;
+
+ continue;
+ }
+ }
+
+ inactive = 0;
+ }
+
+ /* Count events per type; only packets carrying a valid test
+ * sequence number are counted as received test packets */
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ num_pkt++;
+
+ } else if (odp_event_type(ev) == ODP_EVENT_BUFFER) {
+ num_buf++;
+ } else {
+ CU_FAIL("Bad event type");
+ num_bad++;
+ }
+
+ odp_event_free(ev);
+ }
+
+ CU_ASSERT(num_pkt == TX_BATCH_LEN);
+ CU_ASSERT(num_buf == 1);
+ CU_ASSERT(num_bad == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pool_destroy(buf_pool) == 0);
+}
+
+/* Run the pktin event queue test in scheduled mode */
+static void pktio_test_pktin_event_sched(void)
+{
+ pktio_test_pktin_event_queue(ODP_PKTIN_MODE_SCHED);
+}
+
+/* Conditional-test hook: active unless skipped via CI skip list */
+static int pktio_check_pktin_event_sched(void)
+{
+ if (odp_cunit_ci_skip("pktio_test_pktin_event_sched"))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/*
+ * Suite init: select test interfaces from the ODP_PKTIO_IF0/ODP_PKTIO_IF1
+ * environment variables (defaulting to the "loop" interface) and create the
+ * per-interface packet and packet-vector pools plus the default pools.
+ * Returns 0 on success, -1 on pool creation failure.
+ */
+static int pktio_suite_init(void)
+{
+ int i;
+
+ odp_atomic_init_u32(&ip_seq, 0);
+
+ /* NOTE(review): presumably makes receive paths wait longer for the
+ * network to come up — confirm against wait_for_packets() */
+ if (getenv("ODP_WAIT_FOR_NETWORK"))
+ wait_for_network = true;
+
+ iface_name[0] = getenv("ODP_PKTIO_IF0");
+ iface_name[1] = getenv("ODP_PKTIO_IF1");
+ num_ifaces = 1;
+
+ if (!iface_name[0]) {
+ printf("No interfaces specified, using default \"loop\".\n");
+ iface_name[0] = "loop";
+ } else if (!iface_name[1]) {
+ printf("Using loopback interface: %s\n", iface_name[0]);
+ } else {
+ num_ifaces = 2;
+ printf("Using paired interfaces: %s %s\n",
+ iface_name[0], iface_name[1]);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ if (create_pool(iface_name[i], i) != 0)
+ return -1;
+
+ if (create_pktv_pool(iface_name[i], i) != 0)
+ return -1;
+ }
+
+ if (default_pool_create() != 0) {
+ ODPH_ERR("failed to create default pool\n");
+ return -1;
+ }
+
+ if (default_pktv_pool_create() != 0) {
+ ODPH_ERR("failed to create default pktv pool\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Suite init variant: unsegmented packet pools */
+static int pktio_suite_init_unsegmented(void)
+{
+ pool_segmentation = PKT_POOL_UNSEGMENTED;
+ return pktio_suite_init();
+}
+
+/* Suite init variant: segmented packet pools */
+static int pktio_suite_init_segmented(void)
+{
+ pool_segmentation = PKT_POOL_SEGMENTED;
+ return pktio_suite_init();
+}
+
+/* Packet vector suite init (always unsegmented pools) */
+static int pktv_suite_init(void)
+{
+ pool_segmentation = PKT_POOL_UNSEGMENTED;
+ return pktio_suite_init();
+}
+
+/*
+ * Suite teardown: destroy per-interface packet and packet-vector pools (the
+ * pool name encodes the interface name and segmentation mode), then the two
+ * default pools, and finally report any inactive tests. Returns 0 on full
+ * success, -1 if any destroy or the inactive-test report failed.
+ */
+static int pktio_suite_term(void)
+{
+ static const char * const prefix[] = {"pkt_pool", "pktv_pool"};
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_t pool;
+ int i, j;
+ int ret = 0;
+
+ /* Same destroy sequence for both pool families */
+ for (j = 0; j < 2; ++j) {
+ for (i = 0; i < num_ifaces; ++i) {
+ snprintf(pool_name, sizeof(pool_name), "%s_%s_%d",
+ prefix[j], iface_name[i], pool_segmentation);
+ pool = odp_pool_lookup(pool_name);
+ if (pool == ODP_POOL_INVALID)
+ continue;
+
+ if (odp_pool_destroy(pool) != 0) {
+ ODPH_ERR("failed to destroy pool %s\n", pool_name);
+ ret = -1;
+ }
+ }
+ }
+
+ if (odp_pool_destroy(default_pkt_pool) != 0) {
+ ODPH_ERR("failed to destroy default pool\n");
+ ret = -1;
+ }
+ default_pkt_pool = ODP_POOL_INVALID;
+
+ if (odp_pool_destroy(default_pktv_pool) != 0) {
+ ODPH_ERR("failed to destroy default pktv pool\n");
+ ret = -1;
+ }
+ default_pktv_pool = ODP_POOL_INVALID;
+
+ if (odp_cunit_print_inactive())
+ ret = -1;
+
+ return ret;
+}
+
+/* Packet vector suite teardown: reuses the common pktio teardown */
+static int pktv_suite_term(void)
+{
+ pool_segmentation = PKT_POOL_UNSEGMENTED;
+ return pktio_suite_term();
+}
+
+/* Test cases run against unsegmented packet pools; _CONDITIONAL entries are
+ * activated only when the matching pktio_check_* hook reports support */
+odp_testinfo_t pktio_suite_unsegmented[] = {
+ ODP_TEST_INFO(pktio_test_default_values),
+ ODP_TEST_INFO(pktio_test_open),
+ ODP_TEST_INFO(pktio_test_lookup),
+ ODP_TEST_INFO(pktio_test_index),
+ ODP_TEST_INFO(pktio_test_print),
+ ODP_TEST_INFO(pktio_test_pktio_config),
+ ODP_TEST_INFO(pktio_test_info),
+ ODP_TEST_INFO(pktio_test_link_info),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_direct),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_sched),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_multi_sched),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_queue),
+ ODP_TEST_INFO(pktio_test_pktout_queue_config),
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
+ ODP_TEST_INFO(pktio_test_sched_queue),
+ ODP_TEST_INFO(pktio_test_sched_multi),
+ ODP_TEST_INFO(pktio_test_recv),
+ ODP_TEST_INFO(pktio_test_recv_multi),
+ ODP_TEST_INFO(pktio_test_recv_queue),
+ ODP_TEST_INFO(pktio_test_recv_tmo),
+ ODP_TEST_INFO(pktio_test_recv_mq_tmo),
+ ODP_TEST_INFO(pktio_test_recv_mtu),
+ ODP_TEST_INFO(pktio_test_maxlen),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_maxlen_set,
+ pktio_check_maxlen_set),
+ ODP_TEST_INFO(pktio_test_promisc),
+ ODP_TEST_INFO(pktio_test_mac),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_start_stop,
+ pktio_check_start_stop),
+ ODP_TEST_INFO(pktio_test_recv_on_wonly),
+ ODP_TEST_INFO(pktio_test_send_on_ronly),
+ ODP_TEST_INFO(pktio_test_plain_multi_event),
+ ODP_TEST_INFO(pktio_test_sched_multi_event),
+ ODP_TEST_INFO(pktio_test_recv_multi_event),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_event_sched,
+ pktio_check_pktin_event_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
+ pktio_check_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters_bcast,
+ pktio_check_statistics_counters_bcast),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_queue_statistics_counters,
+ pktio_check_queue_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_event_queue_statistics_counters,
+ pktio_check_event_queue_statistics_counters),
+ ODP_TEST_INFO(pktio_test_extra_stats),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_proto_statistics_counters,
+ pktio_check_proto_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
+ pktio_check_pktin_ts),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_ts,
+ pktio_check_pktout_ts),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_in_ipv4,
+ pktio_check_chksum_in_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_in_udp,
+ pktio_check_chksum_in_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_in_sctp,
+ pktio_check_chksum_in_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_ipv4_no_ovr,
+ pktio_check_chksum_out_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_ipv4_pktio,
+ pktio_check_chksum_out_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_ipv4_ovr,
+ pktio_check_chksum_out_ipv4),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_udp_no_ovr,
+ pktio_check_chksum_out_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_udp_pktio,
+ pktio_check_chksum_out_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_udp_ovr,
+ pktio_check_chksum_out_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_no_ovr,
+ pktio_check_chksum_out_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_pktio,
+ pktio_check_chksum_out_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_ovr,
+ pktio_check_chksum_out_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_recv_maxlen_set,
+ pktio_check_maxlen_set),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_aging_tmo,
+ pktio_check_pktout_aging_tmo),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_compl_event_plain_queue,
+ pktio_check_pktout_compl_event_plain_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_compl_event_sched_queue,
+ pktio_check_pktout_compl_event_sched_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_compl_poll, pktio_check_pktout_compl_poll),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_dont_free, pktio_check_pktout_dont_free),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_rx, pktio_check_pause_rx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_tx, pktio_check_pause_tx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pause_both, pktio_check_pause_both),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_rx, pktio_check_pfc_rx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_tx, pktio_check_pfc_tx),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_enable_pfc_both, pktio_check_pfc_both),
+ ODP_TEST_INFO_NULL
+};
+
+/* Subset of transfer tests re-run against segmented packet pools */
+odp_testinfo_t pktio_suite_segmented[] = {
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
+ ODP_TEST_INFO(pktio_test_sched_queue),
+ ODP_TEST_INFO(pktio_test_sched_multi),
+ ODP_TEST_INFO(pktio_test_recv),
+ ODP_TEST_INFO(pktio_test_recv_multi),
+ ODP_TEST_INFO(pktio_test_recv_mtu),
+ ODP_TEST_INFO_NULL
+};
+
+/* Packet vector tests, conditional on queue/sched pktv support */
+odp_testinfo_t pktv_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_pktin_queue_config_queue, pktio_check_pktv_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_pktin_queue_config_sched, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_plain, pktio_check_pktv_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_parallel, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_ordered, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_atomic, pktio_check_pktv_sched),
+ ODP_TEST_INFO_NULL
+};
+
+/* Registered suites: {name, init, term, test table} */
+odp_suiteinfo_t pktio_suites[] = {
+ {"Packet I/O Unsegmented", pktio_suite_init_unsegmented,
+ pktio_suite_term, pktio_suite_unsegmented},
+ {"Packet I/O Segmented", pktio_suite_init_segmented,
+ pktio_suite_term, pktio_suite_segmented},
+ {"Packet parser", parser_suite_init, parser_suite_term, parser_suite},
+ {"Packet vector", pktv_suite_init, pktv_suite_term, pktv_suite},
+ {"Large Segment Offload", lso_suite_init, lso_suite_term, lso_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+/* Parse common CUnit options, then register and run all pktio suites */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(pktio_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/pool/.gitignore b/test/validation/api/pool/.gitignore
index fc91b28d6..fc91b28d6 100644
--- a/test/common_plat/validation/api/pool/.gitignore
+++ b/test/validation/api/pool/.gitignore
diff --git a/test/validation/api/pool/Makefile.am b/test/validation/api/pool/Makefile.am
new file mode 100644
index 000000000..1b0d5934c
--- /dev/null
+++ b/test/validation/api/pool/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = pool_main
+pool_main_SOURCES = pool.c
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
new file mode 100644
index 000000000..86a47230a
--- /dev/null
+++ b/test/validation/api/pool/pool.c
@@ -0,0 +1,2386 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "test_common_macros.h"
+#include <odp/helper/odph_api.h>
+
+#define MAX_WORKERS 32
+
+#define BUF_SIZE 1500
+#define BUF_NUM 1000
+#define TMO_NUM 1000
+#define VEC_NUM 1000
+#define VEC_LEN 32
+#define PKT_LEN 400
+#define PKT_NUM 500
+#define ELEM_NUM 10u
+#define ELEM_SIZE 128u
+#define CACHE_SIZE 32
+#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
+#define UAREA 0xaa
+
+#define EXT_NUM_BUF 10
+#define EXT_BUF_SIZE 2048
+#define EXT_BUF_ALIGN 64
+#define EXT_APP_HDR_SIZE 128
+#define EXT_UAREA_SIZE 32
+#define EXT_HEADROOM 16
+#define MAGIC_U8 0x7a
+
+/* Shared state for multi-threaded pool tests */
+typedef struct {
+ odp_barrier_t init_barrier;
+ odp_atomic_u32_t index;
+ uint32_t nb_threads;
+ odp_pool_t pool;
+} global_shared_mem_t;
+
+/* Records user-area init callback activity: total invocation count and a
+ * per-element-index mark (see init_event_uarea()) */
+typedef struct {
+ uint32_t count;
+ uint8_t mark[ELEM_NUM];
+} uarea_init_t;
+
+static global_shared_mem_t *global_mem;
+
+/* Capabilities and default parameters queried once at suite init */
+static odp_pool_capability_t global_pool_capa;
+static odp_pool_param_t default_pool_param;
+static odp_pool_ext_capability_t global_pool_ext_capa;
+
+/*
+ * Check that odp_pool_param_init() produces valid defaults for every pool
+ * type regardless of the memory's prior contents (fill byte pattern).
+ */
+static void test_param_init(uint8_t fill)
+{
+ odp_pool_param_t param;
+
+ memset(&param, fill, sizeof(param));
+ odp_pool_param_init(&param);
+
+ CU_ASSERT(param.uarea_init.init_fn == NULL);
+ CU_ASSERT(param.uarea_init.args == NULL);
+
+ CU_ASSERT(param.buf.uarea_size == 0);
+ CU_ASSERT(param.buf.cache_size >= global_pool_capa.buf.min_cache_size &&
+ param.buf.cache_size <= global_pool_capa.buf.max_cache_size);
+
+ CU_ASSERT(param.pkt.max_num == 0);
+ CU_ASSERT(param.pkt.num_subparam == 0);
+ CU_ASSERT(param.pkt.uarea_size == 0);
+ CU_ASSERT(param.pkt.cache_size >= global_pool_capa.pkt.min_cache_size &&
+ param.pkt.cache_size <= global_pool_capa.pkt.max_cache_size);
+
+ CU_ASSERT(param.tmo.uarea_size == 0);
+ CU_ASSERT(param.tmo.cache_size >= global_pool_capa.tmo.min_cache_size &&
+ param.tmo.cache_size <= global_pool_capa.tmo.max_cache_size);
+
+ CU_ASSERT(param.vector.uarea_size == 0);
+ CU_ASSERT(param.vector.cache_size >= global_pool_capa.vector.min_cache_size &&
+ param.vector.cache_size <= global_pool_capa.vector.max_cache_size);
+}
+
+/* Exercise param init over both all-zeros and all-ones prior contents */
+static void pool_test_param_init(void)
+{
+ test_param_init(0);
+ test_param_init(0xff);
+}
+
+/* Create an anonymous pool from param, sanity check the handle and its u64
+ * representation, then destroy the pool */
+static void pool_create_destroy(odp_pool_param_t *param)
+{
+ odp_pool_t hdl = odp_pool_create(NULL, param);
+
+ CU_ASSERT_FATAL(hdl != ODP_POOL_INVALID);
+ CU_ASSERT(odp_pool_to_u64(hdl) != odp_pool_to_u64(ODP_POOL_INVALID));
+ CU_ASSERT(odp_pool_destroy(hdl) == 0);
+}
+
+/* Create/destroy round trip for a buffer pool */
+static void pool_test_create_destroy_buffer(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = BUF_NUM;
+
+ pool_create_destroy(&param);
+}
+
+/* Create/destroy round trip for a packet pool */
+static void pool_test_create_destroy_packet(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = PKT_NUM;
+
+ pool_create_destroy(&param);
+}
+
+/* Create/destroy round trip for a timeout pool */
+static void pool_test_create_destroy_timeout(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = TMO_NUM;
+
+ pool_create_destroy(&param);
+}
+
+/* Create/destroy round trip for a packet vector pool, clamping num and
+ * max_size to the implementation's capabilities */
+static void pool_test_create_destroy_vector(void)
+{
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ CU_ASSERT_FATAL(capa.vector.max_pools > 0);
+
+ /* max_num == 0 means no implementation limit */
+ if (capa.vector.max_num && capa.vector.max_num < max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = capa.vector.max_size < VEC_LEN ? capa.vector.max_size : VEC_LEN;
+
+ pool_create_destroy(&param);
+}
+
+/* The uarea-init tests require both a non-zero user area and persistent
+ * user area contents; each checker gates one pool type's test */
+static int pool_check_buffer_uarea_init(void)
+{
+ if (global_pool_capa.buf.max_uarea_size == 0 || !global_pool_capa.buf.uarea_persistence)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_packet_uarea_init(void)
+{
+ if (global_pool_capa.pkt.max_uarea_size == 0 || !global_pool_capa.pkt.uarea_persistence)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_vector_uarea_init(void)
+{
+ if (global_pool_capa.vector.max_uarea_size == 0 ||
+ !global_pool_capa.vector.uarea_persistence)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_timeout_uarea_init(void)
+{
+ if (global_pool_capa.tmo.max_uarea_size == 0 || !global_pool_capa.tmo.uarea_persistence)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* User area init callback: counts invocations, marks the element index as
+ * visited and fills the user area with the UAREA pattern so later allocs
+ * can verify persistence */
+static void init_event_uarea(void *uarea, uint32_t size, void *args, uint32_t index)
+{
+ uarea_init_t *data = args;
+
+ data->count++;
+ data->mark[index] = 1;
+ memset(uarea, UAREA, size);
+}
+
+/*
+ * Create a buffer pool with a uarea init callback, verify the callback ran
+ * once per element, then allocate buffers and check that each user area
+ * still holds the UAREA pattern written by the callback.
+ */
+static void pool_test_buffer_uarea_init(void)
+{
+ odp_pool_param_t param;
+ uint32_t num = ODPH_MIN(global_pool_capa.buf.max_num, ELEM_NUM),
+ size = ODPH_MIN(global_pool_capa.buf.max_size, ELEM_SIZE), i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_buffer_t bufs[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_BUFFER;
+ param.uarea_init.init_fn = init_event_uarea;
+ param.uarea_init.args = &data;
+ param.buf.num = num;
+ param.buf.size = size;
+ param.buf.uarea_size = 1;
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ bufs[i] = odp_buffer_alloc(pool);
+
+ CU_ASSERT(bufs[i] != ODP_BUFFER_INVALID);
+
+ /* Stop at the first failed alloc; i then equals the number of
+ * buffers to free below */
+ if (bufs[i] == ODP_BUFFER_INVALID)
+ break;
+
+ uarea = odp_buffer_user_area(bufs[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ odp_buffer_free_multi(bufs, i);
+ odp_pool_destroy(pool);
+}
+
+/*
+ * Create a packet pool with a uarea init callback, verify the callback ran
+ * once per element, then allocate packets and check that each user area
+ * still holds the UAREA pattern written by the callback.
+ *
+ * Fix: allocate packets of 'size' (the pool's configured pkt.len, already
+ * clamped to capa.pkt.max_len) instead of the unclamped ELEM_SIZE, which
+ * could exceed the pool's capability and make the alloc fail spuriously
+ * when max_len < ELEM_SIZE.
+ */
+static void pool_test_packet_uarea_init(void)
+{
+ odp_pool_param_t param;
+ uint32_t num = ODPH_MIN(global_pool_capa.pkt.max_num, ELEM_NUM),
+ size = ODPH_MIN(global_pool_capa.pkt.max_len, ELEM_SIZE), i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_packet_t pkts[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.uarea_init.init_fn = init_event_uarea;
+ param.uarea_init.args = &data;
+ param.pkt.num = num;
+ param.pkt.len = size;
+ param.pkt.uarea_size = 1;
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ pkts[i] = odp_packet_alloc(pool, size);
+
+ CU_ASSERT(pkts[i] != ODP_PACKET_INVALID);
+
+ /* Stop at the first failed alloc; i then equals the number of
+ * packets to free below */
+ if (pkts[i] == ODP_PACKET_INVALID)
+ break;
+
+ uarea = odp_packet_user_area(pkts[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ odp_packet_free_multi(pkts, i);
+ odp_pool_destroy(pool);
+}
+
+/*
+ * Create a packet vector pool with a uarea init callback, verify it ran
+ * once per element, then allocate vectors and check each user area still
+ * holds the UAREA pattern.
+ */
+static void pool_test_vector_uarea_init(void)
+{
+ odp_pool_param_t param;
+ /* NOTE(review): max_size is bounded by ELEM_NUM, not ELEM_SIZE; a
+ * vector's max_size counts packet handles, so the small bound looks
+ * intentional — confirm */
+ uint32_t num = ODPH_MIN(global_pool_capa.vector.max_num, ELEM_NUM),
+ size = ODPH_MIN(global_pool_capa.vector.max_size, ELEM_NUM), i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_packet_vector_t vecs[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.uarea_init.init_fn = init_event_uarea;
+ param.uarea_init.args = &data;
+ param.vector.num = num;
+ param.vector.max_size = size;
+ param.vector.uarea_size = 1;
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ vecs[i] = odp_packet_vector_alloc(pool);
+
+ CU_ASSERT(vecs[i] != ODP_PACKET_VECTOR_INVALID);
+
+ /* Stop at the first failed alloc; i then equals the number of
+ * vectors to free below */
+ if (vecs[i] == ODP_PACKET_VECTOR_INVALID)
+ break;
+
+ uarea = odp_packet_vector_user_area(vecs[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_packet_vector_free(vecs[j]);
+
+ odp_pool_destroy(pool);
+}
+
+/*
+ * Create a timeout pool with a uarea init callback, verify it ran once per
+ * element, then allocate timeouts and check each user area still holds the
+ * UAREA pattern.
+ */
+static void pool_test_timeout_uarea_init(void)
+{
+ odp_pool_param_t param;
+ uint32_t num = ODPH_MIN(global_pool_capa.tmo.max_num, ELEM_NUM), i;
+ odp_pool_t pool;
+ uarea_init_t data;
+ odp_timeout_t tmos[num];
+ uint8_t *uarea;
+
+ memset(&data, 0, sizeof(uarea_init_t));
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_TIMEOUT;
+ param.uarea_init.init_fn = init_event_uarea;
+ param.uarea_init.args = &data;
+ param.tmo.num = num;
+ param.tmo.uarea_size = 1;
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(data.count == num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(data.mark[i] == 1);
+
+ tmos[i] = odp_timeout_alloc(pool);
+
+ CU_ASSERT(tmos[i] != ODP_TIMEOUT_INVALID);
+
+ /* Stop at the first failed alloc; i then equals the number of
+ * timeouts to free below */
+ if (tmos[i] == ODP_TIMEOUT_INVALID)
+ break;
+
+ uarea = odp_timeout_user_area(tmos[i]);
+
+ CU_ASSERT(*uarea == UAREA);
+ }
+
+ for (uint32_t j = 0; j < i; j++)
+ odp_timeout_free(tmos[j]);
+
+ odp_pool_destroy(pool);
+}
+
+/*
+ * Create a named buffer pool, find it with odp_pool_lookup(), verify that
+ * odp_pool_info() reflects the creation parameters (the implementation may
+ * round values up), and exercise the print APIs.
+ */
+static void pool_test_lookup_info_print(void)
+{
+ odp_pool_t pool;
+ const char pool_name[] = "pool_for_lookup_test";
+ odp_pool_info_t info;
+ odp_pool_param_t param;
+
+ memset(&info, 0, sizeof(info));
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = BUF_NUM;
+
+ pool = odp_pool_create(pool_name, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(pool_name);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+ CU_ASSERT(strncmp(pool_name, info.name, sizeof(pool_name)) == 0);
+ CU_ASSERT(param.buf.size <= info.params.buf.size);
+ CU_ASSERT(param.buf.align <= info.params.buf.align);
+ CU_ASSERT(param.buf.num <= info.params.buf.num);
+ CU_ASSERT(param.type == info.params.type);
+
+ odp_pool_print(pool);
+ odp_pool_print_all();
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Two pools may share one name; lookup must then resolve to one of them */
+static void pool_test_same_name(const odp_pool_param_t *param)
+{
+ const char *name = "same_name";
+ odp_pool_t first, second, found;
+
+ first = odp_pool_create(name, param);
+ CU_ASSERT_FATAL(first != ODP_POOL_INVALID);
+
+ found = odp_pool_lookup(name);
+ CU_ASSERT(found == first);
+
+ /* Creating a second pool under an identical name must succeed */
+ second = odp_pool_create(name, param);
+ CU_ASSERT_FATAL(second != ODP_POOL_INVALID);
+
+ /* Lookup may now return either pool */
+ found = odp_pool_lookup(name);
+ CU_ASSERT(found == first || found == second);
+
+ CU_ASSERT(odp_pool_destroy(first) == 0);
+ CU_ASSERT(odp_pool_destroy(second) == 0);
+}
+
+/* Same-name test with a buffer pool */
+static void pool_test_same_name_buf(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = BUF_NUM;
+
+ pool_test_same_name(&param);
+}
+
+/* Same-name test with a packet pool */
+static void pool_test_same_name_pkt(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = PKT_NUM;
+
+ pool_test_same_name(&param);
+}
+
+/* Same-name test with a timeout pool */
+static void pool_test_same_name_tmo(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = TMO_NUM;
+
+ pool_test_same_name(&param);
+}
+
+/* Same-name test with a small packet vector pool */
+static void pool_test_same_name_vec(void)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = 10;
+ param.vector.max_size = 2;
+
+ pool_test_same_name(&param);
+}
+
+/*
+ * Create a buffer pool with the given cache size, allocate the pool's full
+ * capacity, then free all buffers and destroy the pool.
+ *
+ * Fixes: cache_size was stored in param.pkt.cache_size (the packet-pool
+ * union member) although the pool type is ODP_POOL_BUFFER, so the requested
+ * cache size was never applied — it belongs in param.buf.cache_size. The
+ * allocation loop also iterated PKT_NUM times (copy-paste from
+ * alloc_packet()) instead of BUF_NUM, leaving half of the pool untested.
+ */
+static void alloc_buffer(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ odp_buffer_t buf[BUF_NUM];
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = BUF_NUM;
+ param.buf.size = BUF_SIZE;
+ param.buf.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < BUF_NUM; i++) {
+ buf[num] = odp_buffer_alloc(pool);
+ CU_ASSERT(buf[num] != ODP_BUFFER_INVALID);
+
+ if (buf[num] != ODP_BUFFER_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_buffer_free(buf[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Buffer allocation with default, minimum and maximum cache sizes */
+static void pool_test_alloc_buffer(void)
+{
+ alloc_buffer(default_pool_param.buf.cache_size);
+}
+
+static void pool_test_alloc_buffer_min_cache(void)
+{
+ alloc_buffer(global_pool_capa.buf.min_cache_size);
+}
+
+static void pool_test_alloc_buffer_max_cache(void)
+{
+ alloc_buffer(global_pool_capa.buf.max_cache_size);
+}
+
+/*
+ * Create a packet vector pool with the given cache size, allocate the
+ * pool's full capacity, validate each handle (validity, event validity,
+ * empty vector), then free all vectors and destroy the pool.
+ */
+static void alloc_packet_vector(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t i, num;
+ odp_packet_vector_t pkt_vec[VEC_NUM];
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ /* max_num == 0 means no implementation limit */
+ if (capa.vector.max_num && capa.vector.max_num < max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = capa.vector.max_size < VEC_LEN ? capa.vector.max_size : VEC_LEN;
+ param.vector.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ odp_packet_vector_t pktv = odp_packet_vector_alloc(pool);
+
+ CU_ASSERT(pktv != ODP_PACKET_VECTOR_INVALID);
+
+ if (pktv == ODP_PACKET_VECTOR_INVALID)
+ continue;
+
+ CU_ASSERT(odp_packet_vector_valid(pktv) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_packet_vector_to_event(pktv)) == 1);
+ /* Freshly allocated vectors contain no packets */
+ CU_ASSERT(odp_packet_vector_size(pktv) == 0);
+
+ pkt_vec[num] = pktv;
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_packet_vector_free(pkt_vec[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Vector allocation with default, minimum and maximum cache sizes */
+static void pool_test_alloc_packet_vector(void)
+{
+ alloc_packet_vector(default_pool_param.vector.cache_size);
+}
+
+static void pool_test_alloc_packet_vector_min_cache(void)
+{
+ alloc_packet_vector(global_pool_capa.vector.min_cache_size);
+}
+
+static void pool_test_alloc_packet_vector_max_cache(void)
+{
+ alloc_packet_vector(global_pool_capa.vector.max_cache_size);
+}
+
+/*
+ * Create a packet pool with the given cache size, allocate the pool's full
+ * capacity (PKT_NUM packets of PKT_LEN), then free them and destroy the
+ * pool.
+ */
+static void alloc_packet(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ odp_packet_t pkt[PKT_NUM];
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = PKT_NUM;
+ param.pkt.len = PKT_LEN;
+ param.pkt.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < PKT_NUM; i++) {
+ pkt[num] = odp_packet_alloc(pool, PKT_LEN);
+ CU_ASSERT(pkt[num] != ODP_PACKET_INVALID);
+
+ if (pkt[num] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_packet_free(pkt[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Packet allocation with default, minimum and maximum cache sizes */
+static void pool_test_alloc_packet(void)
+{
+ alloc_packet(default_pool_param.pkt.cache_size);
+}
+
+static void pool_test_alloc_packet_min_cache(void)
+{
+ alloc_packet(global_pool_capa.pkt.min_cache_size);
+}
+
+static void pool_test_alloc_packet_max_cache(void)
+{
+ alloc_packet(global_pool_capa.pkt.max_cache_size);
+}
+
+/*
+ * Create a packet pool using the maximum supported number of subparameters
+ * (each a num/len pair), then allocate and free packets for the base length
+ * and for each subparameter length in turn.
+ */
+static void pool_test_alloc_packet_subparam(void)
+{
+ odp_pool_t pool;
+ odp_pool_capability_t capa;
+ odp_pool_param_t param;
+ uint32_t i, j, num, num_sub;
+ odp_packet_t pkt[PKT_NUM];
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+ num_sub = capa.pkt.max_num_subparam;
+
+ CU_ASSERT_FATAL(num_sub <= ODP_POOL_MAX_SUBPARAMS);
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = PKT_NUM;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num_subparam = num_sub;
+
+ /* Each subparam adds PKT_NUM packets of an increasing length */
+ for (i = 0; i < num_sub; i++) {
+ param.pkt.sub[i].num = PKT_NUM;
+ param.pkt.sub[i].len = PKT_LEN + (i * 100);
+ }
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < PKT_NUM; i++) {
+ pkt[num] = odp_packet_alloc(pool, PKT_LEN);
+ CU_ASSERT(pkt[num] != ODP_PACKET_INVALID);
+
+ if (pkt[num] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_packet_free(pkt[i]);
+
+ /* Repeat the alloc/free cycle for each subparameter length */
+ for (j = 0; j < num_sub; j++) {
+ num = 0;
+
+ for (i = 0; i < param.pkt.sub[j].num; i++) {
+ pkt[num] = odp_packet_alloc(pool, param.pkt.sub[j].len);
+ CU_ASSERT(pkt[num] != ODP_PACKET_INVALID);
+
+ if (pkt[num] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_packet_free(pkt[i]);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/*
+ * Create a timeout pool with the given cache size, allocate the pool's
+ * full capacity, then free all timeouts and destroy the pool.
+ *
+ * Fix: the allocation loop iterated PKT_NUM (500) times — a copy-paste from
+ * alloc_packet() — although the pool and the handle array are sized
+ * TMO_NUM (1000), so half of the pool was never exercised.
+ */
+static void alloc_timeout(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ odp_timeout_t tmo[TMO_NUM];
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = TMO_NUM;
+ param.tmo.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < TMO_NUM; i++) {
+ tmo[num] = odp_timeout_alloc(pool);
+ CU_ASSERT(tmo[num] != ODP_TIMEOUT_INVALID);
+
+ if (tmo[num] != ODP_TIMEOUT_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Timeout allocation with default, minimum and maximum cache sizes */
+static void pool_test_alloc_timeout(void)
+{
+ alloc_timeout(default_pool_param.tmo.cache_size);
+}
+
+static void pool_test_alloc_timeout_min_cache(void)
+{
+ alloc_timeout(global_pool_capa.tmo.min_cache_size);
+}
+
+static void pool_test_alloc_timeout_max_cache(void)
+{
+ alloc_timeout(global_pool_capa.tmo.max_cache_size);
+}
+
+/* Verify odp_pool_info() output for a named packet pool: name, type,
+ * creation parameters and the implementation's max_num */
+static void pool_test_info_packet(void)
+{
+ odp_pool_t pool;
+ odp_pool_info_t info;
+ odp_pool_param_t param;
+ const char pool_name[] = "test_pool_name";
+
+ memset(&info, 0, sizeof(info));
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = PKT_NUM;
+ param.pkt.len = PKT_LEN;
+
+ pool = odp_pool_create(pool_name, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ memset(&info, 0, sizeof(odp_pool_info_t));
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+
+ CU_ASSERT(strncmp(pool_name, info.name, sizeof(pool_name)) == 0);
+ CU_ASSERT(info.params.type == ODP_POOL_PACKET);
+ CU_ASSERT(info.params.pkt.num == param.pkt.num);
+ CU_ASSERT(info.params.pkt.len == param.pkt.len);
+ CU_ASSERT(info.pkt.max_num >= param.pkt.num);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/*
+ * Verify the pool data address range reported by odp_pool_info(): the range
+ * must be large enough for all packet data, and every segment of every
+ * allocated packet must fall inside [min_data_addr, max_data_addr].
+ */
+static void pool_test_info_data_range(void)
+{
+ odp_pool_t pool;
+ odp_pool_info_t info;
+ odp_pool_param_t param;
+ odp_packet_t pkt[PKT_NUM];
+ uint32_t i, num;
+ uintptr_t pool_len;
+
+ memset(&info, 0, sizeof(info));
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = PKT_NUM;
+ param.pkt.len = PKT_LEN;
+
+ pool = odp_pool_create(NULL, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+
+ pool_len = info.max_data_addr - info.min_data_addr + 1;
+ CU_ASSERT(pool_len >= PKT_NUM * PKT_LEN);
+
+ num = 0;
+
+ for (i = 0; i < PKT_NUM; i++) {
+ pkt[num] = odp_packet_alloc(pool, PKT_LEN);
+ CU_ASSERT(pkt[num] != ODP_PACKET_INVALID);
+
+ if (pkt[num] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ /* Walk every segment of every packet and bound-check its data */
+ for (i = 0; i < num; i++) {
+ uintptr_t pkt_data, pkt_data_end;
+ uint32_t offset = 0;
+ uint32_t seg_len = 0;
+ uint32_t pkt_len = odp_packet_len(pkt[i]);
+
+ while (offset < pkt_len) {
+ pkt_data = (uintptr_t)odp_packet_offset(pkt[i], offset,
+ &seg_len, NULL);
+ pkt_data_end = pkt_data + seg_len - 1;
+ CU_ASSERT((pkt_data >= info.min_data_addr) &&
+ (pkt_data_end <= info.max_data_addr));
+ offset += seg_len;
+ }
+
+ odp_packet_free(pkt[i]);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Create a buffer pool with the maximum supported number of buffers and
+ * check that all of them can actually be allocated. The handle table is
+ * placed in SHM since it may be too large for the stack. */
+static void pool_test_buf_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_buffer_t *buf;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ /* capa.buf.max_num == 0 means no limit; use a default in that case */
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.buf.max_num)
+ max_num = capa.buf.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = max_num;
+ param.buf.size = 10;
+
+ pool = odp_pool_create("test_buf_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_buffer_t),
+ sizeof(odp_buffer_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ buf = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ buf[num] = odp_buffer_alloc(pool);
+
+ if (buf[num] != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_is_valid(buf[num]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_buffer_to_event(buf[num])) == 1);
+ num++;
+ }
+ }
+
+ /* Every buffer of the pool must be allocatable */
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_buffer_free(buf[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Create a packet pool with the maximum supported number of packets and
+ * check that all of them can actually be allocated. The handle table is
+ * placed in SHM since it may be too large for the stack. */
+static void pool_test_pkt_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_packet_t *pkt;
+ uint32_t len = 10;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ /* capa.pkt.max_num == 0 means no limit; use a default in that case */
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.pkt.max_num)
+ max_num = capa.pkt.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = max_num;
+ param.pkt.max_num = max_num;
+ param.pkt.len = len;
+ param.pkt.max_len = len;
+ param.pkt.headroom = 0;
+
+ pool = odp_pool_create("test_packet_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_packet_t),
+ sizeof(odp_packet_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ pkt = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ pkt[num] = odp_packet_alloc(pool, len);
+
+ if (pkt[num] != ODP_PACKET_INVALID) {
+ CU_ASSERT(odp_packet_is_valid(pkt[num]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt[num])) == 1);
+ num++;
+ }
+ }
+
+ /* Every packet of the pool must be allocatable */
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_packet_free(pkt[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Create a packet vector pool with the maximum supported number of vectors
+ * and check that all of them can actually be allocated. */
+static void pool_test_packet_vector_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t num, i;
+ odp_shm_t shm;
+ odp_packet_vector_t *pktv;
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ /* capa.vector.max_num == 0 means no limit; keep VEC_NUM default then */
+ if (capa.vector.max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = 1;
+
+ pool = odp_pool_create("test_packet_vector_max_num", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm", max_num * sizeof(odp_packet_vector_t),
+ sizeof(odp_packet_vector_t), 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ pktv = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(pktv != NULL);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ pktv[num] = odp_packet_vector_alloc(pool);
+
+ if (pktv[num] != ODP_PACKET_VECTOR_INVALID)
+ num++;
+ }
+
+ /* Every vector of the pool must be allocatable */
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_packet_vector_free(pktv[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Check that the pkt.seg_len parameter is honored: the first segment of
+ * each allocated packet must be at least the requested minimum length. */
+static void pool_test_pkt_seg_len(void)
+{
+ uint32_t len = 1500;
+ uint32_t min_seg_len = 42;
+ uint32_t max_num = 10;
+ uint32_t num = 0;
+ uint32_t i;
+ odp_packet_t pkt_tbl[max_num];
+ odp_pool_t pool;
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = max_num;
+ param.pkt.len = len;
+ param.pkt.max_len = len;
+ param.pkt.seg_len = min_seg_len;
+
+ pool = odp_pool_create("test_packet_seg_len", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < max_num; i++) {
+ pkt_tbl[i] = odp_packet_alloc(pool, len);
+
+ if (pkt_tbl[i] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++) {
+ CU_ASSERT(odp_packet_seg_len(pkt_tbl[i]) >= min_seg_len);
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Create a timeout pool with the maximum supported number of timeouts and
+ * check that all of them can actually be allocated. The handle table is
+ * placed in SHM since it may be too large for the stack. */
+static void pool_test_tmo_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_timeout_t *tmo;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ /* capa.tmo.max_num == 0 means no limit; use a default in that case */
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.tmo.max_num)
+ max_num = capa.tmo.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = max_num;
+
+ pool = odp_pool_create("test_tmo_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Size and align the handle table for odp_timeout_t (was incorrectly
+ * using sizeof(odp_packet_t), copied from the packet pool test) */
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_timeout_t),
+ sizeof(odp_timeout_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ tmo = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ tmo[num] = odp_timeout_alloc(pool);
+
+ if (tmo[num] != ODP_TIMEOUT_INVALID) {
+ CU_ASSERT(odp_event_is_valid(odp_timeout_to_event(tmo[num])) == 1);
+ num++;
+ }
+ }
+
+ /* Every timeout of the pool must be allocatable */
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Allocate, write, and free 'num' buffers from 'pool'. Allocation failures
+ * are retried (other threads may hold buffers temporarily), so the loop
+ * only advances on successful allocations. */
+static void buffer_alloc_loop(odp_pool_t pool, int num, int buffer_size)
+{
+ int allocs;
+
+ /* Allocate, modify, and free buffers */
+ for (allocs = 0; allocs < num;) {
+ odp_buffer_t buf;
+ uint8_t *data;
+ int i;
+
+ buf = odp_buffer_alloc(pool);
+ if (buf == ODP_BUFFER_INVALID)
+ continue;
+
+ data = odp_buffer_addr(buf);
+
+ /* Touch every byte to verify the buffer memory is writable */
+ for (i = 0; i < buffer_size; i++)
+ data[i] = i;
+
+ odp_buffer_free(buf);
+ allocs++;
+ }
+}
+
+/* Worker thread entry for the create-after-fork test. The first thread to
+ * arrive creates the shared pool, then all threads (including the main
+ * thread, synchronized via init_barrier) hammer it with alloc/free cycles.
+ * Returns the CUnit failure count as the thread result. */
+static int run_pool_test_create_after_fork(void *arg ODP_UNUSED)
+{
+ int thr_index;
+
+ thr_index = odp_atomic_fetch_inc_u32(&global_mem->index);
+
+ /* Thread 0 allocates the shared pool */
+ if (thr_index == 0) {
+ odp_pool_t pool;
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = BUF_NUM;
+
+ pool = odp_pool_create(NULL, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ global_mem->pool = pool;
+ }
+
+ /* Wait until the pool exists before any thread starts allocating */
+ odp_barrier_wait(&global_mem->init_barrier);
+
+ buffer_alloc_loop(global_mem->pool, BUF_NUM, BUF_SIZE);
+
+ return CU_get_number_of_failures();
+}
+
+/* Verify that a pool created by a forked worker thread is usable by all
+ * threads, including the parent. Shared state lives in SHM reserved before
+ * the fork. */
+static void pool_test_create_after_fork(void)
+{
+ odp_shm_t shm;
+ int num;
+
+ /* No single VA required since reserve is done before fork */
+ shm = odp_shm_reserve(NULL, sizeof(global_shared_mem_t), 0, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ global_mem = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(global_mem);
+
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
+
+ global_mem->nb_threads = num;
+ global_mem->pool = ODP_POOL_INVALID;
+ /* num workers + this (main) thread participate in the barrier */
+ odp_barrier_init(&global_mem->init_barrier, num + 1);
+ odp_atomic_init_u32(&global_mem->index, 0);
+
+ /* Fork here */
+ odp_cunit_thread_create(num, run_pool_test_create_after_fork, NULL, 0, 0);
+
+ /* Wait until thread 0 has created the test pool */
+ odp_barrier_wait(&global_mem->init_barrier);
+
+ buffer_alloc_loop(global_mem->pool, BUF_NUM, BUF_SIZE);
+
+ /* Wait for all thread endings */
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
+
+ CU_ASSERT(!odp_pool_destroy(global_mem->pool));
+
+ CU_ASSERT(!odp_shm_free(shm));
+}
+
+/* Verify odp_pool_index()/odp_pool_max_index(): create the maximum number
+ * of packet pools, use each pool's index as a key into a lookup table, and
+ * check that indices are unique, in range, and usable for packet lookup. */
+static void pool_test_pool_index(void)
+{
+ uint32_t max_pools = global_pool_capa.pkt.max_pools;
+ uint32_t i, num_pools;
+ unsigned int max_index = odp_pool_max_index();
+ odp_packet_t pool_lookup[max_index + 1];
+ odp_packet_t pkt;
+ odp_pool_t pool[max_pools];
+ odp_pool_param_t param;
+ int pool_index;
+
+ CU_ASSERT_FATAL(max_pools > 0);
+
+ /* Pool max index should match to pool capability */
+ CU_ASSERT_FATAL(max_index >= global_pool_capa.max_pools - 1);
+ CU_ASSERT_FATAL(max_index >= global_pool_capa.pkt.max_pools - 1);
+
+ /* Single packet pools, so alloc success/failure tracks pool state */
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = 1;
+ param.pkt.max_num = 1;
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, &param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ /* Ensuring max possible pools are created */
+ num_pools = i;
+ CU_ASSERT(num_pools == max_pools);
+
+ for (i = 0; i < num_pools; i++) {
+ pkt = odp_packet_alloc(pool[i], PKT_LEN);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ /* Only one packet should be possible from each pool */
+ CU_ASSERT_FATAL(odp_packet_alloc(pool[i], PKT_LEN) == ODP_PACKET_INVALID);
+
+ /* Check pool index validity */
+ pool_index = odp_pool_index(pool[i]);
+ CU_ASSERT_FATAL(pool_index >= 0);
+ CU_ASSERT_FATAL((unsigned int)pool_index <= odp_pool_max_index());
+
+ /* Store packet handle in pool lookup table */
+ pool_lookup[pool_index] = pkt;
+ }
+
+ for (i = 0; i < num_pools; i++) {
+ pool_index = odp_pool_index(pool[i]);
+
+ /* Free the packet using pool lookup */
+ odp_packet_free(pool_lookup[pool_index]);
+
+ /* Now packet allocation from the pool should be possible */
+ pkt = odp_packet_alloc(pool[i], PKT_LEN);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ odp_packet_free(pkt);
+
+ /* Destroy the pool */
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+ }
+}
+
+/* Create the maximum number of packet pools while the maximum number of SHM
+ * blocks is also reserved, to check that pool creation does not depend on
+ * free SHM blocks being available. */
+static void pool_test_create_max_pkt_pools(void)
+{
+ uint32_t max_pools = global_pool_capa.pkt.max_pools;
+ uint32_t i, num_pools, num_shm;
+ odp_pool_t pool[max_pools];
+ odp_pool_param_t param;
+ odp_shm_capability_t shm_capa;
+ uint32_t shm_size = 32;
+ uint32_t uarea_size = 32;
+
+ CU_ASSERT_FATAL(max_pools > 0);
+
+ /* Reserve maximum number of SHM blocks */
+ CU_ASSERT_FATAL(odp_shm_capability(&shm_capa) == 0);
+ CU_ASSERT_FATAL(shm_capa.max_blocks > 0);
+
+ odp_shm_t shm[shm_capa.max_blocks];
+
+ /* shm_capa.max_size == 0 means no size limit */
+ if (shm_capa.max_size && shm_capa.max_size < shm_size)
+ shm_size = shm_capa.max_size;
+
+ for (i = 0; i < shm_capa.max_blocks; i++) {
+ shm[i] = odp_shm_reserve(NULL, shm_size, 0, 0);
+
+ if (shm[i] == ODP_SHM_INVALID)
+ break;
+ }
+ num_shm = i;
+ CU_ASSERT(num_shm == shm_capa.max_blocks);
+
+ /* Create maximum number of packet pools */
+ if (uarea_size > global_pool_capa.pkt.max_uarea_size)
+ uarea_size = global_pool_capa.pkt.max_uarea_size;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = 1;
+ param.pkt.max_num = 1;
+ param.pkt.uarea_size = uarea_size;
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, &param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+ num_pools = i;
+ CU_ASSERT(num_pools == max_pools);
+
+ for (i = 0; i < num_pools; i++)
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+
+ for (i = 0; i < num_shm; i++)
+ CU_ASSERT(odp_shm_free(shm[i]) == 0);
+}
+
+/* CUnit precondition: run buffer pool statistics tests only when the
+ * implementation supports at least one statistics counter. */
+static int pool_check_buffer_pool_statistics(void)
+{
+ if (global_pool_capa.buf.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* CUnit precondition: packet pool statistics support required */
+static int pool_check_packet_pool_statistics(void)
+{
+ if (global_pool_capa.pkt.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* CUnit precondition: packet vector pool statistics support required */
+static int pool_check_packet_vector_pool_statistics(void)
+{
+ if (global_pool_capa.vector.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* CUnit precondition: timeout pool statistics support required */
+static int pool_check_timeout_pool_statistics(void)
+{
+ if (global_pool_capa.tmo.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Common pool statistics test for all pool types. Creates up to two pools,
+ * checks that all counters are zero after reset, allocates every event from
+ * each pool (plus extra attempts to provoke alloc_fails), validates counter
+ * values after the allocations, frees the events, validates again, and
+ * finally resets and re-checks the counters. Per-thread cache counters are
+ * read in ODP_POOL_MAX_THREAD_STATS sized windows. */
+static void pool_test_pool_statistics(odp_pool_type_t pool_type)
+{
+ odp_pool_stats_t stats;
+ odp_pool_stats_selected_t selected;
+ odp_pool_param_t param;
+ odp_pool_stats_opt_t supported;
+ uint32_t i, j, num_pool, num_obj, cache_size, num_thr;
+ uint32_t max_pools = 2;
+ uint16_t first = 0;
+ uint16_t last = ODP_POOL_MAX_THREAD_STATS - 1;
+
+ /* Clamp the thread stats window to the number of possible threads */
+ if (last > odp_thread_count_max() - 1)
+ last = odp_thread_count_max() - 1;
+
+ odp_pool_param_init(&param);
+
+ /* Per-type setup: pool count, object count, cache size and supported
+ * statistics counters all come from the matching capability struct */
+ if (pool_type == ODP_POOL_BUFFER) {
+ max_pools = global_pool_capa.buf.max_pools < max_pools ?
+ global_pool_capa.buf.max_pools : max_pools;
+ num_obj = BUF_NUM;
+ supported.all = global_pool_capa.buf.stats.all;
+ param.type = ODP_POOL_BUFFER;
+ cache_size = CACHE_SIZE > global_pool_capa.buf.max_cache_size ?
+ global_pool_capa.buf.max_cache_size : CACHE_SIZE;
+ param.buf.cache_size = cache_size;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = num_obj;
+ } else if (pool_type == ODP_POOL_PACKET) {
+ max_pools = global_pool_capa.pkt.max_pools < max_pools ?
+ global_pool_capa.pkt.max_pools : max_pools;
+ num_obj = PKT_NUM;
+ supported.all = global_pool_capa.pkt.stats.all;
+ param.type = ODP_POOL_PACKET;
+ cache_size = CACHE_SIZE > global_pool_capa.pkt.max_cache_size ?
+ global_pool_capa.pkt.max_cache_size : CACHE_SIZE;
+ param.pkt.cache_size = cache_size;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = num_obj;
+ param.pkt.max_num = num_obj;
+ } else if (pool_type == ODP_POOL_VECTOR) {
+ max_pools = global_pool_capa.vector.max_pools < max_pools ?
+ global_pool_capa.vector.max_pools : max_pools;
+ num_obj = VEC_NUM;
+ if (global_pool_capa.vector.max_num && global_pool_capa.vector.max_num < num_obj)
+ num_obj = global_pool_capa.vector.max_num;
+ supported.all = global_pool_capa.vector.stats.all;
+ param.type = ODP_POOL_VECTOR;
+ cache_size = CACHE_SIZE > global_pool_capa.vector.max_cache_size ?
+ global_pool_capa.vector.max_cache_size : CACHE_SIZE;
+ param.vector.cache_size = cache_size;
+ param.vector.num = num_obj;
+ param.vector.max_size = global_pool_capa.vector.max_size < VEC_LEN ?
+ global_pool_capa.vector.max_size : VEC_LEN;
+ } else {
+ max_pools = global_pool_capa.tmo.max_pools < max_pools ?
+ global_pool_capa.tmo.max_pools : max_pools;
+ num_obj = TMO_NUM;
+ supported.all = global_pool_capa.tmo.stats.all;
+ param.type = ODP_POOL_TIMEOUT;
+ cache_size = CACHE_SIZE > global_pool_capa.tmo.max_cache_size ?
+ global_pool_capa.tmo.max_cache_size : CACHE_SIZE;
+ param.tmo.cache_size = cache_size;
+ param.tmo.num = num_obj;
+ }
+
+ /* Enable all supported counters for the pools under test */
+ param.stats.all = supported.all;
+
+ CU_ASSERT_FATAL(max_pools != 0);
+
+ /* Extra alloc rounds for testing odp_pool_stats_t.alloc_fails */
+ uint32_t num_allocs = num_obj + 100;
+ odp_event_t event[max_pools][num_allocs];
+ uint32_t num_event[max_pools];
+ odp_pool_t pool[max_pools];
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, &param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ num_pool = i;
+ CU_ASSERT(num_pool == max_pools);
+
+ for (i = 0; i < num_pool; i++) {
+ uint32_t num_events = 0;
+ uint32_t num_fails = 0;
+
+ /* Fill with 0xff so untouched fields are detectable */
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+ memset(&selected, 0xff, sizeof(odp_pool_stats_selected_t));
+
+ CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+
+ stats.thread.first = first;
+ stats.thread.last = last;
+ num_thr = last - first + 1;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
+
+ /* After reset all operation counters must be zero and the
+ * availability counters must not exceed the pool size */
+ CU_ASSERT(stats.available <= num_obj);
+ if (supported.bit.available)
+ CU_ASSERT(selected.available <= num_obj);
+ CU_ASSERT(stats.alloc_ops == 0);
+ if (supported.bit.alloc_ops)
+ CU_ASSERT(selected.alloc_ops == 0);
+ CU_ASSERT(stats.alloc_fails == 0);
+ if (supported.bit.alloc_fails)
+ CU_ASSERT(selected.alloc_fails == 0);
+ CU_ASSERT(stats.free_ops == 0);
+ if (supported.bit.free_ops)
+ CU_ASSERT(selected.free_ops == 0);
+ CU_ASSERT(stats.total_ops == 0);
+ if (supported.bit.total_ops)
+ CU_ASSERT(selected.total_ops == 0);
+ CU_ASSERT(stats.cache_available <= num_obj);
+ if (supported.bit.cache_available)
+ CU_ASSERT(selected.cache_available <= num_obj);
+ CU_ASSERT(stats.cache_alloc_ops == 0);
+ if (supported.bit.cache_alloc_ops)
+ CU_ASSERT(selected.cache_alloc_ops == 0);
+ CU_ASSERT(stats.cache_free_ops == 0);
+ if (supported.bit.cache_free_ops)
+ CU_ASSERT(selected.cache_free_ops == 0);
+
+ CU_ASSERT(stats.thread.first == first);
+ CU_ASSERT(stats.thread.last == last);
+
+ if (supported.bit.thread_cache_available) {
+ for (j = 0; j < num_thr; j++)
+ CU_ASSERT(stats.thread.cache_available[j] <= stats.cache_available);
+ }
+
+ /* Allocate the events */
+ for (j = 0; j < num_allocs; j++) {
+ odp_event_t new_event = ODP_EVENT_INVALID;
+ uint64_t total_cached = 0;
+ uint16_t first_id = 0;
+ uint16_t last_id = last;
+
+ if (pool_type == ODP_POOL_BUFFER) {
+ odp_buffer_t buf = odp_buffer_alloc(pool[i]);
+
+ if (buf != ODP_BUFFER_INVALID)
+ new_event = odp_buffer_to_event(buf);
+ } else if (pool_type == ODP_POOL_PACKET) {
+ odp_packet_t pkt = odp_packet_alloc(pool[i], PKT_LEN);
+
+ if (pkt != ODP_PACKET_INVALID)
+ new_event = odp_packet_to_event(pkt);
+ } else if (pool_type == ODP_POOL_VECTOR) {
+ odp_packet_vector_t pktv = odp_packet_vector_alloc(pool[i]);
+
+ if (pktv != ODP_PACKET_VECTOR_INVALID)
+ new_event = odp_packet_vector_to_event(pktv);
+ } else {
+ odp_timeout_t tmo = odp_timeout_alloc(pool[i]);
+
+ if (tmo != ODP_TIMEOUT_INVALID)
+ new_event = odp_timeout_to_event(tmo);
+ }
+
+ if (new_event != ODP_EVENT_INVALID)
+ event[i][num_events++] = new_event;
+ else
+ num_fails++;
+
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected,
+ &supported) == 0);
+ CU_ASSERT(stats.available <= num_obj - num_events);
+ if (supported.bit.available)
+ CU_ASSERT(selected.available <= num_obj - num_events);
+ CU_ASSERT(stats.cache_available <= num_obj - num_events);
+ if (supported.bit.cache_available)
+ CU_ASSERT(selected.cache_available <= num_obj - num_events);
+
+ /* Sum per-thread caches over all threads, reading the
+ * table in max sized windows */
+ if (supported.bit.thread_cache_available) {
+ while (first_id < odp_thread_count_max()) {
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+
+ stats.thread.first = first_id;
+ stats.thread.last = last_id;
+ num_thr = last_id - first_id + 1;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ for (uint32_t k = 0; k < num_thr; k++) {
+ uint64_t cached = stats.thread.cache_available[k];
+
+ CU_ASSERT(cached <= num_obj - num_events);
+ total_cached += cached;
+ }
+ first_id = last_id + 1;
+ last_id += ODP_POOL_MAX_THREAD_STATS;
+ if (last_id >= odp_thread_count_max())
+ last_id = odp_thread_count_max() - 1;
+ };
+
+ if (supported.bit.cache_available &&
+ ODP_POOL_MAX_THREAD_STATS >= odp_thread_count_max())
+ CU_ASSERT(stats.cache_available == total_cached);
+ }
+ }
+
+ CU_ASSERT(num_events == num_obj);
+ num_event[i] = num_events;
+
+ /* Allow implementation some time to update counters */
+ odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+ memset(&selected, 0xff, sizeof(odp_pool_stats_selected_t));
+
+ stats.thread.first = first;
+ stats.thread.last = last;
+ num_thr = last - first + 1;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
+
+ /* All events are allocated, available count in pool and pool
+ * local caches should be zero. */
+ CU_ASSERT(stats.available == 0);
+ if (supported.bit.available)
+ CU_ASSERT(selected.available == 0);
+ CU_ASSERT(stats.cache_available == 0);
+ if (supported.bit.cache_available)
+ CU_ASSERT(selected.cache_available == 0);
+ if (supported.bit.thread_cache_available) {
+ for (j = 0; j < num_thr; j++)
+ CU_ASSERT(stats.thread.cache_available[j] == 0);
+ }
+ if (supported.bit.alloc_ops) {
+ CU_ASSERT(stats.alloc_ops > 0 && stats.alloc_ops <= num_allocs);
+ CU_ASSERT(selected.alloc_ops > 0 && selected.alloc_ops <= num_allocs);
+ }
+ if (supported.bit.alloc_fails) {
+ CU_ASSERT(stats.alloc_fails == num_fails);
+ CU_ASSERT(selected.alloc_fails == num_fails);
+ }
+ if (supported.bit.total_ops) {
+ CU_ASSERT(stats.total_ops > 0 && stats.total_ops <= num_allocs);
+ CU_ASSERT(selected.total_ops > 0 && selected.total_ops <= num_allocs);
+ }
+ CU_ASSERT(stats.free_ops == 0);
+ if (supported.bit.free_ops)
+ CU_ASSERT(selected.free_ops == 0);
+ CU_ASSERT(stats.cache_alloc_ops <= num_allocs);
+ if (supported.bit.cache_alloc_ops)
+ CU_ASSERT(selected.cache_alloc_ops <= num_allocs);
+ CU_ASSERT(stats.cache_free_ops == 0);
+ if (supported.bit.cache_free_ops)
+ CU_ASSERT(selected.cache_free_ops == 0);
+ }
+
+ for (i = 0; i < num_pool; i++) {
+ odp_event_free_multi(event[i], num_event[i]);
+
+ /* Allow implementation some time to update counters */
+ odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+
+ stats.thread.first = odp_thread_id();
+ stats.thread.last = odp_thread_id();
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
+
+ /* Everything was freed: pool plus cache hold all objects */
+ if (supported.bit.available && supported.bit.cache_available) {
+ CU_ASSERT(stats.available + stats.cache_available == num_obj);
+ CU_ASSERT(selected.available + selected.cache_available == num_obj);
+ }
+ if (supported.bit.free_ops) {
+ CU_ASSERT(stats.free_ops > 0);
+ CU_ASSERT(selected.free_ops > 0);
+ }
+ if (supported.bit.total_ops) {
+ CU_ASSERT(stats.total_ops > 0);
+ CU_ASSERT(selected.total_ops > 0);
+ }
+
+ if (i == 0) {
+ printf("\nPool Statistics\n---------------\n");
+ printf(" available: %" PRIu64 "\n", stats.available);
+ printf(" alloc_ops: %" PRIu64 "\n", stats.alloc_ops);
+ printf(" alloc_fails: %" PRIu64 "\n", stats.alloc_fails);
+ printf(" free_ops: %" PRIu64 "\n", stats.free_ops);
+ printf(" total_ops: %" PRIu64 "\n", stats.total_ops);
+ printf(" cache_available: %" PRIu64 "\n", stats.cache_available);
+ printf(" cache_alloc_ops: %" PRIu64 "\n", stats.cache_alloc_ops);
+ printf(" cache_free_ops: %" PRIu64 "\n", stats.cache_free_ops);
+ if (supported.bit.thread_cache_available)
+ printf(" thread.cache_available[0]: %" PRIu64 "\n",
+ stats.thread.cache_available[0]);
+ }
+
+ /* Reset must zero all operation counters again */
+ CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
+
+ CU_ASSERT(stats.alloc_ops == 0);
+ if (supported.bit.alloc_ops)
+ CU_ASSERT(selected.alloc_ops == 0);
+ CU_ASSERT(stats.alloc_fails == 0);
+ if (supported.bit.alloc_fails)
+ CU_ASSERT(selected.alloc_fails == 0);
+ CU_ASSERT(stats.free_ops == 0);
+ if (supported.bit.free_ops)
+ CU_ASSERT(selected.free_ops == 0);
+ CU_ASSERT(stats.total_ops == 0);
+ if (supported.bit.total_ops)
+ CU_ASSERT(selected.total_ops == 0);
+ CU_ASSERT(stats.cache_alloc_ops == 0);
+ if (supported.bit.cache_alloc_ops)
+ CU_ASSERT(selected.cache_alloc_ops == 0);
+ CU_ASSERT(stats.cache_free_ops == 0);
+ if (supported.bit.cache_free_ops)
+ CU_ASSERT(selected.cache_free_ops == 0);
+
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+ }
+}
+
+/* Per-type CUnit entry points for the common statistics test */
+static void pool_test_buffer_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_BUFFER);
+}
+
+static void pool_test_packet_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_PACKET);
+}
+
+static void pool_test_packet_vector_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_VECTOR);
+}
+
+static void pool_test_timeout_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_TIMEOUT);
+}
+
+/* Fill external packet pool parameters clamped to implementation
+ * capabilities. The application header size is grown until the total head
+ * offset (ODP header + app header) meets the min_head_align requirement. */
+static void pool_ext_init_packet_pool_param(odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset, head_align, trailer_size;
+ odp_pool_type_t type = ODP_POOL_PACKET;
+ uint32_t num_buf = EXT_NUM_BUF;
+ uint32_t buf_size = EXT_BUF_SIZE;
+ uint32_t uarea_size = EXT_UAREA_SIZE;
+ uint32_t headroom = EXT_HEADROOM;
+ uint32_t app_hdr_size = EXT_APP_HDR_SIZE;
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(type, &capa) == 0);
+
+ odp_pool_ext_param_init(type, param);
+
+ /* Clamp requested values to capability limits */
+ if (num_buf > capa.pkt.max_num_buf)
+ num_buf = capa.pkt.max_num_buf;
+
+ if (buf_size > capa.pkt.max_buf_size)
+ buf_size = capa.pkt.max_buf_size;
+
+ if (uarea_size > capa.pkt.max_uarea_size)
+ uarea_size = capa.pkt.max_uarea_size;
+
+ if (headroom > capa.pkt.max_headroom)
+ headroom = capa.pkt.max_headroom;
+
+ head_align = capa.pkt.min_head_align;
+ head_offset = capa.pkt.odp_header_size + app_hdr_size;
+ trailer_size = capa.pkt.odp_trailer_size;
+
+ CU_ASSERT_FATAL(head_offset < buf_size);
+ CU_ASSERT_FATAL((head_offset + trailer_size) < buf_size);
+
+ /* Grow the app header until the head offset is aligned */
+ while (head_offset % head_align) {
+ app_hdr_size++;
+ head_offset = capa.pkt.odp_header_size + app_hdr_size;
+
+ if (head_offset >= buf_size) {
+ ODPH_ERR("Head align too large (%u). No room for data.\n", head_align);
+ break;
+ }
+ }
+
+ CU_ASSERT_FATAL(head_offset < buf_size);
+ CU_ASSERT_FATAL((head_offset + trailer_size) < buf_size);
+ CU_ASSERT_FATAL((head_offset % head_align) == 0);
+
+ param->pkt.num_buf = num_buf;
+ param->pkt.buf_size = buf_size;
+ param->pkt.app_header_size = app_hdr_size;
+ param->pkt.uarea_size = uarea_size;
+ param->pkt.headroom = headroom;
+}
+
+/* Validate external pool capability reporting: unsupported pool types must
+ * return max_pools == 0, and the packet type capability fields (when
+ * supported) must be self-consistent. */
+static void test_packet_pool_ext_capa(void)
+{
+ odp_pool_ext_capability_t capa;
+ odp_pool_type_t type;
+ const odp_pool_type_t unsupported_types[] = {ODP_POOL_BUFFER, ODP_POOL_TIMEOUT,
+ ODP_POOL_VECTOR, ODP_POOL_DMA_COMPL,
+ ODP_POOL_ML_COMPL};
+ const int num_types = ODPH_ARRAY_SIZE(unsupported_types);
+
+ /* Verify operation for unsupported pool types */
+ for (int i = 0; i < num_types; i++) {
+ type = unsupported_types[i];
+ CU_ASSERT_FATAL(odp_pool_ext_capability(type, &capa) == 0);
+ CU_ASSERT(capa.max_pools == 0);
+ }
+
+ type = ODP_POOL_PACKET;
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(type, &capa) == 0);
+
+ CU_ASSERT(capa.type == type);
+
+ /* External memory pools not supported */
+ if (capa.max_pools == 0)
+ return;
+
+ CU_ASSERT(capa.max_pools > 0);
+ CU_ASSERT(capa.min_cache_size <= capa.max_cache_size);
+ CU_ASSERT(capa.pkt.max_num_buf > 0);
+ CU_ASSERT(capa.pkt.max_buf_size > 0);
+ CU_ASSERT(capa.pkt.min_mem_align > 0);
+ CU_ASSERT(TEST_CHECK_POW2(capa.pkt.min_mem_align));
+ CU_ASSERT(capa.pkt.min_buf_align > 0);
+ CU_ASSERT(capa.pkt.min_head_align > 0);
+ CU_ASSERT(capa.pkt.max_headroom > 0);
+ CU_ASSERT(capa.pkt.max_headroom_size > 0);
+ CU_ASSERT(capa.pkt.max_headroom_size >= capa.pkt.max_headroom);
+ CU_ASSERT(capa.pkt.max_segs_per_pkt > 0);
+}
+
+/* Check odp_pool_ext_param_init() defaults. The struct is pre-filled with
+ * 'fill' bytes to catch fields that init leaves untouched. */
+static void test_ext_param_init(uint8_t fill)
+{
+ odp_pool_ext_param_t param;
+
+ memset(&param, fill, sizeof(param));
+ odp_pool_ext_param_init(ODP_POOL_PACKET, &param);
+
+ CU_ASSERT(param.type == ODP_POOL_PACKET);
+ CU_ASSERT(param.uarea_init.init_fn == NULL);
+ CU_ASSERT(param.uarea_init.args == NULL);
+ CU_ASSERT(param.cache_size >= global_pool_ext_capa.min_cache_size &&
+ param.cache_size <= global_pool_ext_capa.max_cache_size);
+ CU_ASSERT(param.stats.all == 0);
+ CU_ASSERT(param.pkt.app_header_size == 0);
+ CU_ASSERT(param.pkt.uarea_size == 0);
+}
+
+/* Run param init check with both all-zeros and all-ones prefill */
+static void test_packet_pool_ext_param_init(void)
+{
+ test_ext_param_init(0);
+ test_ext_param_init(0xff);
+}
+
+/* Create and destroy an external packet pool with default test parameters */
+static void test_packet_pool_ext_create(void)
+{
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that an external pool can be found by name with odp_pool_lookup()
+ * and that the looked up handle equals the created one. */
+static void test_packet_pool_ext_lookup(void)
+{
+ odp_pool_t pool, pool_1;
+ odp_pool_ext_param_t param;
+ const char *name = "pool_ext_0";
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create(name, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pool_1 = odp_pool_lookup(name);
+
+ CU_ASSERT_FATAL(pool_1 != ODP_POOL_INVALID);
+ CU_ASSERT(pool == pool_1);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that odp_pool_info() reports an external pool: pool_ext flag set
+ * and the pool name preserved. */
+static void test_packet_pool_ext_info(void)
+{
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_info_t info;
+ const char *name = "pool_ext_0";
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create(name, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ memset(&info, 0, sizeof(odp_pool_info_t));
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+
+ CU_ASSERT(info.pool_ext);
+ CU_ASSERT(strncmp(name, info.name, strlen(name)) == 0);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Reserve SHM and populate 'pool' with 'num' buffers of 'buf_size' bytes,
+ * each aligned to the strictest of EXT_BUF_ALIGN and the capability
+ * requirements. Buffer start addresses are returned in buf[]. The whole
+ * area is first filled with MAGIC_U8 so later tests can detect overwrites.
+ * Returns the SHM handle (caller frees), or ODP_SHM_INVALID on failure. */
+static odp_shm_t populate_pool(odp_pool_t pool, odp_pool_ext_capability_t *capa,
+ void *buf[], uint32_t num, uint32_t buf_size)
+{
+ odp_shm_t shm;
+ uint8_t *buf_ptr;
+ uint32_t i;
+ uint32_t shm_size, mem_align;
+ uint32_t flags = 0;
+ uint32_t buf_align = EXT_BUF_ALIGN;
+ uint32_t min_align = capa->pkt.min_buf_align;
+
+ CU_ASSERT_FATAL(min_align > 0);
+
+ if (min_align > buf_align)
+ buf_align = min_align;
+
+ /* Some implementations require buffers aligned to their own size */
+ if (capa->pkt.buf_size_aligned) {
+ buf_align = buf_size;
+ CU_ASSERT_FATAL((buf_size % min_align) == 0);
+ }
+
+ mem_align = buf_align;
+ if (capa->pkt.min_mem_align > mem_align)
+ mem_align = capa->pkt.min_mem_align;
+
+ /* Prepare to align every buffer */
+ shm_size = (num + 1) * (buf_size + buf_align);
+
+ shm = odp_shm_reserve("test_pool_ext_populate", shm_size, mem_align, 0);
+ if (shm == ODP_SHM_INVALID)
+ return ODP_SHM_INVALID;
+
+ buf_ptr = odp_shm_addr(shm);
+ CU_ASSERT_FATAL((uintptr_t)buf_ptr % mem_align == 0);
+
+ /* initialize entire pool memory with a pattern */
+ memset(buf_ptr, MAGIC_U8, shm_size);
+
+ /* Move from mem_align to buf_align */
+ while ((uintptr_t)buf_ptr % buf_align)
+ buf_ptr++;
+
+ for (i = 0; i < num; i++) {
+ /* Flag the last populate call so the pool becomes ready */
+ if (i == num - 1)
+ flags = ODP_POOL_POPULATE_DONE;
+
+ buf[i] = buf_ptr;
+ CU_ASSERT_FATAL(odp_pool_ext_populate(pool, &buf[i], buf_size, 1, flags) == 0);
+
+ buf_ptr += buf_size;
+ while ((uintptr_t)buf_ptr % buf_align)
+ buf_ptr++;
+ }
+
+ return shm;
+}
+
+/* Create an external packet pool and populate it with buffers from SHM */
+static void test_packet_pool_ext_populate(void)
+{
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_ext_capability_t capa;
+ uint32_t buf_size, num_buf;
+ void *buf[EXT_NUM_BUF];
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+
+ pool_ext_init_packet_pool_param(&param);
+ num_buf = param.pkt.num_buf;
+ buf_size = param.pkt.buf_size;
+
+ CU_ASSERT_FATAL(capa.pkt.min_head_align > 0);
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+/* Find which populated buffer backs 'pkt' by matching the packet head
+ * pointer against buf[i] + head_offset. Returns the buffer index, or 'num'
+ * when no buffer matches. */
+static uint32_t find_buf(odp_packet_t pkt, void *buf[], uint32_t num, uint32_t head_offset)
+{
+ uint32_t i;
+ uint8_t *ptr;
+ uint8_t *head = odp_packet_head(pkt);
+
+ for (i = 0; i < num; i++) {
+ ptr = buf[i];
+ ptr += head_offset;
+
+ if (head == ptr)
+ break;
+ }
+
+ return i;
+}
+
+/* Packet length selectors for packet_pool_ext_alloc() */
+#define PKT_LEN_NORMAL 0
+#define PKT_LEN_MAX 1
+#define PKT_LEN_SEGMENTED 2
+
+/* Allocate every packet from an external memory pool and verify length,
+ * headroom, segmentation, backing-buffer ownership, user area size and
+ * application header integrity. len_test selects a normal (half-buffer),
+ * maximum single-segment or two-segment packet length. */
+static void packet_pool_ext_alloc(int len_test)
+{
+	odp_shm_t shm;
+	odp_pool_t pool;
+	odp_pool_ext_param_t param;
+	odp_pool_ext_capability_t capa;
+	uint32_t i, j, buf_size, num_buf, num_pkt, num_alloc, buf_index;
+	uint32_t pkt_len, head_offset, trailer_size, headroom, max_headroom;
+	uint32_t hr, tr, uarea_size, max_payload, buf_data_size, app_hdr_size;
+	int num_seg;
+	uint8_t *app_hdr;
+	void *buf[EXT_NUM_BUF];
+	odp_packet_t pkt[EXT_NUM_BUF];
+	uint32_t seg_len = 0;
+
+	CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+
+	pool_ext_init_packet_pool_param(&param);
+	num_buf = param.pkt.num_buf;
+	buf_size = param.pkt.buf_size;
+	uarea_size = param.pkt.uarea_size;
+
+	pool = odp_pool_ext_create("pool_ext_0", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Hand externally reserved SHM buffers to the pool */
+	shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+	/* Derive per-buffer layout: ODP header + app header precede packet
+	 * data, ODP trailer follows it */
+	app_hdr_size = param.pkt.app_header_size;
+	head_offset = capa.pkt.odp_header_size + app_hdr_size;
+	max_headroom = capa.pkt.max_headroom_size;
+	headroom = param.pkt.headroom;
+	trailer_size = capa.pkt.odp_trailer_size;
+	buf_data_size = buf_size - head_offset - trailer_size;
+	max_payload = buf_data_size - max_headroom;
+	num_pkt = num_buf;
+	num_seg = 1;
+
+	if (len_test == PKT_LEN_NORMAL) {
+		pkt_len = (buf_data_size - headroom) / 2;
+	} else if (len_test == PKT_LEN_MAX) {
+		pkt_len = max_payload;
+	} else {
+		CU_ASSERT_FATAL(capa.pkt.max_segs_per_pkt > 1);
+		/* length that results 2 segments */
+		pkt_len = max_payload + (buf_size / 2);
+		num_seg = 2;
+		num_pkt = num_buf / num_seg;
+	}
+
+	for (i = 0; i < num_pkt; i++) {
+		pkt[i] = odp_packet_alloc(pool, pkt_len);
+		CU_ASSERT(pkt[i] != ODP_PACKET_INVALID);
+		if (pkt[i] == ODP_PACKET_INVALID)
+			break;
+
+		CU_ASSERT(odp_packet_is_valid(pkt[i]) == 1);
+		CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt[i])) == 1);
+		CU_ASSERT(odp_packet_len(pkt[i]) == pkt_len);
+		CU_ASSERT(odp_packet_headroom(pkt[i]) >= headroom);
+		/* Packet head must point into one of the provided buffers */
+		buf_index = find_buf(pkt[i], buf, num_buf, head_offset);
+		CU_ASSERT(buf_index < num_buf);
+		hr = (uintptr_t)odp_packet_data(pkt[i]) - (uintptr_t)odp_packet_head(pkt[i]);
+		CU_ASSERT(hr == odp_packet_headroom(pkt[i]));
+		CU_ASSERT(num_seg == odp_packet_num_segs(pkt[i]));
+		CU_ASSERT(odp_packet_data(pkt[i]) == odp_packet_data_seg_len(pkt[i], &seg_len));
+		CU_ASSERT(odp_packet_seg_len(pkt[i]) == seg_len);
+
+		if (num_seg == 1) {
+			/* Single segment: tailroom is the buffer remainder */
+			tr = buf_data_size - hr - pkt_len;
+			CU_ASSERT(tr == odp_packet_tailroom(pkt[i]));
+			CU_ASSERT(odp_packet_seg_len(pkt[i]) == pkt_len);
+		} else {
+			odp_packet_seg_t seg = odp_packet_last_seg(pkt[i]);
+			uint32_t last_seg_len = odp_packet_seg_data_len(pkt[i], seg);
+			uint32_t max_tr = buf_data_size - last_seg_len;
+
+			CU_ASSERT(odp_packet_tailroom(pkt[i]) <= max_tr);
+			CU_ASSERT(pkt_len == (odp_packet_seg_len(pkt[i]) + last_seg_len));
+		}
+
+		CU_ASSERT(odp_packet_buf_len(pkt[i]) == num_seg * buf_data_size);
+
+		if (uarea_size) {
+			CU_ASSERT(odp_packet_user_area(pkt[i]) != NULL);
+			CU_ASSERT(odp_packet_user_area_size(pkt[i]) >= uarea_size);
+		}
+
+		/* Check that application header content has not changed */
+		app_hdr = (uint8_t *)odp_packet_head(pkt[i]) - app_hdr_size;
+		for (j = 0; j < app_hdr_size; j++)
+			CU_ASSERT(app_hdr[j] == MAGIC_U8);
+	}
+
+	num_alloc = i;
+	CU_ASSERT(num_alloc == num_pkt);
+
+	/* Pool is now empty */
+	CU_ASSERT(odp_packet_alloc(pool, pkt_len) == ODP_PACKET_INVALID);
+
+	for (i = 0; i < num_alloc; i++)
+		odp_packet_free(pkt[i]);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+	CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+/* Allocation test with a packet length well within one buffer */
+static void test_packet_pool_ext_alloc(void)
+{
+	packet_pool_ext_alloc(PKT_LEN_NORMAL);
+}
+
+/* Verify that the user area init callback runs once per populated buffer and
+ * that initialized user area content persists through packet allocation. */
+static void test_packet_pool_ext_uarea_init(void)
+{
+	odp_pool_ext_capability_t capa;
+	odp_pool_ext_param_t param;
+	uint32_t num = ELEM_NUM, i;
+	uint32_t max_payload;
+	odp_pool_t pool;
+	uarea_init_t data;
+	odp_shm_t shm;
+	uint8_t *uarea;
+
+	CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+
+	/* Register init callback; 'data' records per-buffer init marks */
+	memset(&data, 0, sizeof(uarea_init_t));
+	pool_ext_init_packet_pool_param(&param);
+	param.uarea_init.init_fn = init_event_uarea;
+	param.uarea_init.args = &data;
+	num = ODPH_MIN(num, param.pkt.num_buf);
+	param.pkt.num_buf = num;
+	param.pkt.uarea_size = 1;
+	pool = odp_pool_ext_create(NULL, &param);
+
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	void *buf[num];
+	odp_packet_t pkts[num];
+
+	shm = populate_pool(pool, &capa, buf, num, param.pkt.buf_size);
+
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	/* Callback must have fired exactly once per buffer */
+	CU_ASSERT(data.count == num);
+
+	/* Largest single-segment payload given worst-case overheads */
+	max_payload = param.pkt.buf_size;
+	max_payload -= capa.pkt.odp_header_size + param.pkt.app_header_size;
+	max_payload -= capa.pkt.max_headroom_size;
+	max_payload -= capa.pkt.odp_trailer_size;
+	for (i = 0; i < num; i++) {
+		CU_ASSERT(data.mark[i] == 1);
+
+		pkts[i] = odp_packet_alloc(pool, max_payload);
+
+		CU_ASSERT(pkts[i] != ODP_PACKET_INVALID);
+
+		if (pkts[i] == ODP_PACKET_INVALID)
+			break;
+
+		/* Value written by init_event_uarea must still be present */
+		uarea = odp_packet_user_area(pkts[i]);
+
+		CU_ASSERT(*uarea == UAREA);
+	}
+
+	odp_packet_free_multi(pkts, i);
+	odp_pool_destroy(pool);
+	odp_shm_free(shm);
+}
+
+/* Allocation test with the maximum single-segment packet length */
+static void test_packet_pool_ext_alloc_max(void)
+{
+	packet_pool_ext_alloc(PKT_LEN_MAX);
+}
+
+/* Allocation test with a packet length requiring two segments */
+static void test_packet_pool_ext_alloc_seg(void)
+{
+	packet_pool_ext_alloc(PKT_LEN_SEGMENTED);
+}
+
+/* Allocate two-segment packets, disassemble each into its packet buffers,
+ * verify per-buffer head/offset/length accounting, shrink each segment by
+ * two bytes via odp_packet_buf_data_set(), then reassemble and check the
+ * resulting packet length. */
+static void test_packet_pool_ext_disassemble(void)
+{
+	odp_shm_t shm;
+	odp_pool_t pool;
+	odp_pool_ext_param_t param;
+	odp_pool_ext_capability_t capa;
+	uint32_t i, j, buf_size, num_buf, num_pkt, num_alloc, buf_index;
+	uint32_t pkt_len, head_offset, trailer_size, headroom, max_headroom;
+	uint32_t hr, max_payload, buf_data_size;
+	uint32_t num_seg;
+	void *buf[EXT_NUM_BUF];
+	odp_packet_t pkt_tbl[EXT_NUM_BUF];
+
+	CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+	CU_ASSERT_FATAL(capa.pkt.max_segs_per_pkt > 1);
+
+	pool_ext_init_packet_pool_param(&param);
+	num_buf = param.pkt.num_buf;
+	buf_size = param.pkt.buf_size;
+
+	pool = odp_pool_ext_create("pool_ext_0", &param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+	/* Per-buffer layout: headers precede data, trailer follows it */
+	head_offset = capa.pkt.odp_header_size + param.pkt.app_header_size;
+	max_headroom = capa.pkt.max_headroom_size;
+	headroom = param.pkt.headroom;
+	trailer_size = capa.pkt.odp_trailer_size;
+	buf_data_size = buf_size - head_offset - trailer_size;
+	max_payload = buf_data_size - max_headroom;
+
+	/* length that results 2 segments */
+	pkt_len = max_payload + (buf_size / 2);
+	num_seg = 2;
+	num_pkt = num_buf / num_seg;
+
+	for (i = 0; i < num_pkt; i++) {
+		odp_packet_t pkt;
+		odp_packet_seg_t seg;
+		uint32_t num_pkt_buf, data_offset, data_len;
+		void *head, *data, *pkt_head;
+		odp_packet_buf_t pkt_buf[num_seg];
+		void *seg_data[num_seg];
+		uint32_t seg_len[num_seg];
+
+		pkt = odp_packet_alloc(pool, pkt_len);
+		pkt_tbl[i] = pkt;
+		CU_ASSERT(pkt != ODP_PACKET_INVALID);
+		if (pkt == ODP_PACKET_INVALID)
+			break;
+
+		CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+		CU_ASSERT(odp_packet_headroom(pkt) >= headroom);
+		buf_index = find_buf(pkt, buf, num_buf, head_offset);
+		CU_ASSERT(buf_index < num_buf);
+		pkt_head = odp_packet_head(pkt);
+		hr = (uintptr_t)odp_packet_data(pkt) - (uintptr_t)pkt_head;
+		CU_ASSERT(hr == odp_packet_headroom(pkt));
+		CU_ASSERT((int)num_seg == odp_packet_num_segs(pkt));
+
+		/* Record segment data pointers/lengths before disassembly */
+		seg = odp_packet_first_seg(pkt);
+		for (j = 0; j < num_seg; j++) {
+			seg_data[j] = odp_packet_seg_data(pkt, seg);
+			seg_len[j] = odp_packet_seg_data_len(pkt, seg);
+			seg = odp_packet_next_seg(pkt, seg);
+		}
+
+		CU_ASSERT(odp_packet_data(pkt) == seg_data[0]);
+		/* Note: semicolon was missing here, relying on CUnit's
+		 * brace-wrapped macro expansion to compile */
+		CU_ASSERT(odp_packet_seg_len(pkt) == seg_len[0]);
+
+		/* Disassemble packet */
+		num_pkt_buf = odp_packet_disassemble(pkt, pkt_buf, num_seg);
+		CU_ASSERT_FATAL(num_pkt_buf == num_seg);
+
+		CU_ASSERT(odp_packet_buf_head(pkt_buf[0]) == pkt_head);
+		CU_ASSERT(odp_packet_buf_data_offset(pkt_buf[0]) == hr);
+
+		for (j = 0; j < num_seg; j++) {
+			CU_ASSERT(odp_packet_buf_size(pkt_buf[j]) == buf_data_size);
+
+			/* Buffer-level view must match the segment view */
+			head = odp_packet_buf_head(pkt_buf[j]);
+			data_offset = odp_packet_buf_data_offset(pkt_buf[j]);
+			data = (uint8_t *)head + data_offset;
+			CU_ASSERT(seg_data[j] == data);
+			data_len = odp_packet_buf_data_len(pkt_buf[j]);
+			CU_ASSERT(seg_len[j] == data_len);
+
+			CU_ASSERT(odp_packet_buf_from_head(pool, head) == pkt_buf[j]);
+
+			/* Pull in head and tail by one byte */
+			odp_packet_buf_data_set(pkt_buf[j], data_offset + 1, data_len - 2);
+			CU_ASSERT(odp_packet_buf_data_offset(pkt_buf[j]) == data_offset + 1);
+			CU_ASSERT(odp_packet_buf_data_len(pkt_buf[j]) == data_len - 2);
+		}
+
+		/* Reassemble packet, each segment is now 2 bytes shorter */
+		pkt = odp_packet_reassemble(pool, pkt_buf, num_seg);
+
+		CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_num_segs(pkt) == (int)num_seg);
+		pkt_tbl[i] = pkt;
+
+		CU_ASSERT(odp_packet_len(pkt) == (pkt_len - (num_seg * 2)));
+	}
+
+	num_alloc = i;
+	CU_ASSERT(num_alloc == num_pkt);
+
+	/* Pool is now empty */
+	CU_ASSERT(odp_packet_alloc(pool, pkt_len) == ODP_PACKET_INVALID);
+
+	for (i = 0; i < num_alloc; i++)
+		odp_packet_free(pkt_tbl[i]);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+	CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+/* Suite init: query global pool capability and prime default parameters.
+ * Returns 0 on success, -1 on capability query failure. */
+static int pool_suite_init(void)
+{
+	memset(&default_pool_param, 0, sizeof(default_pool_param));
+	memset(&global_pool_capa, 0, sizeof(global_pool_capa));
+
+	if (odp_pool_capability(&global_pool_capa) < 0) {
+		ODPH_ERR("odp_pool_capability() failed in suite init\n");
+		return -1;
+	}
+
+	odp_pool_param_init(&default_pool_param);
+
+	return 0;
+}
+
+/* Suite init: query external pool capability for packet pools and sanity
+ * check the reported type. Returns 0 on success, -1 on failure. */
+static int pool_ext_suite_init(void)
+{
+	odp_pool_ext_capability_t *capa = &global_pool_ext_capa;
+
+	memset(capa, 0, sizeof(*capa));
+
+	if (odp_pool_ext_capability(ODP_POOL_PACKET, capa) != 0) {
+		ODPH_ERR("Pool ext capa failed in suite init\n");
+		return -1;
+	}
+
+	if (capa->type != ODP_POOL_PACKET) {
+		ODPH_ERR("Bad type from pool ext capa in suite init\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* External memory pools are supported when at least one pool can exist */
+static int check_pool_ext_support(void)
+{
+	return global_pool_ext_capa.max_pools ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
+/* User area init tests need ext pools, persistent user areas and a
+ * nonzero maximum user area size */
+static int check_pool_ext_uarea_init_support(void)
+{
+	if (global_pool_ext_capa.max_pools > 0 &&
+	    global_pool_ext_capa.pkt.uarea_persistence &&
+	    global_pool_ext_capa.pkt.max_uarea_size > 0)
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+/* Segmentation tests need ext pools that support multi-segment packets */
+static int check_pool_ext_segment_support(void)
+{
+	if (global_pool_ext_capa.max_pools > 0 &&
+	    global_pool_ext_capa.pkt.max_segs_per_pkt >= 2)
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+/* Test cases for regular (ODP-managed memory) pools */
+odp_testinfo_t pool_suite[] = {
+	ODP_TEST_INFO(pool_test_param_init),
+	ODP_TEST_INFO(pool_test_create_destroy_buffer),
+	ODP_TEST_INFO(pool_test_create_destroy_packet),
+	ODP_TEST_INFO(pool_test_create_destroy_timeout),
+	ODP_TEST_INFO(pool_test_create_destroy_vector),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_buffer_uarea_init, pool_check_buffer_uarea_init),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_packet_uarea_init, pool_check_packet_uarea_init),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_vector_uarea_init, pool_check_vector_uarea_init),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_timeout_uarea_init, pool_check_timeout_uarea_init),
+	ODP_TEST_INFO(pool_test_lookup_info_print),
+	ODP_TEST_INFO(pool_test_same_name_buf),
+	ODP_TEST_INFO(pool_test_same_name_pkt),
+	ODP_TEST_INFO(pool_test_same_name_tmo),
+	ODP_TEST_INFO(pool_test_same_name_vec),
+	ODP_TEST_INFO(pool_test_alloc_buffer),
+	ODP_TEST_INFO(pool_test_alloc_buffer_min_cache),
+	ODP_TEST_INFO(pool_test_alloc_buffer_max_cache),
+	ODP_TEST_INFO(pool_test_alloc_packet_vector),
+	ODP_TEST_INFO(pool_test_alloc_packet_vector_min_cache),
+	ODP_TEST_INFO(pool_test_alloc_packet_vector_max_cache),
+	ODP_TEST_INFO(pool_test_alloc_packet),
+	ODP_TEST_INFO(pool_test_alloc_packet_min_cache),
+	ODP_TEST_INFO(pool_test_alloc_packet_max_cache),
+	ODP_TEST_INFO(pool_test_alloc_packet_subparam),
+	ODP_TEST_INFO(pool_test_alloc_timeout),
+	ODP_TEST_INFO(pool_test_alloc_timeout_min_cache),
+	ODP_TEST_INFO(pool_test_alloc_timeout_max_cache),
+	ODP_TEST_INFO(pool_test_info_packet),
+	ODP_TEST_INFO(pool_test_info_data_range),
+	ODP_TEST_INFO(pool_test_buf_max_num),
+	ODP_TEST_INFO(pool_test_pkt_max_num),
+	ODP_TEST_INFO(pool_test_packet_vector_max_num),
+	ODP_TEST_INFO(pool_test_pkt_seg_len),
+	ODP_TEST_INFO(pool_test_tmo_max_num),
+	ODP_TEST_INFO(pool_test_create_after_fork),
+	ODP_TEST_INFO(pool_test_pool_index),
+	ODP_TEST_INFO(pool_test_create_max_pkt_pools),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_buffer_pool_statistics,
+				  pool_check_buffer_pool_statistics),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_packet_pool_statistics,
+				  pool_check_packet_pool_statistics),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_packet_vector_pool_statistics,
+				  pool_check_packet_vector_pool_statistics),
+	ODP_TEST_INFO_CONDITIONAL(pool_test_timeout_pool_statistics,
+				  pool_check_timeout_pool_statistics),
+	ODP_TEST_INFO_NULL,
+};
+
+/* Test cases for external (application-provided memory) pools; most are
+ * conditional on the implementation reporting ext pool support */
+odp_testinfo_t pool_ext_suite[] = {
+	ODP_TEST_INFO(test_packet_pool_ext_capa),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_param_init, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_create, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_lookup, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_info, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_populate, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_uarea_init,
+				  check_pool_ext_uarea_init_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc_max, check_pool_ext_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc_seg, check_pool_ext_segment_support),
+	ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_disassemble, check_pool_ext_segment_support),
+	ODP_TEST_INFO_NULL,
+};
+
+/* Top-level suite registration table */
+odp_suiteinfo_t pool_suites[] = {
+	{ .name = "Pool tests",
+	  .testinfo_tbl = pool_suite,
+	  .init_func = pool_suite_init,
+	},
+	{ .name = "Ext mem pool tests",
+	  .testinfo_tbl = pool_ext_suite,
+	  .init_func = pool_ext_suite_init,
+	},
+	ODP_SUITE_INFO_NULL,
+};
+
+/* Entry point: parse CUnit common options, register the suites and run */
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(&argc, argv))
+		return -1;
+
+	ret = odp_cunit_register(pool_suites);
+	if (ret != 0)
+		return ret;
+
+	return odp_cunit_run();
+}
diff --git a/test/common_plat/validation/api/queue/.gitignore b/test/validation/api/queue/.gitignore
index 469506a13..469506a13 100644
--- a/test/common_plat/validation/api/queue/.gitignore
+++ b/test/validation/api/queue/.gitignore
diff --git a/test/validation/api/queue/Makefile.am b/test/validation/api/queue/Makefile.am
new file mode 100644
index 000000000..94a6b28a9
--- /dev/null
+++ b/test/validation/api/queue/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = queue_main
+queue_main_SOURCES = queue.c
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
new file mode 100644
index 000000000..4b5ccde65
--- /dev/null
+++ b/test/validation/api/queue/queue.c
@@ -0,0 +1,1176 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+
+#define MAX_WORKERS 32
+#define BURST_SIZE (8)
+#define MAX_NUM_EVENT (1 * 1024)
+#define MAX_ITERATION (100)
+#define MAX_QUEUES (64 * 1024)
+#define GLOBALS_NAME "queue_test_globals"
+#define DEQ_RETRIES 100
+#define ENQ_RETRIES 100
+
+/* Shared test state; lives in SHM so worker threads can reach it */
+typedef struct {
+	int num_workers;		/* worker thread count (capped at MAX_WORKERS) */
+	odp_barrier_t barrier;		/* synchronizes all workers */
+	odp_queue_t queue;
+	odp_atomic_u32_t num_event;
+
+	/* State for the queue pair (ping-pong) tests */
+	struct {
+		odp_queue_t queue_a;	/* thread A dequeues here */
+		odp_queue_t queue_b;	/* thread B dequeues here */
+		int passed_a;		/* thread A reached the event target */
+		int passed_b;		/* thread B reached the event target */
+		int burst;		/* events each thread pre-enqueues */
+		odp_pool_t pool;	/* buffer pool for pair events */
+		odp_barrier_t barrier;	/* start-line barrier for the two threads */
+		odp_atomic_u32_t counter; /* fetch-inc decides which thread is A */
+	} pair;
+
+	struct {
+		uint32_t num_event;	/* per-thread event counter */
+	} thread[ODP_THREAD_COUNT_MAX];
+
+} test_globals_t;
+
+static int queue_context = 0xff;
+static odp_pool_t pool;
+
+/* Generate a unique 6-letter queue name (base-26, 'A'..'Z') from index.
+ * Unique for up to ~300M (26^6) queues. Does not NUL-terminate. */
+static void generate_name(char *name, uint32_t index)
+{
+	uint32_t divisor = 26 * 26 * 26 * 26 * 26;
+	int pos;
+
+	for (pos = 0; pos < 6; pos++) {
+		name[pos] = 'A' + ((index / divisor) % 26);
+		divisor /= 26;
+	}
+}
+
+/* Suite init: reserve shared globals in SHM, size the worker barrier and
+ * create the message buffer pool. Returns 0 on success, -1 on failure. */
+static int queue_suite_init(void)
+{
+	odp_shm_t shm;
+	test_globals_t *globals;
+	odp_pool_param_t pool_param;
+	int workers;
+
+	shm = odp_shm_reserve(GLOBALS_NAME, sizeof(test_globals_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("Shared memory reserve failed\n");
+		return -1;
+	}
+
+	globals = odp_shm_addr(shm);
+	memset(globals, 0, sizeof(test_globals_t));
+
+	workers = odp_cpumask_default_worker(NULL, 0);
+	if (workers > MAX_WORKERS)
+		workers = MAX_WORKERS;
+
+	globals->num_workers = workers;
+	odp_barrier_init(&globals->barrier, workers);
+
+	odp_pool_param_init(&pool_param);
+	pool_param.type = ODP_POOL_BUFFER;
+	pool_param.buf.size = 4;
+	/* Allocate enough buffers taking into consideration core starvation
+	 * due to caching */
+	pool_param.buf.num = MAX_NUM_EVENT + pool_param.buf.cache_size;
+
+	pool = odp_pool_create("msg_pool", &pool_param);
+	if (pool == ODP_POOL_INVALID) {
+		ODPH_ERR("Pool create failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Suite teardown: release the globals SHM and destroy the message pool.
+ * Returns 0 on success, -1 on any failure. */
+static int queue_suite_term(void)
+{
+	odp_shm_t shm = odp_shm_lookup(GLOBALS_NAME);
+
+	if (shm == ODP_SHM_INVALID) {
+		ODPH_ERR("SHM lookup failed\n");
+		return -1;
+	}
+
+	if (odp_shm_free(shm) != 0) {
+		ODPH_ERR("SHM free failed\n");
+		return -1;
+	}
+
+	if (odp_pool_destroy(pool) != 0) {
+		ODPH_ERR("Pool destroy failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Basic sanity checks on reported queue capability limits */
+static void queue_test_capa(void)
+{
+	odp_queue_capability_t queue_capa;
+
+	memset(&queue_capa, 0, sizeof(queue_capa));
+	CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+
+	/* Per-type maximums can never exceed the overall queue maximum */
+	CU_ASSERT(queue_capa.max_queues > 0);
+	CU_ASSERT(queue_capa.max_queues >= queue_capa.plain.max_num);
+	CU_ASSERT(queue_capa.max_queues >= queue_capa.plain.lockfree.max_num);
+	CU_ASSERT(queue_capa.max_queues >= queue_capa.plain.waitfree.max_num);
+}
+
+/* Verify odp_queue_param_init() resets every field to its documented
+ * default, regardless of the memory's prior content (fill pattern). */
+static void test_defaults(uint8_t fill)
+{
+	odp_queue_param_t param;
+
+	memset(&param, fill, sizeof(param));
+	odp_queue_param_init(&param);
+	CU_ASSERT(param.type == ODP_QUEUE_TYPE_PLAIN);
+	CU_ASSERT(param.enq_mode == ODP_QUEUE_OP_MT);
+	CU_ASSERT(param.deq_mode == ODP_QUEUE_OP_MT);
+	CU_ASSERT(param.sched.prio == odp_schedule_default_prio());
+	CU_ASSERT(param.sched.sync == ODP_SCHED_SYNC_PARALLEL);
+	CU_ASSERT(param.sched.group == ODP_SCHED_GROUP_ALL);
+	CU_ASSERT(param.sched.lock_count == 0);
+	CU_ASSERT(param.order == ODP_QUEUE_ORDER_KEEP);
+	CU_ASSERT(param.nonblocking == ODP_BLOCKING);
+	CU_ASSERT(param.context == NULL);
+	CU_ASSERT(param.context_len == 0);
+	CU_ASSERT(param.size == 0);
+}
+
+/* Run the defaults check with both all-zero and all-ones prior content */
+static void queue_test_param_init(void)
+{
+	test_defaults(0);
+	test_defaults(0xff);
+}
+
+/* Create as many plain queues as the implementation allows (capped at
+ * MAX_QUEUES), each with a unique name, verify lookup, then destroy all. */
+static void queue_test_max_plain(void)
+{
+	odp_queue_capability_t capa;
+	odp_queue_param_t qparams;
+	char name[ODP_QUEUE_NAME_LEN];
+	odp_queue_t queue[MAX_QUEUES];
+	uint32_t num_queues, min, i;
+
+	memset(&capa, 0, sizeof(odp_queue_capability_t));
+	CU_ASSERT(odp_queue_capability(&capa) == 0);
+
+	CU_ASSERT(capa.max_queues != 0);
+	CU_ASSERT(capa.plain.max_num != 0);
+
+	min = capa.plain.max_num;
+
+	CU_ASSERT(capa.max_queues >= min);
+
+	/* Fill name with a known pattern; generate_name() overwrites the
+	 * first six characters per queue */
+	for (i = 0; i < ODP_QUEUE_NAME_LEN; i++)
+		name[i] = 'A' + (i % 26);
+
+	name[ODP_QUEUE_NAME_LEN - 1] = 0;
+
+	odp_queue_param_init(&qparams);
+	CU_ASSERT(qparams.nonblocking == ODP_BLOCKING);
+
+	num_queues = capa.plain.max_num;
+
+	if (num_queues > MAX_QUEUES)
+		num_queues = MAX_QUEUES;
+
+	for (i = 0; i < num_queues; i++) {
+		generate_name(name, i);
+		queue[i] = odp_queue_create(name, &qparams);
+
+		if (queue[i] == ODP_QUEUE_INVALID) {
+			CU_FAIL("Queue create failed");
+			num_queues = i;
+			break;
+		}
+
+		CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
+	}
+
+	for (i = 0; i < num_queues; i++)
+		CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
+}
+
+/* Create up to num queues with odp_queue_create_multi(), retrying up to
+ * max_retries times when a call creates none. When share_param is set,
+ * param[0] applies to all queues; otherwise each queue has its own entry.
+ * Returns the number of queues actually created. */
+static int queue_create_multi(const char *name[], const odp_queue_param_t param[],
+			      odp_bool_t share_param, odp_queue_t queue[], uint32_t num)
+{
+	const uint32_t max_retries = 100;
+	uint32_t created = 0;
+	uint32_t retries = 0;
+
+	do {
+		const char **names = (name != NULL) ? &name[created] : NULL;
+		const odp_queue_param_t *params = share_param ? &param[0] : &param[created];
+		int ret = odp_queue_create_multi(names, params, share_param,
+						 &queue[created], num - created);
+		if (ret < 0) {
+			CU_FAIL("Queue create multi failed");
+			break;
+		}
+		CU_ASSERT_FATAL((uint32_t)ret <= num - created);
+
+		/* A zero-result call counts as a retry; any progress resets */
+		retries = (ret == 0) ? retries + 1 : 0;
+		created += ret;
+	} while (created < num && retries < max_retries);
+
+	return created;
+}
+
+/* Destroy num queues with odp_queue_destroy_multi(), looping until every
+ * queue is gone. Each call must destroy at least one queue. */
+static void queue_destroy_multi(odp_queue_t queue[], uint32_t num)
+{
+	uint32_t destroyed = 0;
+
+	while (destroyed < num) {
+		int ret = odp_queue_destroy_multi(&queue[destroyed], num - destroyed);
+
+		CU_ASSERT_FATAL(ret > 0 && (uint32_t)ret <= num - destroyed);
+
+		destroyed += ret;
+	}
+	CU_ASSERT_FATAL(destroyed == num);
+}
+
+/* Exercise multi-queue create/destroy in four modes: shared vs per-queue
+ * parameters, each with and without names. */
+static void queue_test_create_destroy_multi(void)
+{
+	odp_queue_capability_t capa;
+	odp_queue_param_t param_single;
+	odp_queue_param_t param[MAX_QUEUES];
+	odp_queue_t queue[MAX_QUEUES];
+	const char *name[MAX_QUEUES] = {NULL, "aaa", NULL, "bbb", "ccc", NULL, "ddd"};
+	uint32_t num_queues, num_created;
+
+	CU_ASSERT_FATAL(odp_queue_capability(&capa) == 0);
+	CU_ASSERT_FATAL(capa.plain.max_num != 0);
+
+	num_queues = capa.plain.max_num < MAX_QUEUES ? capa.plain.max_num : MAX_QUEUES;
+	for (uint32_t i = 0; i < num_queues; i++)
+		odp_queue_param_init(&param[i]);
+	odp_queue_param_init(&param_single);
+
+	/* Create queues using shared parameters */
+	num_created = queue_create_multi(name, &param_single, true, queue, num_queues);
+	CU_ASSERT(num_created == num_queues);
+	queue_destroy_multi(queue, num_created);
+
+	num_created = queue_create_multi(NULL, &param_single, true, queue, num_queues);
+	CU_ASSERT(num_created == num_queues);
+	queue_destroy_multi(queue, num_created);
+
+	/* Use separate parameters for each queue */
+	num_created = queue_create_multi(name, param, false, queue, num_queues);
+	CU_ASSERT(num_created == num_queues);
+	queue_destroy_multi(queue, num_created);
+
+	num_created = queue_create_multi(NULL, param, false, queue, num_queues);
+	CU_ASSERT(num_created == num_queues);
+	queue_destroy_multi(queue, num_created);
+}
+
+/* Create and destroy queues with every valid combination of enqueue and
+ * dequeue operation modes, for both plain and scheduled queue types. */
+static void queue_test_mode(void)
+{
+	odp_queue_param_t qparams;
+	odp_queue_t queue;
+	int i, j;
+	odp_queue_op_mode_t mode[3] = { ODP_QUEUE_OP_MT,
+					ODP_QUEUE_OP_MT_UNSAFE,
+					ODP_QUEUE_OP_DISABLED };
+
+	odp_queue_param_init(&qparams);
+
+	/* Plain queue modes */
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < 3; j++) {
+			/* Should not disable both enq and deq */
+			if (i == 2 && j == 2)
+				break;
+
+			qparams.enq_mode = mode[i];
+			qparams.deq_mode = mode[j];
+			queue = odp_queue_create("test_queue", &qparams);
+			CU_ASSERT(queue != ODP_QUEUE_INVALID);
+			if (queue != ODP_QUEUE_INVALID)
+				CU_ASSERT(odp_queue_destroy(queue) == 0);
+		}
+	}
+
+	odp_queue_param_init(&qparams);
+	qparams.type = ODP_QUEUE_TYPE_SCHED;
+
+	/* Scheduled queue modes. Dequeue mode is fixed. */
+	for (i = 0; i < 3; i++) {
+		qparams.enq_mode = mode[i];
+		queue = odp_queue_create("test_queue", &qparams);
+		CU_ASSERT(queue != ODP_QUEUE_INVALID);
+		if (queue != ODP_QUEUE_INVALID)
+			CU_ASSERT(odp_queue_destroy(queue) == 0);
+	}
+}
+
+/* Poll the queue up to MAX_ITERATION times. Returns the first event
+ * received, or ODP_EVENT_INVALID if the queue stayed empty. */
+static odp_event_t dequeue_event(odp_queue_t queue)
+{
+	odp_event_t ev = ODP_EVENT_INVALID;
+	int attempt;
+
+	for (attempt = 0; attempt < MAX_ITERATION; attempt++) {
+		ev = odp_queue_deq(queue);
+		if (ev != ODP_EVENT_INVALID)
+			break;
+	}
+
+	return ev;
+}
+
+/* Enqueue bursts of sequence-numbered buffers to a plain queue and dequeue
+ * them back, checking FIFO order. Runs once at quarter capacity and once at
+ * full capacity. Skips when lock-free queues are requested but unsupported. */
+static void test_burst(odp_nonblocking_t nonblocking,
+		       odp_queue_op_mode_t enq_mode,
+		       odp_queue_op_mode_t deq_mode)
+{
+	odp_queue_param_t param;
+	odp_queue_t queue;
+	odp_queue_capability_t capa;
+	uint32_t max_burst, burst, i, j;
+	odp_pool_t pool;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint32_t *data;
+
+	CU_ASSERT_FATAL(odp_queue_capability(&capa) == 0);
+
+	max_burst = capa.plain.max_size;
+
+	if (nonblocking == ODP_NONBLOCKING_LF) {
+		if (capa.plain.lockfree.max_num == 0) {
+			printf("  NO LOCKFREE QUEUES. Test skipped.\n");
+			return;
+		}
+
+		max_burst = capa.plain.lockfree.max_size;
+	}
+
+	/* max_size == 0 means unlimited; cap at what the pool can supply */
+	if (max_burst == 0 || max_burst > MAX_NUM_EVENT)
+		max_burst = MAX_NUM_EVENT;
+
+	pool = odp_pool_lookup("msg_pool");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	odp_queue_param_init(&param);
+	param.type        = ODP_QUEUE_TYPE_PLAIN;
+	param.nonblocking = nonblocking;
+	param.size        = max_burst;
+	param.enq_mode    = enq_mode;
+	param.deq_mode    = deq_mode;
+
+	queue = odp_queue_create("burst test", &param);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	CU_ASSERT(odp_queue_deq(queue) == ODP_EVENT_INVALID);
+
+	/* Smoke test: single event round trip */
+	buf = odp_buffer_alloc(pool);
+	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+	ev = odp_buffer_to_event(buf);
+	CU_ASSERT(odp_queue_enq(queue, ev) == 0);
+	ev = dequeue_event(queue);
+	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+	if (ev != ODP_EVENT_INVALID)
+		odp_event_free(ev);
+
+	for (j = 0; j < 2; j++) {
+		if (j == 0)
+			burst = max_burst / 4;
+		else
+			burst = max_burst;
+
+		for (i = 0; i < burst; i++) {
+			buf = odp_buffer_alloc(pool);
+			CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+			data = odp_buffer_addr(buf);
+			*data = i;
+			ev = odp_buffer_to_event(buf);
+			CU_ASSERT(odp_queue_enq(queue, ev) == 0);
+		}
+
+		for (i = 0; i < burst; i++) {
+			ev = dequeue_event(queue);
+			CU_ASSERT(ev != ODP_EVENT_INVALID);
+			if (ev != ODP_EVENT_INVALID) {
+				/* FIFO: sequence number must match index */
+				buf = odp_buffer_from_event(ev);
+				data = odp_buffer_addr(buf);
+				CU_ASSERT(*data == i);
+				odp_event_free(ev);
+			}
+		}
+	}
+
+	CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Burst test wrappers covering each combination of blocking mode and
+ * producer/consumer thread safety: MT = multithread-safe op,
+ * MT_UNSAFE = single thread per op, LF = lock-free implementation.
+ * spmc/mpsc/spsc name the single/multi producer/consumer variants. */
+static void queue_test_burst(void)
+{
+	test_burst(ODP_BLOCKING, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_burst_spmc(void)
+{
+	test_burst(ODP_BLOCKING, ODP_QUEUE_OP_MT_UNSAFE, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_burst_mpsc(void)
+{
+	test_burst(ODP_BLOCKING, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_burst_spsc(void)
+{
+	test_burst(ODP_BLOCKING, ODP_QUEUE_OP_MT_UNSAFE,
+		   ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_burst_lf(void)
+{
+	test_burst(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_burst_lf_spmc(void)
+{
+	test_burst(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT_UNSAFE, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_burst_lf_mpsc(void)
+{
+	test_burst(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_burst_lf_spsc(void)
+{
+	test_burst(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT_UNSAFE,
+		   ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+/* Ping-pong worker shared by both threads of the pair tests. Each thread
+ * pre-enqueues 'burst' sequence-numbered events to the other thread's
+ * queue, then bounces events back and forth until it has handled 10000
+ * events, verifying the sequence wraps correctly at 'burst'. Gives up after
+ * ~100ms of the source queue staying empty. Returns 0 on success. */
+static int queue_pair_work_loop(void *arg)
+{
+	uint32_t i, events, burst, retry, max_retry;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint32_t *data;
+	odp_queue_t src_queue, dst_queue;
+	odp_pool_t pool;
+	int passed;
+	int thread_a;
+	test_globals_t *globals = arg;
+
+	burst = globals->pair.burst;
+	pool = globals->pair.pool;
+
+	/* Select which thread is A */
+	thread_a = odp_atomic_fetch_inc_u32(&globals->pair.counter);
+
+	if (thread_a) {
+		src_queue = globals->pair.queue_a;
+		dst_queue = globals->pair.queue_b;
+	} else {
+		src_queue = globals->pair.queue_b;
+		dst_queue = globals->pair.queue_a;
+	}
+
+	/* Prime the peer's queue with sequence-numbered events */
+	for (i = 0; i < burst; i++) {
+		buf = odp_buffer_alloc(pool);
+		CU_ASSERT(buf != ODP_BUFFER_INVALID);
+
+		if (buf == ODP_BUFFER_INVALID)
+			return -1;
+
+		data = odp_buffer_addr(buf);
+		*data = i;
+		ev = odp_buffer_to_event(buf);
+		CU_ASSERT(odp_queue_enq(dst_queue, ev) == 0);
+	}
+
+	/* Wait until both threads are ready */
+	odp_barrier_wait(&globals->pair.barrier);
+	events = 0;
+	retry = 0;
+	max_retry = 0;
+	i = 0;
+	while (events < 10000 && retry < 300) {
+		ev = odp_queue_deq(src_queue);
+		if (ev == ODP_EVENT_INVALID) {
+			retry++;
+			/* Slow down polling period after 100 retries. This
+			 * gives time for the other thread to answer, if it
+			 * was e.g. interrupted by the OS. We give up if
+			 * the source queue stays empty for about 100ms. */
+			if (retry > 200)
+				odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+			else if (retry > 100)
+				odp_time_wait_ns(ODP_TIME_USEC_IN_NS);
+
+			if (retry > max_retry)
+				max_retry = retry;
+
+			continue;
+		}
+
+		events++;
+		retry = 0;
+		/* Events must arrive in sequence, wrapping at 'burst' */
+		buf = odp_buffer_from_event(ev);
+		data = odp_buffer_addr(buf);
+		if (*data != i) {
+			ODPH_ERR("Seq error: expected %u, recv %u\n", i, *data);
+			CU_FAIL("Sequence number error");
+		}
+
+		i++;
+		if (i == burst)
+			i = 0;
+
+		/* Bounce the event back to the peer */
+		CU_ASSERT(odp_queue_enq(dst_queue, ev) == 0);
+	}
+
+	passed = (events == 10000);
+
+	if (thread_a) {
+		globals->pair.passed_a = passed;
+		if (max_retry > 100)
+			printf("\n    thread_a max_retry %u\n", max_retry);
+	} else {
+		globals->pair.passed_b = passed;
+		if (max_retry > 100)
+			printf("\n    thread_b max_retry %u\n", max_retry);
+	}
+
+	return 0;
+}
+
+/* Two-thread ping-pong test: create a pair of plain queues with the given
+ * modes, run queue_pair_work_loop() on this thread and one worker, then
+ * verify both sides passed and that every event is recoverable. */
+static void test_pair(odp_nonblocking_t nonblocking,
+		      odp_queue_op_mode_t enq_mode,
+		      odp_queue_op_mode_t deq_mode)
+{
+	odp_queue_param_t param;
+	odp_queue_t queue;
+	odp_queue_capability_t capa;
+	uint32_t max_burst, num;
+	odp_pool_t pool;
+	odp_event_t ev;
+	odp_shm_t shm;
+	test_globals_t *globals;
+	void *arg;
+
+	shm = odp_shm_lookup(GLOBALS_NAME);
+	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+	globals = odp_shm_addr(shm);
+
+	CU_ASSERT_FATAL(odp_queue_capability(&capa) == 0);
+
+	max_burst = 2 * BURST_SIZE;
+
+	if (nonblocking == ODP_NONBLOCKING_LF) {
+		if (capa.plain.lockfree.max_num == 0) {
+			printf("  NO LOCKFREE QUEUES. Test skipped.\n");
+			return;
+		}
+
+		if (capa.plain.lockfree.max_size &&
+		    capa.plain.lockfree.max_size < max_burst)
+			max_burst = capa.plain.lockfree.max_size;
+	} else {
+		if (capa.plain.max_size && capa.plain.max_size < max_burst)
+			max_burst = capa.plain.max_size;
+	}
+
+	/* Each thread pre-enqueues half of the total events */
+	globals->pair.burst = max_burst / 2;
+
+	pool = odp_pool_lookup("msg_pool");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	globals->pair.pool = pool;
+
+	odp_queue_param_init(&param);
+	param.type        = ODP_QUEUE_TYPE_PLAIN;
+	param.nonblocking = nonblocking;
+	param.size        = max_burst;
+	param.enq_mode    = enq_mode;
+	param.deq_mode    = deq_mode;
+
+	queue = odp_queue_create("queue_a", &param);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+	globals->pair.queue_a = queue;
+	CU_ASSERT(odp_queue_deq(queue) == ODP_EVENT_INVALID);
+
+	queue = odp_queue_create("queue_b", &param);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+	globals->pair.queue_b = queue;
+	CU_ASSERT(odp_queue_deq(queue) == ODP_EVENT_INVALID);
+
+	odp_barrier_init(&globals->pair.barrier, 2);
+	globals->pair.passed_a = 0;
+	globals->pair.passed_b = 0;
+	odp_atomic_init_u32(&globals->pair.counter, 0);
+
+	/* Create one worker thread */
+	arg = globals;
+	odp_cunit_thread_create(1, queue_pair_work_loop, &arg, 0, 0);
+
+	/* Run this thread as the second thread */
+	CU_ASSERT(queue_pair_work_loop(globals) == 0);
+
+	/* Wait worker to terminate */
+	odp_cunit_thread_join(1);
+
+	CU_ASSERT(globals->pair.passed_a);
+	CU_ASSERT(globals->pair.passed_b);
+
+	/* Drain both queues; all max_burst events must be accounted for */
+	num = 0;
+
+	while ((ev = dequeue_event(globals->pair.queue_a))
+	       != ODP_EVENT_INVALID) {
+		num++;
+		odp_event_free(ev);
+	}
+
+	while ((ev = dequeue_event(globals->pair.queue_b))
+	       != ODP_EVENT_INVALID) {
+		num++;
+		odp_event_free(ev);
+	}
+
+	CU_ASSERT(num == max_burst);
+	CU_ASSERT(odp_queue_destroy(globals->pair.queue_a) == 0);
+	CU_ASSERT(odp_queue_destroy(globals->pair.queue_b) == 0);
+}
+
+/* Pair test wrappers covering each combination of blocking mode and
+ * producer/consumer thread safety (see burst wrappers for naming). */
+static void queue_test_pair(void)
+{
+	test_pair(ODP_BLOCKING, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_pair_spmc(void)
+{
+	test_pair(ODP_BLOCKING, ODP_QUEUE_OP_MT_UNSAFE, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_pair_mpsc(void)
+{
+	test_pair(ODP_BLOCKING, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_pair_spsc(void)
+{
+	test_pair(ODP_BLOCKING, ODP_QUEUE_OP_MT_UNSAFE, ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_pair_lf(void)
+{
+	test_pair(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_pair_lf_spmc(void)
+{
+	test_pair(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT_UNSAFE, ODP_QUEUE_OP_MT);
+}
+
+static void queue_test_pair_lf_mpsc(void)
+{
+	test_pair(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT, ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_pair_lf_spsc(void)
+{
+	test_pair(ODP_NONBLOCKING_LF, ODP_QUEUE_OP_MT_UNSAFE,
+		  ODP_QUEUE_OP_MT_UNSAFE);
+}
+
+static void queue_test_param(void)
+{
+ odp_queue_t queue, null_queue;
+ odp_event_t enev[BURST_SIZE] = {ODP_EVENT_INVALID};
+ odp_event_t deev[BURST_SIZE] = {ODP_EVENT_INVALID};
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_pool_t msg_pool;
+ odp_event_t *pev_tmp;
+ int i, deq_ret, ret;
+ int nr_deq_entries = 0;
+ int max_iteration = MAX_ITERATION;
+ odp_queue_param_t qparams;
+ odp_buffer_t enbuf;
+
+ odp_queue_param_init(&qparams);
+
+ /* Schedule type queue */
+ qparams.type = ODP_QUEUE_TYPE_SCHED;
+ qparams.sched.prio = odp_schedule_min_prio();
+ qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparams.sched.group = ODP_SCHED_GROUP_WORKER;
+
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT_FATAL(ODP_QUEUE_INVALID != queue);
+ CU_ASSERT(odp_queue_to_u64(queue) !=
+ odp_queue_to_u64(ODP_QUEUE_INVALID));
+ CU_ASSERT(queue == odp_queue_lookup("test_queue"));
+ CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
+ CU_ASSERT(odp_schedule_min_prio() == odp_queue_sched_prio(queue));
+ CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
+ CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));
+
+ CU_ASSERT(odp_queue_context(queue) == NULL);
+ CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
+ sizeof(queue_context)));
+
+ CU_ASSERT(&queue_context == odp_queue_context(queue));
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+
+ /* Create queue with no name */
+ odp_queue_param_init(&qparams);
+ null_queue = odp_queue_create(NULL, &qparams);
+ CU_ASSERT_FATAL(ODP_QUEUE_INVALID != null_queue);
+ CU_ASSERT(odp_queue_context(null_queue) == NULL);
+
+ /* Plain type queue */
+ odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_PLAIN;
+ qparams.context = &queue_context;
+ qparams.context_len = sizeof(queue_context);
+
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT_FATAL(ODP_QUEUE_INVALID != queue);
+ CU_ASSERT(queue == odp_queue_lookup("test_queue"));
+ CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
+ CU_ASSERT(&queue_context == odp_queue_context(queue));
+
+ /* Destroy queue with no name */
+ CU_ASSERT(odp_queue_destroy(null_queue) == 0);
+
+ msg_pool = odp_pool_lookup("msg_pool");
+ buf = odp_buffer_alloc(msg_pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT(ret == 0);
+ if (ret) {
+ odp_buffer_free(buf);
+ } else {
+ CU_ASSERT(ev == odp_queue_deq(queue));
+ odp_buffer_free(buf);
+ }
+
+ for (i = 0; i < BURST_SIZE; i++) {
+ buf = odp_buffer_alloc(msg_pool);
+ enev[i] = odp_buffer_to_event(buf);
+ }
+
+ /*
+ * odp_queue_enq_multi may return 0..n buffers due to the resource
+ * constraints in the implementation at that given point of time.
+ * But here we assume that we succeed in enqueuing all buffers.
+ */
+ ret = odp_queue_enq_multi(queue, enev, BURST_SIZE);
+ CU_ASSERT(BURST_SIZE == ret);
+ i = ret < 0 ? 0 : ret;
+ for ( ; i < BURST_SIZE; i++)
+ odp_event_free(enev[i]);
+
+ pev_tmp = deev;
+ do {
+ deq_ret = odp_queue_deq_multi(queue, pev_tmp, BURST_SIZE);
+ nr_deq_entries += deq_ret;
+ max_iteration--;
+ pev_tmp += deq_ret;
+ CU_ASSERT(max_iteration >= 0);
+ } while (nr_deq_entries < BURST_SIZE);
+
+ for (i = 0; i < BURST_SIZE; i++) {
+ enbuf = odp_buffer_from_event(enev[i]);
+ CU_ASSERT(enev[i] == deev[i]);
+ odp_buffer_free(enbuf);
+ }
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+static void queue_test_same_name(int sched)
+{
+ odp_queue_t queue, queue_a, queue_b;
+ odp_queue_param_t param;
+ const char *name = "same_name";
+
+ odp_queue_param_init(&param);
+
+ if (sched)
+ param.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue_a = odp_queue_create(name, &param);
+ CU_ASSERT_FATAL(queue_a != ODP_QUEUE_INVALID);
+
+ queue = odp_queue_lookup(name);
+ CU_ASSERT(queue == queue_a);
+
+ /* Second queue with the same name */
+ queue_b = odp_queue_create(name, &param);
+ CU_ASSERT_FATAL(queue_b != ODP_QUEUE_INVALID);
+
+ queue = odp_queue_lookup(name);
+ CU_ASSERT(queue == queue_a || queue == queue_b);
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue_a) == 0);
+ CU_ASSERT_FATAL(odp_queue_destroy(queue_b) == 0);
+}
+
+static void queue_test_same_name_plain(void)
+{
+ queue_test_same_name(0);
+}
+
+static void queue_test_same_name_sched(void)
+{
+ queue_test_same_name(1);
+}
+
+static void queue_test_info(void)
+{
+ odp_queue_t q_plain, q_order;
+ const char *const nq_plain = "test_q_plain";
+ const char *const nq_order = "test_q_order";
+ odp_queue_info_t info;
+ odp_queue_param_t param;
+ odp_queue_capability_t capability;
+ odp_schedule_capability_t sched_capa;
+ char q_plain_ctx[] = "test_q_plain context data";
+ char q_order_ctx[] = "test_q_order context data";
+ uint32_t lock_count;
+ char *ctx;
+ uint32_t ret;
+
+ /* Create a plain queue and set context */
+ q_plain = odp_queue_create(nq_plain, NULL);
+ CU_ASSERT_FATAL(ODP_QUEUE_INVALID != q_plain);
+ CU_ASSERT(odp_queue_context_set(q_plain, q_plain_ctx,
+ sizeof(q_plain_ctx)) == 0);
+
+ memset(&capability, 0, sizeof(odp_queue_capability_t));
+ CU_ASSERT(odp_queue_capability(&capability) == 0);
+ CU_ASSERT(odp_schedule_capability(&sched_capa) == 0);
+ /* Create a scheduled ordered queue with explicitly set params */
+ odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
+ param.sched.prio = odp_schedule_default_prio();
+ param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ param.sched.group = ODP_SCHED_GROUP_ALL;
+ param.sched.lock_count = sched_capa.max_ordered_locks;
+ if (param.sched.lock_count == 0)
+ printf("\n Ordered locks NOT supported\n");
+ param.context = q_order_ctx;
+ q_order = odp_queue_create(nq_order, &param);
+ CU_ASSERT_FATAL(ODP_QUEUE_INVALID != q_order);
+
+ /* Check info and call print for a plain queue */
+ CU_ASSERT(odp_queue_info(q_plain, &info) == 0);
+ CU_ASSERT(strcmp(nq_plain, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_PLAIN);
+ CU_ASSERT(info.param.type == odp_queue_type(q_plain));
+ CU_ASSERT(info.param.enq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(info.param.deq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(info.param.order == ODP_QUEUE_ORDER_KEEP);
+ CU_ASSERT(info.param.nonblocking == ODP_BLOCKING);
+ ctx = info.param.context; /* 'char' context ptr */
+ CU_ASSERT(ctx == q_plain_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_plain));
+ odp_queue_print(q_plain);
+
+ /* Check info and call print for a scheduled ordered queue */
+ CU_ASSERT(odp_queue_info(q_order, &info) == 0);
+ CU_ASSERT(strcmp(nq_order, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(info.param.type == odp_queue_type(q_order));
+ CU_ASSERT(info.param.enq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(info.param.deq_mode == ODP_QUEUE_OP_DISABLED);
+ CU_ASSERT(info.param.order == ODP_QUEUE_ORDER_KEEP);
+ CU_ASSERT(info.param.nonblocking == ODP_BLOCKING);
+ ctx = info.param.context; /* 'char' context ptr */
+ CU_ASSERT(ctx == q_order_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_order));
+ CU_ASSERT(info.param.sched.prio == odp_queue_sched_prio(q_order));
+ CU_ASSERT(info.param.sched.sync == odp_queue_sched_type(q_order));
+ CU_ASSERT(info.param.sched.group == odp_queue_sched_group(q_order));
+ ret = odp_queue_lock_count(q_order);
+ CU_ASSERT(ret == param.sched.lock_count);
+ lock_count = ret;
+ CU_ASSERT(info.param.sched.lock_count == lock_count);
+ odp_queue_print(q_order);
+
+ odp_queue_print_all();
+
+ CU_ASSERT(odp_queue_destroy(q_plain) == 0);
+ CU_ASSERT(odp_queue_destroy(q_order) == 0);
+}
+
+static uint32_t alloc_and_enqueue(odp_queue_t queue, odp_pool_t pool,
+ uint32_t num)
+{
+ uint32_t i, ret;
+ odp_buffer_t buf;
+ odp_event_t ev;
+
+ for (i = 0; i < num; i++) {
+ buf = odp_buffer_alloc(pool);
+
+ CU_ASSERT(buf != ODP_BUFFER_INVALID);
+
+ ev = odp_buffer_to_event(buf);
+
+ ret = odp_queue_enq(queue, ev);
+
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ break;
+ }
+
+ return i;
+}
+
+static uint32_t dequeue_and_free_all(odp_queue_t queue)
+{
+ odp_event_t ev;
+ uint32_t num, retries;
+
+ num = 0;
+ retries = 0;
+
+ while (1) {
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ if (retries >= DEQ_RETRIES)
+ return num;
+
+ retries++;
+ continue;
+ }
+
+ retries = 0;
+ num++;
+
+ odp_event_free(ev);
+ }
+
+ return num;
+}
+
+static int enqueue_with_retry(odp_queue_t queue, odp_event_t ev)
+{
+ int i;
+
+ for (i = 0; i < ENQ_RETRIES; i++)
+ if (odp_queue_enq(queue, ev) == 0)
+ return 0;
+
+ return -1;
+}
+
+static int queue_test_worker(void *arg)
+{
+ uint32_t num, retries, num_workers;
+ int thr_id, ret;
+ odp_event_t ev;
+ odp_queue_t queue;
+ test_globals_t *globals = arg;
+
+ thr_id = odp_thread_id();
+ queue = globals->queue;
+ num_workers = globals->num_workers;
+
+ if (num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ retries = 0;
+ num = odp_atomic_fetch_inc_u32(&globals->num_event);
+
+ /* On average, each worker deq-enq each event once */
+ while (num < (num_workers * MAX_NUM_EVENT)) {
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ if (retries < DEQ_RETRIES) {
+ retries++;
+ continue;
+ }
+
+ /* Prevent the thread from starving */
+ num = odp_atomic_fetch_inc_u32(&globals->num_event);
+ retries = 0;
+ continue;
+ }
+
+ globals->thread[thr_id].num_event++;
+
+ ret = enqueue_with_retry(queue, ev);
+
+ CU_ASSERT(ret == 0);
+
+ num = odp_atomic_fetch_inc_u32(&globals->num_event);
+ }
+
+ return 0;
+}
+
+static void reset_thread_stat(test_globals_t *globals)
+{
+ int i;
+
+ odp_atomic_init_u32(&globals->num_event, 0);
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ globals->thread[i].num_event = 0;
+}
+
+static void multithread_test(odp_nonblocking_t nonblocking)
+{
+ odp_shm_t shm;
+ test_globals_t *globals;
+ odp_queue_t queue;
+ odp_queue_param_t qparams;
+ odp_queue_capability_t capa;
+ uint32_t queue_size, max_size;
+ uint32_t num, sum, num_free, i;
+ int num_workers;
+ void *arg;
+
+ CU_ASSERT(odp_queue_capability(&capa) == 0);
+
+ queue_size = 2 * MAX_NUM_EVENT;
+
+ max_size = capa.plain.max_size;
+
+ if (nonblocking == ODP_NONBLOCKING_LF) {
+ if (capa.plain.lockfree.max_num == 0) {
+ printf(" NO LOCKFREE QUEUES. Test skipped.\n");
+ return;
+ }
+
+ max_size = capa.plain.lockfree.max_size;
+ }
+
+ if (max_size && queue_size > max_size)
+ queue_size = max_size;
+
+ num = MAX_NUM_EVENT;
+
+ if (num > queue_size)
+ num = queue_size / 2;
+
+ shm = odp_shm_lookup(GLOBALS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ globals = odp_shm_addr(shm);
+ num_workers = globals->num_workers;
+
+ odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_PLAIN;
+ qparams.size = queue_size;
+ qparams.nonblocking = nonblocking;
+
+ queue = odp_queue_create("queue_test_mt", &qparams);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ globals->queue = queue;
+ reset_thread_stat(globals);
+
+ CU_ASSERT(alloc_and_enqueue(queue, pool, num) == num);
+
+ arg = globals;
+ odp_cunit_thread_create(num_workers, queue_test_worker, &arg, 0, 0);
+
+ /* Wait for worker threads to terminate */
+ odp_cunit_thread_join(num_workers);
+
+ sum = 0;
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ sum += globals->thread[i].num_event;
+
+ CU_ASSERT(sum != 0);
+
+ num_free = dequeue_and_free_all(queue);
+
+ CU_ASSERT(num_free == num);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+static void queue_test_mt_plain_block(void)
+{
+ multithread_test(ODP_BLOCKING);
+}
+
+static void queue_test_mt_plain_nonblock_lf(void)
+{
+ multithread_test(ODP_NONBLOCKING_LF);
+}
+
+odp_testinfo_t queue_suite[] = {
+ ODP_TEST_INFO(queue_test_capa),
+ ODP_TEST_INFO(queue_test_param_init),
+ ODP_TEST_INFO(queue_test_mode),
+ ODP_TEST_INFO(queue_test_max_plain),
+ ODP_TEST_INFO(queue_test_create_destroy_multi),
+ ODP_TEST_INFO(queue_test_burst),
+ ODP_TEST_INFO(queue_test_burst_spmc),
+ ODP_TEST_INFO(queue_test_burst_mpsc),
+ ODP_TEST_INFO(queue_test_burst_spsc),
+ ODP_TEST_INFO(queue_test_burst_lf),
+ ODP_TEST_INFO(queue_test_burst_lf_spmc),
+ ODP_TEST_INFO(queue_test_burst_lf_mpsc),
+ ODP_TEST_INFO(queue_test_burst_lf_spsc),
+ ODP_TEST_INFO(queue_test_pair),
+ ODP_TEST_INFO(queue_test_pair_spmc),
+ ODP_TEST_INFO(queue_test_pair_mpsc),
+ ODP_TEST_INFO(queue_test_pair_spsc),
+ ODP_TEST_INFO(queue_test_pair_lf),
+ ODP_TEST_INFO(queue_test_pair_lf_spmc),
+ ODP_TEST_INFO(queue_test_pair_lf_mpsc),
+ ODP_TEST_INFO(queue_test_pair_lf_spsc),
+ ODP_TEST_INFO(queue_test_param),
+ ODP_TEST_INFO(queue_test_same_name_plain),
+ ODP_TEST_INFO(queue_test_same_name_sched),
+ ODP_TEST_INFO(queue_test_info),
+ ODP_TEST_INFO(queue_test_mt_plain_block),
+ ODP_TEST_INFO(queue_test_mt_plain_nonblock_lf),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t queue_suites[] = {
+ {"Queue", queue_suite_init, queue_suite_term, queue_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(queue_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/random/.gitignore b/test/validation/api/random/.gitignore
index 2c88ec0b8..2c88ec0b8 100644
--- a/test/common_plat/validation/api/random/.gitignore
+++ b/test/validation/api/random/.gitignore
diff --git a/test/validation/api/random/Makefile.am b/test/validation/api/random/Makefile.am
new file mode 100644
index 000000000..743ecf1ff
--- /dev/null
+++ b/test/validation/api/random/Makefile.am
@@ -0,0 +1,5 @@
+include ../Makefile.inc
+
+test_PROGRAMS = random_main
+random_main_SOURCES = random.c
+LDADD += -lm
diff --git a/test/validation/api/random/random.c b/test/validation/api/random/random.c
new file mode 100644
index 000000000..551fe775d
--- /dev/null
+++ b/test/validation/api/random/random.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+static void random_test_get_size(odp_random_kind_t kind)
+{
+ /* odp_random_data may fail to return data on every call (i.e. lack of
+ * entropy). Therefore loop with some sane loop timeout value. Note that
+ * it is not required for implementation to return data in the "timeout"
+ * amount of steps. Rather it is a way for preventing the test to loop
+ * forever.
+ * Also note that the timeout value here is chosen completely
+ * arbitrarily (although considered sane) and neither platforms nor
+ * applications are required to use it.
+ */
+ int32_t ret, timeout_ns = 1 * ODP_TIME_MSEC_IN_NS, sleep_ns = 100;
+ uint32_t bytes = 0;
+ uint8_t buf[32];
+
+ do {
+ ret = odp_random_data(buf + bytes, sizeof(buf) - bytes,
+ kind);
+ bytes += (ret > 0) ? (uint32_t)ret : 0;
+ if (ret < 0 || bytes >= sizeof(buf))
+ break;
+ odp_time_wait_ns(sleep_ns);
+ timeout_ns -= sleep_ns;
+ } while (timeout_ns > 0);
+
+ CU_ASSERT(ret > 0);
+ CU_ASSERT(bytes == (uint32_t)sizeof(buf));
+}
+
+static void random_test_get_size_basic(void)
+{
+ random_test_get_size(ODP_RANDOM_BASIC);
+}
+
+static void random_test_get_size_crypto(void)
+{
+ random_test_get_size(ODP_RANDOM_CRYPTO);
+}
+
+static void random_test_get_size_true(void)
+{
+ random_test_get_size(ODP_RANDOM_TRUE);
+}
+
+static void random_test_kind(void)
+{
+ int32_t rc;
+ uint8_t buf[4096];
+ uint32_t buf_size = sizeof(buf);
+ odp_random_kind_t max_kind = odp_random_max_kind();
+
+ rc = odp_random_data(buf, buf_size, max_kind);
+ CU_ASSERT(rc > 0);
+
+ switch (max_kind) {
+ case ODP_RANDOM_BASIC:
+ rc = odp_random_data(buf, 4, ODP_RANDOM_CRYPTO);
+ CU_ASSERT(rc < 0);
+ /* Fall through */
+
+ case ODP_RANDOM_CRYPTO:
+ rc = odp_random_data(buf, 4, ODP_RANDOM_TRUE);
+ CU_ASSERT(rc < 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void random_test_repeat(void)
+{
+ uint8_t buf1[1024];
+ uint8_t buf2[1024];
+ int32_t rc;
+ uint64_t seed1 = 12345897;
+ uint64_t seed2 = seed1;
+
+ rc = odp_random_test_data(buf1, sizeof(buf1), &seed1);
+ CU_ASSERT(rc == sizeof(buf1));
+
+ rc = odp_random_test_data(buf2, sizeof(buf2), &seed2);
+ CU_ASSERT(rc == sizeof(buf2));
+
+ CU_ASSERT(seed1 == seed2);
+ CU_ASSERT(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+}
+
+static void random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
+{
+ static uint64_t seed;
+
+ switch (kind) {
+ case ODP_RANDOM_BASIC:
+ case ODP_RANDOM_CRYPTO:
+ case ODP_RANDOM_TRUE:
+ for (uint32_t i = 0; i < len;) {
+ int32_t r = odp_random_data(buf + i, len - i, kind);
+
+ CU_ASSERT_FATAL(r >= 0);
+ i += r;
+ }
+ break;
+ default:
+ CU_ASSERT_FATAL(odp_random_test_data(buf, len, &seed) ==
+ (int32_t)len);
+ }
+}
+
+static void random_test_align_and_overflow(odp_random_kind_t kind)
+{
+ uint8_t ODP_ALIGNED_CACHE buf[64];
+
+ for (int align = 8; align < 16; align++) {
+ for (int len = 1; len <= 16; len++) {
+ memset(buf, 1, sizeof(buf));
+ random_data(buf + align, len, kind);
+ CU_ASSERT(buf[align - 1] == 1);
+ CU_ASSERT(buf[align + len] == 1);
+ }
+ }
+}
+
+static void random_test_align_and_overflow_test(void)
+{
+ random_test_align_and_overflow(-1);
+}
+
+static void random_test_align_and_overflow_basic(void)
+{
+ random_test_align_and_overflow(ODP_RANDOM_BASIC);
+}
+
+static void random_test_align_and_overflow_crypto(void)
+{
+ random_test_align_and_overflow(ODP_RANDOM_CRYPTO);
+}
+
+static void random_test_align_and_overflow_true(void)
+{
+ random_test_align_and_overflow(ODP_RANDOM_TRUE);
+}
+
+/*
+ * Randomness tests
+ *
+ * The purpose of the following tests is to check that random data looks random.
+ * Some of the tests are based on [1].
+ *
+ * [1] Special Publication 800-22 revision 1a: A Statistical Test Suite for
+ * Random and Pseudorandom Number Generators for Cryptographic Applications
+ * National Institute of Standards and Technology (NIST), April 2010
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-22r1a.pdf
+ */
+
+/*
+ * Alpha for P-value tests. This does not affect the tests that use a
+ * precomputed critical value.
+ */
+static const double alpha = 0.00000001;
+
+static uint32_t random_bits(int n, odp_random_kind_t kind)
+{
+ static uint8_t buf[32 * 1024];
+ const int size = sizeof(buf);
+ static int cur_n;
+ static odp_random_kind_t cur_kind;
+ static int bit;
+ uint32_t r = 0;
+
+ if (n != cur_n || kind != cur_kind) {
+ cur_n = n;
+ cur_kind = kind;
+ bit = size * 8;
+ }
+
+ for (int i = 0; i < n; ) {
+ if (bit >= size * 8) {
+ random_data(buf, size, kind);
+ bit = 0;
+ }
+ if (n - i >= 8 && !(bit & 7)) {
+ /* Full byte. */
+ r <<= 8;
+ r |= buf[bit / 8];
+ bit += 8;
+ i += 8;
+ continue;
+ }
+ /* Single bit. */
+ r <<= 1;
+ r |= (buf[bit / 8] >> (7 - (bit & 7))) & 1;
+ bit++;
+ i++;
+ }
+
+ return r;
+}
+
+static const char *res_str(int pass)
+{
+ return pass ? "pass" : "FAIL";
+}
+
+/*
+ * Pearson's chi-squared goodness-of-fit test for uniform distribution. The test
+ * is run with multiple different bit block lengths. The null hypothesis is that
+ * each possible bit pattern is equally likely. If the chi-squared statistic is
+ * equal to or larger than the critical value, we conclude that the data is
+ * biased.
+ */
+static void random_test_frequency(odp_random_kind_t kind)
+{
+ /* Mean number of hits per cell. */
+ const uint32_t expected = 50;
+
+ /* From LibreOffice CHISQ.INV.RT(0.00000001; df). */
+ const double critical[] = {
+ 32.8413, 40.1300, 50.8129, 68.0293,
+ 97.0285, 147.463, 237.614, 402.685,
+ 711.187, 1297.50, 2426.64, 4623.37,
+ 8929.74, 17419.3, 34224.0, 67587.1,
+ };
+
+ printf("\n\n");
+
+ for (int bits = 1; bits <= 8; bits++) {
+ const uint32_t cells = 1 << bits;
+ const uint64_t num = expected * cells;
+ uint64_t f[256] = { 0 };
+
+ for (uint64_t i = 0; i < num; i++)
+ f[random_bits(bits, kind)]++;
+
+ double chisq = 0, crit = critical[bits - 1];
+
+ for (uint64_t i = 0; i < cells; i++) {
+ double dif = (double)f[i] - expected;
+
+ chisq += dif * dif / expected;
+ }
+
+ printf("bits %d ; chisq %g ; df %u ; crit %g ; %s\n",
+ bits, chisq, cells - 1, crit, res_str(chisq < crit));
+
+ CU_ASSERT(chisq < crit);
+ }
+
+ printf("\n");
+}
+
+static void random_test_frequency_crypto(void)
+{
+ random_test_frequency(ODP_RANDOM_CRYPTO);
+}
+
+static void random_test_frequency_true(void)
+{
+ random_test_frequency(ODP_RANDOM_TRUE);
+}
+
+/*
+ * Pearson's chi-squared test for independence. The null hypothesis is that the
+ * values of different bytes are independent. If the chi-squared statistic is
+ * equal to or greater than the critical value, we conclude that the bytes in
+ * the byte pairs selected from the data are not independent.
+ */
+static void random_test_independence(odp_random_kind_t kind)
+{
+ /* Mean number of hits per cell. */
+ const uint32_t expected = 100;
+
+ /* LibreOffice CHISQ.INV.RT(0.00000001; 255*255) */
+ const double critical = 67069.2;
+
+ printf("\n\n");
+ printf("critical value: %g\n", critical);
+
+ for (int lag = 1; lag <= 8; lag++) {
+ const uint32_t cells = 256 * 256;
+ const uint64_t num = expected * cells;
+ const int size = 32 * 1024;
+ int pos = size;
+ uint8_t buf[size];
+ uint64_t freq[256][256] = { { 0 } };
+ uint32_t row[256] = { 0 }, col[256] = { 0 };
+
+ for (uint64_t i = 0; i < num; i++) {
+ if (pos + lag >= size) {
+ random_data(buf, size, kind);
+ pos = 0;
+ }
+
+ uint8_t r = buf[pos], c = buf[pos + lag];
+
+ freq[r][c]++;
+ row[r]++;
+ col[c]++;
+ pos++;
+ }
+
+ double chisq = 0;
+
+ for (int i = 0; i < 256; i++) {
+ for (int j = 0; j < 256; j++) {
+ double expect = (double)row[i] * (double)col[j] / (double)num;
+ double diff = (double)freq[i][j] - expect;
+
+ chisq += diff * diff / expect;
+ }
+ }
+
+ printf("lag %d ; chisq %g ; %s\n",
+ lag, chisq, res_str(chisq < critical));
+
+ CU_ASSERT(chisq < critical);
+ }
+
+ printf("\n");
+}
+
+static void random_test_independence_crypto(void)
+{
+ random_test_independence(ODP_RANDOM_CRYPTO);
+}
+
+/*
+ * Sec. 2.3 Runs Test [1]. The test is run with several different n values. A
+ * few long runs may go unnoticed if n is large, while longer period
+ * non-randomness may go unnoticed if n is small.
+ */
+static void random_test_runs(odp_random_kind_t kind)
+{
+ printf("\n\n");
+ printf("alpha: %g\n", alpha);
+
+ for (int n = 128; n <= 1024 * 1024; n *= 2) {
+ double pi, P_value;
+ int bit = random_bits(1, kind);
+ uint64_t ones = bit, V = 1;
+
+ for (int i = 1; i < n; i++) {
+ int prev_bit = bit;
+
+ bit = random_bits(1, kind);
+ ones += bit;
+ V += (bit != prev_bit);
+ }
+
+ pi = (double)ones / n;
+
+ /*
+ * Skip the prerequisite frequency test (Sec. 2.3.4
+ * step (2)), since it's effectively the same as
+ * random_test_frequency() with bits = 1.
+ */
+
+ P_value = erfc(fabs(V - 2 * n * pi * (1 - pi)) /
+ (2 * sqrt(2 * n) * pi * (1 - pi)));
+ printf("n %d ; pi %g ; V %" PRIu64 " ; P_value %g ; %s\n",
+ n, pi, V, P_value, res_str(P_value >= alpha));
+
+ CU_ASSERT(P_value >= alpha);
+ }
+
+ printf("\n");
+}
+
+static void random_test_runs_crypto(void)
+{
+ random_test_runs(ODP_RANDOM_CRYPTO);
+}
+
+static void random_test_runs_true(void)
+{
+ random_test_runs(ODP_RANDOM_TRUE);
+}
+
+static int mx_bit(uint32_t *m, int r, int c)
+{
+ return (m[r] >> c) & 1;
+}
+
+static int mx_rank(uint32_t *m, int rows, int cols)
+{
+ int rank = 0;
+
+ for (int r = 0, c = 0; r < rows && c < cols; ) {
+ int swapped = r;
+
+ if (!mx_bit(m, r, c)) {
+ for (int sr = r + 1; sr < rows; sr++) {
+ if (mx_bit(m, sr, c)) {
+ uint32_t t = m[r];
+
+ m[r] = m[sr];
+ m[sr] = t;
+ swapped = sr;
+ break;
+ }
+ }
+ if (!mx_bit(m, r, c)) {
+ c++;
+ continue;
+ }
+ }
+
+ rank++;
+
+ for (int sr = swapped + 1; sr < rows; sr++) {
+ if (mx_bit(m, sr, c))
+ m[sr] ^= m[r];
+ }
+
+ r++;
+ }
+
+ return rank;
+}
+
+/*
+ * Sec. 2.5 Binary Matrix Rank Test [1].
+ */
+static void random_test_matrix_rank(odp_random_kind_t kind)
+{
+ const int N = 100; /* [1] recommends at least 38. */
+ const double p[3] = { 0.2888, 0.5776, 0.1336 };
+
+ printf("\n\n");
+ printf("alpha: %g\n", alpha);
+ printf("N: %d\n", N);
+
+ int F[3] = { 0 };
+
+ for (int i = 0; i < N; i++) {
+ uint32_t mx[32];
+
+ random_data((uint8_t *)mx, sizeof(mx), kind);
+
+ switch (mx_rank(mx, 32, 32)) {
+ case 32:
+ F[0]++;
+ break;
+ case 31:
+ F[1]++;
+ break;
+ default:
+ F[2]++;
+ }
+ }
+
+ double chisq, P_value;
+
+ chisq = pow(F[0] - p[0] * N, 2) / (p[0] * N) +
+ pow(F[1] - p[1] * N, 2) / (p[1] * N) +
+ pow(F[2] - p[2] * N, 2) / (p[2] * N);
+ P_value = exp(-chisq / 2);
+
+ printf("P_value %g ; %s\n", P_value, res_str(P_value >= alpha));
+
+ CU_ASSERT(P_value >= alpha);
+}
+
+static void random_test_matrix_rank_crypto(void)
+{
+ random_test_matrix_rank(ODP_RANDOM_CRYPTO);
+}
+
+static void random_test_matrix_rank_true(void)
+{
+ random_test_matrix_rank(ODP_RANDOM_TRUE);
+}
+
+static int check_kind_basic(void)
+{
+ return odp_random_max_kind() >= ODP_RANDOM_BASIC;
+}
+
+static int check_kind_crypto(void)
+{
+ return odp_random_max_kind() >= ODP_RANDOM_CRYPTO;
+}
+
+static int check_kind_true(void)
+{
+ return odp_random_max_kind() >= ODP_RANDOM_TRUE;
+}
+
+odp_testinfo_t random_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(random_test_get_size_basic, check_kind_basic),
+ ODP_TEST_INFO_CONDITIONAL(random_test_get_size_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_get_size_true, check_kind_true),
+ ODP_TEST_INFO(random_test_kind),
+ ODP_TEST_INFO(random_test_repeat),
+ ODP_TEST_INFO(random_test_align_and_overflow_test),
+ ODP_TEST_INFO_CONDITIONAL(random_test_align_and_overflow_basic, check_kind_basic),
+ ODP_TEST_INFO_CONDITIONAL(random_test_align_and_overflow_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_align_and_overflow_true, check_kind_true),
+ ODP_TEST_INFO_CONDITIONAL(random_test_frequency_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_frequency_true, check_kind_true),
+ ODP_TEST_INFO_CONDITIONAL(random_test_independence_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_runs_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_runs_true, check_kind_true),
+ ODP_TEST_INFO_CONDITIONAL(random_test_matrix_rank_crypto, check_kind_crypto),
+ ODP_TEST_INFO_CONDITIONAL(random_test_matrix_rank_true, check_kind_true),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t random_suites[] = {
+ {"Random", NULL, NULL, random_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(random_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/scheduler/.gitignore b/test/validation/api/scheduler/.gitignore
new file mode 100644
index 000000000..6892e6224
--- /dev/null
+++ b/test/validation/api/scheduler/.gitignore
@@ -0,0 +1,2 @@
+scheduler_main
+scheduler_no_predef_groups
diff --git a/test/validation/api/scheduler/Makefile.am b/test/validation/api/scheduler/Makefile.am
new file mode 100644
index 000000000..fc41ae5fe
--- /dev/null
+++ b/test/validation/api/scheduler/Makefile.am
@@ -0,0 +1,5 @@
+include ../Makefile.inc
+
+test_PROGRAMS = scheduler_main scheduler_no_predef_groups
+scheduler_main_SOURCES = scheduler.c
+ scheduler_no_predef_groups_SOURCES = scheduler_no_predef_groups.c
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
new file mode 100644
index 000000000..8dddd8d8f
--- /dev/null
+++ b/test/validation/api/scheduler/scheduler.c
@@ -0,0 +1,3770 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include <odp/helper/odph_api.h>
+
+#define MAX_WORKERS 32
+#define MAX_ORDERED_LOCKS 2
+#define MAX_POOL_SIZE (1024 * 1024)
+#define MSG_POOL_SIZE (64 * 1024)
+#define QUEUES_PER_PRIO 16
+#define BUF_SIZE 64
+#define BUFS_PER_QUEUE 100
+#define BUFS_PER_QUEUE_EXCL 10000
+#define BURST_BUF_SIZE 4
+#define NUM_BUFS_PAUSE 1000
+#define NUM_BUFS_BEFORE_PAUSE 10
+#define NUM_GROUPS 2
+#define MAX_QUEUES (64 * 1024)
+
+#define DEFAULT_NUM_EV 50
+
+#define MAX_FLOWS 16
+#define FLOW_TEST_NUM_EV (10 * MAX_FLOWS)
+
+#define GLOBALS_SHM_NAME "test_globals"
+#define MSG_POOL_NAME "msg_pool"
+#define QUEUE_CTX_POOL_NAME "queue_ctx_pool"
+#define SHM_THR_ARGS_NAME "shm_thr_args"
+
+#define ONE_Q 1
+#define ONE_PRIO 1
+
+#define SCHD_ONE 0
+#define SCHD_MULTI 1
+
+#define DISABLE_EXCL_ATOMIC 0
+#define ENABLE_EXCL_ATOMIC 1
+
+#define MAGIC 0xdeadbeef
+#define MAGIC1 0xdeadbeef
+#define MAGIC2 0xcafef00d
+
+#define CHAOS_NUM_QUEUES 6
+#define CHAOS_NUM_BUFS_PER_QUEUE 6
+#define CHAOS_NUM_ROUNDS 1000
+#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE)
+#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000)
+#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uint32_t)(uintptr_t)p)
+#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)n)
+
+#define WAIT_TIMEOUT (100 * ODP_TIME_MSEC_IN_NS)
+#define WAIT_ROUNDS 5
+#define WAIT_TOLERANCE (15 * ODP_TIME_MSEC_IN_NS)
+#define WAIT_1MS_RETRIES 1000
+
+#define SCHED_AND_PLAIN_ROUNDS 10000
+#define ATOMICITY_ROUNDS 100
+
+#define FIFO_MAX_EVENTS 151
+
+/* Test global variables */
+typedef struct {
+	int num_workers;
+	odp_barrier_t barrier;
+	int buf_count;
+	int buf_count_cpy;
+	int queues_per_prio;
+	int test_debug_print;
+	odp_shm_t shm_glb;
+	odp_shm_t shm_args;
+	odp_pool_t pool;
+	odp_pool_t queue_ctx_pool;
+	uint32_t max_sched_queue_size;
+	uint64_t num_flows;
+	odp_ticketlock_t lock;
+	odp_spinlock_t atomic_lock;
+	/* Queue and state used by the atomicity test */
+	struct {
+		odp_queue_t handle;
+		odp_atomic_u32_t state;
+	} atomicity_q;
+	/* Queues used by the chaos test; name kept for debug prints */
+	struct {
+		odp_queue_t handle;
+		char name[ODP_QUEUE_NAME_LEN];
+	} chaos_q[CHAOS_NUM_QUEUES];
+	/* Queue pair for the scheduled-and-plain queue test */
+	struct {
+		odp_queue_t sched;
+		odp_queue_t plain;
+	} sched_and_plain_q;
+	/* Handshake flags between main and helper thread in order-wait tests */
+	struct {
+		odp_atomic_u32_t helper_ready;
+		odp_atomic_u32_t helper_active;
+	} order_wait;
+	/* Shared state for the FIFO ordering test */
+	struct {
+		odp_barrier_t barrier;
+		int multi;
+		odp_queue_t queue;
+		odp_pool_t pool;
+		uint32_t num_events;
+		uint32_t num_enq;
+		uint32_t burst;
+		odp_atomic_u32_t cur_thr;
+		uint16_t num_thr;
+		odp_event_t event[FIFO_MAX_EVENTS];
+	} fifo;
+
+} test_globals_t;
+
+/* Arguments passed to worker threads */
+typedef struct {
+	test_globals_t *globals;
+	odp_schedule_sync_t sync;
+	int num_queues;
+	int num_prio;
+	int num_bufs;
+	int num_workers;
+	int enable_schd_multi;
+	int enable_excl_atomic;
+} thread_args_t;
+
+/* Sequence numbers carried inside each test buffer */
+typedef struct {
+	uint64_t sequence;
+	uint64_t lock_sequence[MAX_ORDERED_LOCKS];
+	uint64_t output_sequence;
+} buf_contents;
+
+/* Per-queue context record allocated from the queue context pool */
+typedef struct {
+	odp_buffer_t ctx_handle;
+	odp_queue_t pq_handle;
+	uint64_t sequence;
+	uint64_t lock_sequence[MAX_ORDERED_LOCKS];
+} queue_context;
+
+/* Event payload used by the chaos test */
+typedef struct {
+	uint64_t evno;
+	uint64_t seqno;
+} chaos_buf;
+
+/* Shared test globals (shm-backed; presumably set up in suite init —
+ * the initialization code is outside this chunk) */
+static test_globals_t *globals;
+
+/* Empty the scheduler: keep scheduling with a 100 ms timeout until no more
+ * events arrive, freeing each one. Returns the number of events freed. */
+static int drain_queues(void)
+{
+	const uint64_t timeout = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+	int num_freed = 0;
+
+	for (;;) {
+		odp_event_t ev = odp_schedule(NULL, timeout);
+
+		if (ev == ODP_EVENT_INVALID)
+			break;
+
+		odp_event_free(ev);
+		num_freed++;
+	}
+
+	return num_freed;
+}
+
+/* Release the scheduling context held by the calling thread, if the sync
+ * type (atomic or ordered) implies one. Parallel queues hold no context. */
+static void release_context(odp_schedule_sync_t sync)
+{
+	switch (sync) {
+	case ODP_SCHED_SYNC_ATOMIC:
+		odp_schedule_release_atomic();
+		break;
+	case ODP_SCHED_SYNC_ORDERED:
+		odp_schedule_release_ordered();
+		break;
+	default:
+		break;
+	}
+}
+
+/* Fill a schedule config with the given byte pattern, then verify that
+ * odp_schedule_config_init() resets it to the default values. */
+static void test_init(uint8_t fill)
+{
+	odp_schedule_config_t config;
+
+	memset(&config, fill, sizeof(config));
+	odp_schedule_config_init(&config);
+
+	/* Flow aware scheduling disabled by default */
+	CU_ASSERT(config.max_flow_id == 0);
+
+	/* All predefined groups enabled by default */
+	CU_ASSERT(config.sched_group.all);
+	CU_ASSERT(config.sched_group.control);
+	CU_ASSERT(config.sched_group.worker);
+}
+
+/* Run the config default-value check with both all-zeros and all-ones
+ * memory fill patterns. */
+static void scheduler_test_init(void)
+{
+	const uint8_t pattern[2] = {0x00, 0xff};
+
+	for (int i = 0; i < 2; i++)
+		test_init(pattern[i]);
+}
+
+/* Check that scheduler capabilities are non-zero and consistent with the
+ * overall queue capability (scheduled queues are a subset of all queues). */
+static void scheduler_test_capa(void)
+{
+	odp_queue_capability_t queue_capa;
+	odp_schedule_capability_t sched_capa;
+
+	memset(&sched_capa, 0, sizeof(sched_capa));
+	CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+	CU_ASSERT_FATAL(!odp_queue_capability(&queue_capa));
+
+	CU_ASSERT(sched_capa.max_groups > 0);
+	CU_ASSERT(sched_capa.max_prios > 0);
+	CU_ASSERT(sched_capa.max_queues > 0);
+	CU_ASSERT(queue_capa.max_queues >= sched_capa.max_queues);
+}
+
+/* Initialize queue parameters for a scheduled queue: parallel sync,
+ * default priority, ALL group. */
+static void sched_queue_param_init(odp_queue_param_t *param)
+{
+	odp_queue_param_init(param);
+
+	param->type = ODP_QUEUE_TYPE_SCHED;
+	param->sched.group = ODP_SCHED_GROUP_ALL;
+	param->sched.prio = odp_schedule_default_prio();
+	param->sched.sync = ODP_SCHED_SYNC_PARALLEL;
+}
+
+/* Verify odp_schedule_wait_time() usage: ODP_SCHED_NO_WAIT must return
+ * (nearly) immediately, and WAIT_ROUNDS timed waits of WAIT_TIMEOUT each
+ * must take duration_ns within +/- WAIT_TOLERANCE. */
+static void scheduler_test_wait_time(void)
+{
+	int i;
+	odp_queue_t queue;
+	odp_event_t ev;
+	uint64_t wait_time;
+	odp_queue_param_t qp;
+	odp_time_t lower_limit, upper_limit;
+	odp_time_t start_time, end_time, diff;
+	uint64_t duration_ns = WAIT_ROUNDS * WAIT_TIMEOUT;
+
+	/* check on read */
+	wait_time = odp_schedule_wait_time(0);
+	wait_time = odp_schedule_wait_time(1);
+
+	/* check ODP_SCHED_NO_WAIT */
+	sched_queue_param_init(&qp);
+	queue = odp_queue_create("dummy_queue", &qp);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	wait_time = odp_schedule_wait_time(WAIT_TIMEOUT);
+	start_time = odp_time_local();
+	ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+	end_time = odp_time_local();
+	CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
+
+	diff = odp_time_diff(end_time, start_time);
+	lower_limit = ODP_TIME_NULL;
+	upper_limit = odp_time_local_from_ns(WAIT_TOLERANCE);
+
+	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
+	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
+
+	/* check time correctness */
+	printf("\nTesting wait time for %.3f sec ...\n", (double)duration_ns / ODP_TIME_SEC_IN_NS);
+	start_time = odp_time_local();
+	/* The queue is empty, so each call should time out after wait_time */
+	for (i = 0; i < WAIT_ROUNDS; i++) {
+		ev = odp_schedule(NULL, wait_time);
+		CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
+	}
+	end_time = odp_time_local();
+
+	diff = odp_time_diff(end_time, start_time);
+	lower_limit = odp_time_local_from_ns(duration_ns - WAIT_TOLERANCE);
+	upper_limit = odp_time_local_from_ns(duration_ns + WAIT_TOLERANCE);
+
+	if (odp_time_cmp(diff, lower_limit) <= 0) {
+		ODPH_ERR("Exceed lower limit: diff is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+			 odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
+		CU_FAIL("Exceed lower limit\n");
+	}
+
+	if (odp_time_cmp(diff, upper_limit) >= 0) {
+		ODPH_ERR("Exceed upper limit: diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+			 odp_time_to_ns(diff), odp_time_to_ns(upper_limit));
+		CU_FAIL("Exceed upper limit\n");
+	}
+
+	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+}
+
+/* Verify that the number of priority levels is positive and that the
+ * min/default/max priority values are consistently ordered. */
+static void scheduler_test_num_prio(void)
+{
+	int prio_min = odp_schedule_min_prio();
+	int prio_max = odp_schedule_max_prio();
+	int prio_def = odp_schedule_default_prio();
+	int prio_num = odp_schedule_num_prio();
+
+	CU_ASSERT(prio_num > 0);
+	CU_ASSERT(prio_min <= prio_max);
+	CU_ASSERT(prio_min <= prio_def);
+	CU_ASSERT(prio_def <= prio_max);
+	CU_ASSERT(prio_num == (prio_max - prio_min + 1));
+}
+
+/* For each sync type: enqueue one marked event, schedule it back, release
+ * the scheduling context, and verify the queue can then be destroyed. */
+static void scheduler_test_queue_destroy(void)
+{
+	odp_pool_t p;
+	odp_pool_param_t params;
+	odp_queue_param_t qp;
+	odp_queue_t queue, from;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint32_t *u32;
+	int i, ret;
+	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+				      ODP_SCHED_SYNC_ATOMIC,
+				      ODP_SCHED_SYNC_ORDERED};
+
+	odp_pool_param_init(&params);
+	params.buf.size = 100;
+	params.buf.align = 0;
+	params.buf.num = 1;
+	params.type = ODP_POOL_BUFFER;
+
+	p = odp_pool_create("sched_destroy_pool", &params);
+
+	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+	sched_queue_param_init(&qp);
+
+	for (i = 0; i < 3; i++) {
+		qp.sched.sync = sync[i];
+		queue = odp_queue_create("sched_destroy_queue", &qp);
+
+		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+		buf = odp_buffer_alloc(p);
+
+		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+		/* Mark the buffer so we can verify it comes back intact */
+		u32 = odp_buffer_addr(buf);
+		u32[0] = MAGIC;
+
+		ev = odp_buffer_to_event(buf);
+
+		ret = odp_queue_enq(queue, ev);
+		CU_ASSERT(ret == 0);
+		if (ret)
+			odp_buffer_free(buf);
+
+		ev = odp_schedule(&from, ODP_SCHED_WAIT);
+
+		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+
+		CU_ASSERT_FATAL(from == queue);
+
+		buf = odp_buffer_from_event(ev);
+		u32 = odp_buffer_addr(buf);
+
+		CU_ASSERT_FATAL(u32[0] == MAGIC);
+
+		odp_buffer_free(buf);
+		release_context(qp.sched.sync);
+
+		/* Make sure atomic/ordered context is released */
+		CU_ASSERT(drain_queues() == 0);
+
+		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+	}
+
+	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+/* Exercise all four scheduler dequeue variants and check that every marked
+ * event enqueued is received intact. Round 0 uses ODP_SCHED_WAIT, round 1
+ * odp_schedule_multi_wait(), round 2 polls with ODP_SCHED_NO_WAIT, round 3
+ * polls with odp_schedule_multi_no_wait(). */
+static void scheduler_test_wait(void)
+{
+	odp_pool_t p;
+	odp_pool_param_t pool_param;
+	odp_queue_param_t queue_param;
+	odp_queue_t queue, from;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint32_t *u32;
+	uint32_t i, j, num_enq, retry;
+	int ret;
+	uint32_t num_ev = 50;
+	uint32_t num_retry = 1000;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 10;
+	pool_param.buf.num = num_ev;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	p = odp_pool_create("sched_test_wait", &pool_param);
+
+	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+	sched_queue_param_init(&queue_param);
+	queue = odp_queue_create("sched_test_wait", &queue_param);
+
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	for (i = 0; i < 4; i++) {
+		num_enq = 0;
+
+		for (j = 0; j < num_ev; j++) {
+			buf = odp_buffer_alloc(p);
+
+			CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+			u32 = odp_buffer_addr(buf);
+			u32[0] = MAGIC;
+
+			ev = odp_buffer_to_event(buf);
+			ret = odp_queue_enq(queue, ev);
+			CU_ASSERT(ret == 0);
+			if (ret) {
+				odp_buffer_free(buf);
+				continue;
+			}
+
+			num_enq++;
+		}
+
+		CU_ASSERT(num_enq == num_ev);
+
+		/* Receive all enqueued events with the variant of round i */
+		for (j = 0; j < num_enq; j++) {
+			if (i == 0) {
+				ev = odp_schedule(&from, ODP_SCHED_WAIT);
+			} else if (i == 1) {
+				ret = odp_schedule_multi_wait(&from, &ev, 1);
+				CU_ASSERT_FATAL(ret == 1);
+			} else if (i == 2) {
+				retry = 0;
+				do {
+					ev = odp_schedule(&from,
+							  ODP_SCHED_NO_WAIT);
+					retry++;
+				} while (ev == ODP_EVENT_INVALID &&
+					 retry < num_retry);
+			} else {
+				retry = 0;
+				do {
+					ret = odp_schedule_multi_no_wait(&from,
+									 &ev,
+									 1);
+					retry++;
+				} while (ret == 0 && retry < num_retry);
+				CU_ASSERT_FATAL(ret == 1);
+			}
+
+			CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+			CU_ASSERT(from == queue);
+
+			buf = odp_buffer_from_event(ev);
+			u32 = odp_buffer_addr(buf);
+
+			CU_ASSERT(u32[0] == MAGIC);
+
+			odp_buffer_free(buf);
+		}
+	}
+
+	/* Make sure that scheduler is empty */
+	drain_queues();
+
+	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+/* Create a scheduled queue of each sync type with an explicit size, fill it
+ * to capacity, and verify that exactly queue_size events are scheduled
+ * back from it. */
+static void scheduler_test_queue_size(void)
+{
+	odp_schedule_config_t default_config;
+	odp_pool_t pool;
+	odp_pool_param_t pool_param;
+	odp_queue_param_t queue_param;
+	odp_queue_t queue, from;
+	odp_event_t ev;
+	odp_buffer_t buf;
+	uint32_t i, j, queue_size, num;
+	int ret;
+	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+				      ODP_SCHED_SYNC_ATOMIC,
+				      ODP_SCHED_SYNC_ORDERED};
+
+	queue_size = DEFAULT_NUM_EV;
+
+	/* Scheduler has been already configured. Use default config as max
+	 * queue size. */
+	odp_schedule_config_init(&default_config);
+	if (default_config.queue_size &&
+	    queue_size > default_config.queue_size)
+		queue_size = default_config.queue_size;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 100;
+	pool_param.buf.align = 0;
+	pool_param.buf.num = DEFAULT_NUM_EV;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("test_queue_size", &pool_param);
+
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	for (i = 0; i < 3; i++) {
+		/* Ensure that scheduler is empty */
+		for (j = 0; j < 10;) {
+			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+			CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+			if (ev != ODP_EVENT_INVALID)
+				odp_event_free(ev);
+			else
+				j++;
+		}
+
+		sched_queue_param_init(&queue_param);
+		queue_param.sched.sync = sync[i];
+		queue_param.size = queue_size;
+
+		queue = odp_queue_create("test_queue_size", &queue_param);
+
+		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+		/* Fill the queue to its configured size */
+		for (j = 0; j < queue_size; j++) {
+			buf = odp_buffer_alloc(pool);
+			CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+			ev = odp_buffer_to_event(buf);
+			ret = odp_queue_enq(queue, ev);
+			CU_ASSERT(ret == 0);
+
+			if (ret)
+				odp_event_free(ev);
+		}
+
+		/* Poll until all events are received (bounded iteration) */
+		num = 0;
+		for (j = 0; j < 100 * DEFAULT_NUM_EV; j++) {
+			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+			if (ev == ODP_EVENT_INVALID)
+				continue;
+
+			CU_ASSERT(from == queue);
+			odp_event_free(ev);
+			num++;
+		}
+
+		CU_ASSERT(num == queue_size);
+
+		CU_ASSERT(drain_queues() == 0);
+
+		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+	}
+
+	CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+/* Fill the configured number of scheduled queues to capacity (buffer count
+ * bounded by pool capability and MAX_POOL_SIZE), recirculate the events
+ * through the scheduler, then drain and destroy everything. Repeated for
+ * each sync type. */
+static void scheduler_test_full_queues(void)
+{
+	odp_schedule_config_t default_config;
+	odp_pool_t pool;
+	odp_pool_capability_t pool_capa;
+	odp_pool_param_t pool_param;
+	odp_schedule_capability_t sched_capa;
+	odp_queue_param_t queue_param;
+	odp_event_t ev;
+	uint32_t i, j, k, num_bufs, events_per_queue, num_queues;
+	uint32_t queue_size = 2048;
+	int ret;
+	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+				      ODP_SCHED_SYNC_ATOMIC,
+				      ODP_SCHED_SYNC_ORDERED};
+
+	CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+	if (sched_capa.max_queue_size && queue_size > sched_capa.max_queue_size)
+		queue_size = sched_capa.max_queue_size;
+
+	/* Scheduler has been already configured. Use default config as queue
+	 * size and queue count. */
+	odp_schedule_config_init(&default_config);
+	if (default_config.queue_size)
+		queue_size = default_config.queue_size;
+	num_queues = default_config.num_queues;
+
+	odp_queue_t queue[num_queues];
+
+	CU_ASSERT_FATAL(!odp_pool_capability(&pool_capa));
+	num_bufs = num_queues * queue_size;
+	if (pool_capa.buf.max_num && num_bufs > pool_capa.buf.max_num)
+		num_bufs = pool_capa.buf.max_num;
+	if (num_bufs > MAX_POOL_SIZE)
+		num_bufs = MAX_POOL_SIZE;
+	events_per_queue = num_bufs / num_queues;
+
+	/* Make sure there is at least one event for each queue */
+	while (events_per_queue == 0) {
+		num_queues--;
+		events_per_queue = num_bufs / num_queues;
+	}
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 100;
+	pool_param.buf.align = 0;
+	pool_param.buf.num = num_bufs;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("test_full_queues", &pool_param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Ensure that scheduler is empty */
+	drain_queues();
+
+	/* Run test for each scheduler synchronization type */
+	for (i = 0; i < 3; i++) {
+		uint64_t wait_time;
+		uint32_t num_enq = 0;
+		uint32_t num = 0;
+
+		/* Create and fill all queues */
+		for (j = 0; j < num_queues; j++) {
+			sched_queue_param_init(&queue_param);
+			queue_param.sched.sync = sync[i];
+			queue_param.size = events_per_queue;
+
+			queue[j] = odp_queue_create("test_full_queues",
+						    &queue_param);
+			CU_ASSERT_FATAL(queue[j] != ODP_QUEUE_INVALID);
+
+			for (k = 0; k < events_per_queue; k++) {
+				odp_buffer_t buf = odp_buffer_alloc(pool);
+
+				CU_ASSERT(buf != ODP_BUFFER_INVALID);
+				if (buf == ODP_BUFFER_INVALID)
+					continue;
+
+				ev = odp_buffer_to_event(buf);
+				ret = odp_queue_enq(queue[j], ev);
+				CU_ASSERT(ret == 0);
+				if (ret) {
+					odp_event_free(ev);
+					continue;
+				}
+				num_enq++;
+			}
+		}
+		/* Run normal scheduling rounds */
+		wait_time = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+		for (j = 0; j < num_bufs; j++) {
+			odp_queue_t src_queue;
+
+			ev = odp_schedule(&src_queue, wait_time);
+			if (ev == ODP_EVENT_INVALID)
+				continue;
+
+			/* Put the event back to the queue it came from */
+			ret = odp_queue_enq(src_queue, ev);
+			CU_ASSERT(ret == 0);
+			if (ret) {
+				odp_event_free(ev);
+				num_enq--;
+			}
+		}
+		/* Clean-up */
+		wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+		for (j = 0; j < num_enq; j++) {
+			ev = odp_schedule(NULL, wait_time);
+
+			if (ev == ODP_EVENT_INVALID)
+				continue;
+
+			odp_event_free(ev);
+			num++;
+		}
+
+		CU_ASSERT(num == num_enq);
+		CU_ASSERT(drain_queues() == 0);
+
+		for (j = 0; j < num_queues; j++)
+			CU_ASSERT_FATAL(odp_queue_destroy(queue[j]) == 0);
+	}
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Create the maximum number of scheduled queues (up to MAX_QUEUES, capped by
+ * implementation capability) of the given sync type, then pass a single
+ * event through all of them for a few rounds, checking that it always
+ * arrives from the expected queue. All resources are destroyed at the end. */
+static void scheduler_test_max_queues(odp_schedule_sync_t sync)
+{
+	odp_pool_t pool;
+	odp_pool_param_t pool_param;
+	odp_schedule_capability_t sched_capa;
+	odp_queue_param_t queue_param;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	odp_queue_t src_queue;
+	uint64_t wait_time;
+	uint32_t i, src_idx;
+	uint32_t num_rounds = 4;
+	/* Use the shared MAX_QUEUES limit instead of a duplicated literal */
+	uint32_t num_queues = MAX_QUEUES;
+
+	CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
+	if (num_queues > sched_capa.max_queues)
+		num_queues = sched_capa.max_queues;
+
+	CU_ASSERT_FATAL(num_queues > 0);
+
+	odp_queue_t queue[num_queues];
+
+	odp_pool_param_init(&pool_param);
+	pool_param.type = ODP_POOL_BUFFER;
+	pool_param.buf.size = 100;
+	pool_param.buf.num = 1;
+
+	pool = odp_pool_create("test_max_queues", &pool_param);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Ensure that scheduler is empty */
+	drain_queues();
+
+	sched_queue_param_init(&queue_param);
+	queue_param.sched.sync = sync;
+
+	for (i = 0; i < num_queues; i++) {
+		queue[i] = odp_queue_create("test_max_queues", &queue_param);
+		if (queue[i] == ODP_QUEUE_INVALID)
+			ODPH_ERR("Queue create failed %u/%u\n", i, num_queues);
+
+		CU_ASSERT_FATAL(queue[i] != ODP_QUEUE_INVALID);
+	}
+
+	buf = odp_buffer_alloc(pool);
+	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+	ev = odp_buffer_to_event(buf);
+
+	CU_ASSERT_FATAL(odp_queue_enq(queue[0], ev) == 0);
+
+	wait_time = odp_schedule_wait_time(500 * ODP_TIME_MSEC_IN_NS);
+	src_idx = 0;
+
+	/* Send one event through all queues couple of times */
+	for (i = 0; i < (num_rounds * num_queues); i++) {
+		uint32_t round = i / num_queues;
+
+		ev = odp_schedule(&src_queue, wait_time);
+		if (ev == ODP_EVENT_INVALID) {
+			ODPH_ERR("Event was lost. Round %u, queue idx %u\n", round, src_idx);
+			CU_FAIL("Event was lost\n");
+			break;
+		}
+
+		/* The single event must always come from the expected queue */
+		CU_ASSERT(src_queue == queue[src_idx]);
+
+		src_idx++;
+		if (src_idx == num_queues)
+			src_idx = 0;
+
+		if (odp_queue_enq(queue[src_idx], ev)) {
+			ODPH_ERR("Enqueue failed. Round %u, queue idx %u\n", round, src_idx);
+			/* Semicolon added for consistency with other CU_FAIL uses */
+			CU_FAIL("Enqueue failed\n");
+			odp_event_free(ev);
+			break;
+		}
+	}
+
+	/* Free event and scheduling context */
+	for (i = 0; i < 2; i++) {
+		ev = odp_schedule(NULL, wait_time);
+
+		if (ev == ODP_EVENT_INVALID)
+			continue;
+
+		odp_event_free(ev);
+	}
+
+	CU_ASSERT(drain_queues() == 0);
+
+	for (i = 0; i < num_queues; i++)
+		CU_ASSERT_FATAL(odp_queue_destroy(queue[i]) == 0);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Max queue test wrappers, one per scheduler synchronization type */
+static void scheduler_test_max_queues_p(void)
+{
+	scheduler_test_max_queues(ODP_SCHED_SYNC_PARALLEL);
+}
+
+static void scheduler_test_max_queues_a(void)
+{
+	scheduler_test_max_queues(ODP_SCHED_SYNC_ATOMIC);
+}
+
+static void scheduler_test_max_queues_o(void)
+{
+	scheduler_test_max_queues(ODP_SCHED_SYNC_ORDERED);
+}
+
+/* Move events from an ordered scheduled queue into a plain queue created
+ * with ODP_QUEUE_ORDER_IGNORE, and verify that every event passes through
+ * all three stages (enqueue, schedule+enqueue, dequeue). */
+static void scheduler_test_order_ignore(void)
+{
+	odp_queue_capability_t queue_capa;
+	odp_schedule_config_t default_config;
+	odp_pool_t pool;
+	odp_pool_param_t pool_param;
+	odp_queue_param_t queue_param;
+	odp_queue_t ordered, plain, from;
+	odp_event_t ev;
+	odp_buffer_t buf;
+	uint32_t j, queue_size, num;
+	int ret;
+
+	odp_schedule_config_init(&default_config);
+	CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+
+	/* Limit event count by both scheduled and plain queue sizes */
+	queue_size = DEFAULT_NUM_EV;
+	if (default_config.queue_size &&
+	    queue_size > default_config.queue_size)
+		queue_size = default_config.queue_size;
+
+	if (queue_capa.plain.max_size &&
+	    queue_size > queue_capa.plain.max_size)
+		queue_size = queue_capa.plain.max_size;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.buf.size = 100;
+	pool_param.buf.align = 0;
+	pool_param.buf.num = DEFAULT_NUM_EV;
+	pool_param.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("test_order_ignore", &pool_param);
+
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Ensure that scheduler is empty */
+	for (j = 0; j < 10;) {
+		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+		CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+		if (ev != ODP_EVENT_INVALID)
+			odp_event_free(ev);
+		else
+			j++;
+	}
+
+	sched_queue_param_init(&queue_param);
+	queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+
+	ordered = odp_queue_create("ordered", &queue_param);
+	CU_ASSERT_FATAL(ordered != ODP_QUEUE_INVALID);
+
+	odp_queue_param_init(&queue_param);
+	queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+	queue_param.order = ODP_QUEUE_ORDER_IGNORE;
+
+	plain = odp_queue_create("plain", &queue_param);
+	CU_ASSERT_FATAL(plain != ODP_QUEUE_INVALID);
+
+	/* Stage 1: fill the ordered queue */
+	num = 0;
+	for (j = 0; j < queue_size; j++) {
+		buf = odp_buffer_alloc(pool);
+		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+		ev = odp_buffer_to_event(buf);
+		ret = odp_queue_enq(ordered, ev);
+
+		if (ret)
+			odp_event_free(ev);
+		else
+			num++;
+	}
+
+	CU_ASSERT(num == queue_size);
+
+	/* Stage 2: schedule from ordered, enqueue into the plain queue */
+	num = 0;
+	for (j = 0; j < 100 * DEFAULT_NUM_EV; j++) {
+		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+		if (ev == ODP_EVENT_INVALID)
+			continue;
+
+		CU_ASSERT(from == ordered);
+		ret = odp_queue_enq(plain, ev);
+
+		if (ret)
+			odp_event_free(ev);
+		else
+			num++;
+	}
+
+	CU_ASSERT(num == queue_size);
+
+	/* Stage 3: dequeue everything from the plain queue */
+	num = 0;
+	for (j = 0; j < 100 * DEFAULT_NUM_EV; j++) {
+		ev = odp_queue_deq(plain);
+
+		if (ev == ODP_EVENT_INVALID)
+			continue;
+
+		odp_event_free(ev);
+		num++;
+	}
+
+	CU_ASSERT(num == queue_size);
+
+	CU_ASSERT(drain_queues() == 0);
+	CU_ASSERT_FATAL(odp_queue_destroy(ordered) == 0);
+	CU_ASSERT_FATAL(odp_queue_destroy(plain) == 0);
+	CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+/* Check thrmask and info reporting of the predefined ALL, CONTROL and
+ * WORKER schedule groups. The calling (control) thread must be present in
+ * ALL and CONTROL but not in WORKER. */
+static void scheduler_test_group_info_predef(void)
+{
+	odp_schedule_group_info_t info;
+	odp_thrmask_t thrmask;
+	odp_schedule_group_t group;
+	int thr;
+
+	thr = odp_thread_id();
+
+	group = ODP_SCHED_GROUP_ALL;
+	odp_thrmask_zero(&thrmask);
+	CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+	CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+	memset(&info, 0, sizeof(odp_schedule_group_info_t));
+	CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+	printf("\n  Schedule group all name: %s\n", info.name);
+
+	/* This test case runs a control thread */
+	group = ODP_SCHED_GROUP_CONTROL;
+	odp_thrmask_zero(&thrmask);
+	CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+	CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+	memset(&info, 0, sizeof(odp_schedule_group_info_t));
+	CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+	printf("  Schedule group control name: %s\n", info.name);
+
+	group = ODP_SCHED_GROUP_WORKER;
+	odp_thrmask_zero(&thrmask);
+	CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+	CU_ASSERT(!odp_thrmask_isset(&thrmask, thr));
+	memset(&info, 0, sizeof(odp_schedule_group_info_t));
+	CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+	printf("  Schedule group worker name: %s\n", info.name);
+}
+
+/* Create a schedule group containing only the calling thread, pass one
+ * event through an atomic queue in that group, then tear everything down
+ * and verify the scheduler still runs after the group is destroyed. */
+static void scheduler_test_create_group(void)
+{
+	odp_thrmask_t mask;
+	odp_schedule_group_t group;
+	int thr_id;
+	odp_pool_t pool;
+	odp_pool_param_t pool_params;
+	odp_queue_t queue, from;
+	odp_queue_param_t qp;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint64_t wait_time;
+
+	thr_id = odp_thread_id();
+	odp_thrmask_zero(&mask);
+	odp_thrmask_set(&mask, thr_id);
+
+	group = odp_schedule_group_create("create_group", &mask);
+	CU_ASSERT_FATAL(group != ODP_SCHED_GROUP_INVALID);
+
+	odp_pool_param_init(&pool_params);
+	pool_params.buf.size = 100;
+	pool_params.buf.num = 2;
+	pool_params.type = ODP_POOL_BUFFER;
+
+	pool = odp_pool_create("create_group", &pool_params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sched_queue_param_init(&qp);
+	qp.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+	qp.sched.group = group;
+
+	queue = odp_queue_create("create_group", &qp);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	buf = odp_buffer_alloc(pool);
+	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+	ev = odp_buffer_to_event(buf);
+
+	CU_ASSERT_FATAL(odp_queue_enq(queue, ev) == 0);
+
+	wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+	ev = odp_schedule(&from, wait_time);
+
+	CU_ASSERT(ev != ODP_EVENT_INVALID);
+	CU_ASSERT(from == queue);
+
+	if (ev != ODP_EVENT_INVALID)
+		odp_event_free(ev);
+
+	/* Free schedule context */
+	drain_queues();
+
+	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+	CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+	CU_ASSERT_FATAL(odp_schedule_group_destroy(group) == 0);
+
+	/* Run scheduler after the group has been destroyed */
+	CU_ASSERT_FATAL(odp_schedule(NULL, wait_time) == ODP_EVENT_INVALID);
+}
+
+/* Create as many schedule groups as the implementation allows (capability
+ * minus the three enabled predefined groups), each with one atomic queue,
+ * then destroy them all. */
+static void scheduler_test_create_max_groups(void)
+{
+	odp_thrmask_t mask;
+	int thr_id;
+	uint32_t i;
+	odp_queue_param_t queue_param;
+	odp_schedule_capability_t sched_capa;
+
+	CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+	uint32_t max_groups = sched_capa.max_groups - 3; /* Enabled predefined groups */
+	odp_schedule_group_t group[max_groups];
+	odp_queue_t queue[max_groups];
+
+	CU_ASSERT_FATAL(max_groups > 0);
+	CU_ASSERT_FATAL(sched_capa.max_queues >= sched_capa.max_groups);
+
+	thr_id = odp_thread_id();
+	odp_thrmask_zero(&mask);
+	odp_thrmask_set(&mask, thr_id);
+
+	sched_queue_param_init(&queue_param);
+	queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+	for (i = 0; i < max_groups; i++) {
+		group[i] = odp_schedule_group_create("max_groups", &mask);
+		if (group[i] == ODP_SCHED_GROUP_INVALID) {
+			ODPH_ERR("schedule group create %u failed\n", i);
+			break;
+		}
+
+		queue_param.sched.group = group[i];
+		queue[i] = odp_queue_create("max_groups", &queue_param);
+		CU_ASSERT_FATAL(queue[i] != ODP_QUEUE_INVALID);
+	}
+
+	CU_ASSERT(i == max_groups);
+	/* Clean up only what was actually created */
+	max_groups = i;
+
+	for (i = 0; i < max_groups; i++) {
+		CU_ASSERT_FATAL(odp_queue_destroy(queue[i]) == 0);
+		CU_ASSERT_FATAL(odp_schedule_group_destroy(group[i]) == 0);
+	}
+}
+
+/* End-to-end schedule group test: create/lookup/join/leave groups, check
+ * thrmask and info reporting, then verify the scheduler only delivers
+ * events from queues belonging to groups the thread has joined. The group
+ * adherence part is repeated for each sync type. */
+static void scheduler_test_groups(void)
+{
+	odp_pool_t p;
+	odp_pool_param_t params;
+	odp_queue_t queue_grp1, queue_grp2;
+	odp_buffer_t buf;
+	odp_event_t ev;
+	uint32_t *u32;
+	int i, j, rc;
+	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+				      ODP_SCHED_SYNC_ATOMIC,
+				      ODP_SCHED_SYNC_ORDERED};
+	int thr_id = odp_thread_id();
+	odp_thrmask_t zeromask, mymask, testmask;
+	odp_schedule_group_t mygrp1, mygrp2, null_grp, lookup;
+	odp_schedule_group_info_t info;
+
+	odp_thrmask_zero(&zeromask);
+	odp_thrmask_zero(&mymask);
+	odp_thrmask_set(&mymask, thr_id);
+
+	/* Can't find a group before we create it */
+	lookup = odp_schedule_group_lookup("Test Group 1");
+	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
+
+	/* Now create the group */
+	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
+	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);
+
+	/* Verify we can now find it */
+	lookup = odp_schedule_group_lookup("Test Group 1");
+	CU_ASSERT(lookup == mygrp1);
+
+	/* Threadmask should be retrievable and be what we expect */
+	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+	/* Now join the group and verify we're part of it */
+	rc = odp_schedule_group_join(mygrp1, &mymask);
+	CU_ASSERT(rc == 0);
+
+	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
+
+	/* Info struct */
+	memset(&info, 0, sizeof(odp_schedule_group_info_t));
+	rc = odp_schedule_group_info(mygrp1, &info);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
+	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);
+
+	/* We can't join or leave an unknown group */
+	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
+	CU_ASSERT(rc != 0);
+
+	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
+	CU_ASSERT(rc != 0);
+
+	/* But we can leave our group */
+	rc = odp_schedule_group_leave(mygrp1, &mymask);
+	CU_ASSERT(rc == 0);
+
+	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+	/* Create group with no name */
+	null_grp = odp_schedule_group_create(NULL, &zeromask);
+	CU_ASSERT(null_grp != ODP_SCHED_GROUP_INVALID);
+
+	/* We shouldn't be able to find our second group before creating it */
+	lookup = odp_schedule_group_lookup("Test Group 2");
+	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
+
+	/* Now create it and verify we can find it */
+	mygrp2 = odp_schedule_group_create("Test Group 2", &mymask);
+	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);
+
+	lookup = odp_schedule_group_lookup("Test Group 2");
+	CU_ASSERT(lookup == mygrp2);
+
+	/* Destroy group with no name */
+	CU_ASSERT_FATAL(odp_schedule_group_destroy(null_grp) == 0);
+
+	/* Verify we're part of group 2 */
+	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
+
+	/* Leave group 2 */
+	rc = odp_schedule_group_leave(mygrp2, &mymask);
+	CU_ASSERT(rc == 0);
+
+	/* Verify we're not part of group 2 anymore */
+	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+	/* Now verify scheduler adherence to groups */
+	odp_pool_param_init(&params);
+	params.buf.size = 100;
+	params.buf.align = 0;
+	params.buf.num = 2;
+	params.type = ODP_POOL_BUFFER;
+
+	p = odp_pool_create("sched_group_pool", &params);
+
+	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+	for (i = 0; i < 3; i++) {
+		odp_queue_param_t qp;
+		odp_queue_t queue, from;
+		odp_schedule_group_t mygrp[NUM_GROUPS];
+		odp_queue_t queue_grp[NUM_GROUPS];
+		uint64_t wait_time;
+		int num = NUM_GROUPS;
+		int schedule_retries;
+
+		sched_queue_param_init(&qp);
+		qp.sched.sync = sync[i];
+		qp.sched.group = mygrp1;
+
+		/* Create and populate a group in group 1 */
+		queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
+		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
+		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);
+
+		buf = odp_buffer_alloc(p);
+
+		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+		u32 = odp_buffer_addr(buf);
+		u32[0] = MAGIC1;
+
+		ev = odp_buffer_to_event(buf);
+		rc = odp_queue_enq(queue_grp1, ev);
+		CU_ASSERT(rc == 0);
+		if (rc)
+			odp_buffer_free(buf);
+
+		/* Now create and populate a queue in group 2 */
+		qp.sched.group = mygrp2;
+		queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
+		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
+		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);
+
+		buf = odp_buffer_alloc(p);
+		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+		u32 = odp_buffer_addr(buf);
+		u32[0] = MAGIC2;
+
+		ev = odp_buffer_to_event(buf);
+		rc = odp_queue_enq(queue_grp2, ev);
+		CU_ASSERT(rc == 0);
+		if (rc)
+			odp_buffer_free(buf);
+
+		/* Swap between two groups. Application should serve both
+		 * groups to avoid potential head of line blocking in
+		 * scheduler. */
+		mygrp[0] = mygrp1;
+		mygrp[1] = mygrp2;
+		queue_grp[0] = queue_grp1;
+		queue_grp[1] = queue_grp2;
+		j = 0;
+
+		/* Ensure that each test run starts from mygrp1 */
+		odp_schedule_group_leave(mygrp1, &mymask);
+		odp_schedule_group_leave(mygrp2, &mymask);
+		odp_schedule_group_join(mygrp1, &mymask);
+
+		wait_time = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+		schedule_retries = 0;
+		while (num) {
+			queue = queue_grp[j];
+			ev = odp_schedule(&from, wait_time);
+
+			if (ev == ODP_EVENT_INVALID) {
+				CU_ASSERT_FATAL(schedule_retries <
+						WAIT_1MS_RETRIES);
+				schedule_retries++;
+				continue;
+			} else {
+				schedule_retries = 0;
+			}
+
+			/* Events must only arrive from the current group */
+			CU_ASSERT_FATAL(from == queue);
+
+			buf = odp_buffer_from_event(ev);
+			u32 = odp_buffer_addr(buf);
+
+			if (from == queue_grp1) {
+				/* CU_ASSERT_FATAL needs these brackets */
+				CU_ASSERT_FATAL(u32[0] == MAGIC1);
+			} else {
+				CU_ASSERT_FATAL(u32[0] == MAGIC2);
+			}
+
+			odp_buffer_free(buf);
+
+			/* Change group */
+			rc = odp_schedule_group_leave(mygrp[j], &mymask);
+			CU_ASSERT_FATAL(rc == 0);
+
+			j = (j + 1) % NUM_GROUPS;
+			rc = odp_schedule_group_join(mygrp[j], &mymask);
+			CU_ASSERT_FATAL(rc == 0);
+
+			/* Tell scheduler we're about to request an event.
+			 * Not needed, but a convenient place to test this API.
+			 */
+			odp_schedule_prefetch(1);
+
+			num--;
+		}
+
+		/* Release scheduler context and leave groups */
+		odp_schedule_group_join(mygrp1, &mymask);
+		odp_schedule_group_join(mygrp2, &mymask);
+		CU_ASSERT(drain_queues() == 0);
+		odp_schedule_group_leave(mygrp1, &mymask);
+		odp_schedule_group_leave(mygrp2, &mymask);
+
+		/* Done with queues for this round */
+		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
+		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);
+
+		/* Verify we can no longer find our queues */
+		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
+				ODP_QUEUE_INVALID);
+		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
+				ODP_QUEUE_INVALID);
+	}
+
+	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
+	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
+	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+/* Worker body for the chaos tests.
+ *
+ * Each thread repeatedly schedules an event and immediately re-enqueues it
+ * to the queue selected by the event's own (incrementing) sequence number,
+ * so events keep bouncing between queues of possibly mixed sync types for
+ * CHAOS_NUM_ROUNDS iterations. Always returns 0; failures are reported
+ * through CUnit assertions.
+ *
+ * arg is a thread_args_t* carrying the shared test_globals_t.
+ */
+static int chaos_thread(void *arg)
+{
+ uint64_t i, wait;
+ int rc;
+ chaos_buf *cbuf;
+ odp_event_t ev;
+ odp_queue_t from;
+ thread_args_t *args = (thread_args_t *)arg;
+ test_globals_t *globals = args->globals;
+ int me = odp_thread_id();
+ odp_time_t start_time, end_time, diff;
+
+ if (CHAOS_DEBUG)
+ printf("Chaos thread %d starting...\n", me);
+
+ /* Wait for all threads to start */
+ odp_barrier_wait(&globals->barrier);
+ start_time = odp_time_local();
+
+ /* Run the test */
+ wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
+ for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
+ ev = odp_schedule(&from, wait);
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
+ CU_ASSERT_FATAL(cbuf != NULL);
+ if (CHAOS_DEBUG)
+ printf("Thread %d received event %" PRIu64
+ " seq %" PRIu64
+ " from Q %s, sending to Q %s\n",
+ me, cbuf->evno, cbuf->seqno,
+ globals->
+ chaos_q
+ [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
+ globals->
+ chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);
+
+ /* Destination queue is picked by the event's sequence number,
+ * which is bumped on every hop (seqno++). */
+ rc = odp_queue_enq(
+ globals->
+ chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
+ ev);
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ if (CHAOS_DEBUG)
+ printf("Thread %d completed %d rounds...terminating\n",
+ odp_thread_id(), CHAOS_NUM_EVENTS);
+
+ end_time = odp_time_local();
+ diff = odp_time_diff(end_time, start_time);
+
+ printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
+ odp_thread_id(), odp_time_to_ns(diff) / 1000);
+
+ /* Make sure scheduling context is released */
+ odp_schedule_pause();
+ while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
+ != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+ }
+
+ /* Don't resume scheduling until all threads have finished */
+ odp_barrier_wait(&globals->barrier);
+ odp_schedule_resume();
+
+ drain_queues();
+
+ return 0;
+}
+
+/* Set up and run one chaos test round.
+ *
+ * Creates a buffer pool and CHAOS_NUM_QUEUES scheduled queues, seeds them
+ * with CHAOS_NUM_EVENTS events, launches num_workers chaos_thread workers,
+ * joins them and tears everything down.
+ *
+ * qtype selects the queue sync type: 0 = parallel, 1 = atomic, 2 = ordered.
+ * qtype == 3 (== num_sync) cycles through all three types across the queues
+ * (the "chaos" mix).
+ */
+static void chaos_run(unsigned int qtype)
+{
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ odp_queue_param_t qp;
+ odp_buffer_t buf;
+ chaos_buf *cbuf;
+ test_globals_t *globals;
+ thread_args_t *args;
+ odp_shm_t shm;
+ int i, rc;
+ void *arg_ptr;
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+ const unsigned int num_sync = ODPH_ARRAY_SIZE(sync);
+ const char *const qtypes[] = {"parallel", "atomic", "ordered"};
+
+ /* Set up the scheduling environment */
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+
+ args->globals = globals;
+
+ odp_pool_param_init(&params);
+ params.buf.size = sizeof(chaos_buf);
+ params.buf.align = 0;
+ params.buf.num = CHAOS_NUM_EVENTS;
+ params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("sched_chaos_pool", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sched_queue_param_init(&qp);
+
+ for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
+ /* When qtype selects the mixed mode, rotate through all sync
+ * types; otherwise every queue uses the requested type. */
+ uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);
+
+ qp.sched.sync = sync[ndx];
+ snprintf(globals->chaos_q[i].name,
+ sizeof(globals->chaos_q[i].name),
+ "chaos queue %d - %s", i,
+ qtypes[ndx]);
+
+ globals->chaos_q[i].handle =
+ odp_queue_create(globals->chaos_q[i].name, &qp);
+ CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
+ ODP_QUEUE_INVALID);
+ /* Queue context stores the queue index so chaos_thread can
+ * map a received event back to its source queue name. */
+ rc = odp_queue_context_set(globals->chaos_q[i].handle,
+ CHAOS_NDX_TO_PTR(i), 0);
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ /* Now populate the queues with the initial seed elements */
+ for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ cbuf = odp_buffer_addr(buf);
+ cbuf->evno = i;
+ cbuf->seqno = 0;
+ rc = odp_queue_enq(
+ globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
+ odp_buffer_to_event(buf));
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ arg_ptr = args;
+ odp_cunit_thread_create(globals->num_workers, chaos_thread, &arg_ptr, 0, 0);
+
+ odp_cunit_thread_join(globals->num_workers);
+
+ if (CHAOS_DEBUG)
+ printf("Thread %d returning from chaos threads..cleaning up\n",
+ odp_thread_id());
+
+ for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
+ if (CHAOS_DEBUG)
+ printf("Destroying queue %s\n",
+ globals->chaos_q[i].name);
+ rc = odp_queue_destroy(globals->chaos_q[i].handle);
+ CU_ASSERT(rc == 0);
+ }
+
+ rc = odp_pool_destroy(pool);
+ CU_ASSERT(rc == 0);
+}
+
+/* Chaos test entry points: each runs chaos_run() with one queue sync
+ * selector (0 = parallel, 1 = atomic, 2 = ordered, 3 = mixed types). */
+static void scheduler_test_parallel(void)
+{
+ chaos_run(0);
+}
+
+static void scheduler_test_atomic(void)
+{
+ chaos_run(1);
+}
+
+static void scheduler_test_ordered(void)
+{
+ chaos_run(2);
+}
+
+static void scheduler_test_chaos(void)
+{
+ chaos_run(3);
+}
+
+/* Common worker body for the schedule_common()/parallel_execute() tests.
+ *
+ * Drains all buffers previously placed by fill_queues(), using either
+ * single-event odp_schedule() or odp_schedule_multi() bursts depending on
+ * args->enable_schd_multi. For ORDERED queues each received buffer is
+ * copied to a per-queue plain queue (qctx->pq_handle) and the ordered-lock
+ * sequence counters are verified; after all workers finish, one thread
+ * replays the plain queues to check that events arrived in source order.
+ * For enable_excl_atomic, a trylock on a shared spinlock verifies that
+ * atomic context really is exclusive. Always returns 0.
+ *
+ * arg is a thread_args_t*; may run on one thread or many (num_workers).
+ */
+static int schedule_common_(void *arg)
+{
+ thread_args_t *args = (thread_args_t *)arg;
+ odp_schedule_sync_t sync;
+ test_globals_t *globals;
+ queue_context *qctx;
+ buf_contents *bctx, *bctx_cpy;
+ odp_pool_t pool;
+ int locked;
+ int num;
+ odp_buffer_t buf;
+ odp_queue_t from;
+
+ globals = args->globals;
+ sync = args->sync;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ /* Main drain loop: run until the shared buffer counter hits zero */
+ while (1) {
+ from = ODP_QUEUE_INVALID;
+ num = 0;
+
+ odp_ticketlock_lock(&globals->lock);
+ if (globals->buf_count == 0) {
+ odp_ticketlock_unlock(&globals->lock);
+ break;
+ }
+ odp_ticketlock_unlock(&globals->lock);
+
+ if (args->enable_schd_multi) {
+ odp_event_t events[BURST_BUF_SIZE],
+ ev_cpy[BURST_BUF_SIZE];
+ odp_buffer_t buf_cpy[BURST_BUF_SIZE];
+ int j;
+
+ num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
+ events, BURST_BUF_SIZE);
+ CU_ASSERT(num >= 0);
+ CU_ASSERT(num <= BURST_BUF_SIZE);
+ if (num == 0)
+ continue;
+
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ uint32_t ndx;
+ uint32_t ndx_max;
+ int rc;
+
+ ndx_max = odp_queue_lock_count(from);
+ CU_ASSERT_FATAL(ndx_max > 0);
+
+ qctx = odp_queue_context(from);
+
+ /* Copy each event so the original can be
+ * freed while its copy is replayed later
+ * for the ordering check. */
+ for (j = 0; j < num; j++) {
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event
+ (events[j]));
+
+ buf_cpy[j] = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf_cpy[j] !=
+ ODP_BUFFER_INVALID);
+ bctx_cpy = odp_buffer_addr(buf_cpy[j]);
+ memcpy(bctx_cpy, bctx,
+ sizeof(buf_contents));
+ bctx_cpy->output_sequence =
+ bctx_cpy->sequence;
+ ev_cpy[j] =
+ odp_buffer_to_event(buf_cpy[j]);
+ }
+
+ rc = odp_queue_enq_multi(qctx->pq_handle,
+ ev_cpy, num);
+ CU_ASSERT(rc == num);
+
+ /* Inside each ordered lock the per-lock
+ * sequence counter must match the first
+ * event's sequence number. */
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event(events[0]));
+ for (ndx = 0; ndx < ndx_max; ndx++) {
+ odp_schedule_order_lock(ndx);
+ CU_ASSERT(bctx->sequence ==
+ qctx->lock_sequence[ndx]);
+ qctx->lock_sequence[ndx] += num;
+ odp_schedule_order_unlock(ndx);
+ }
+ }
+
+ for (j = 0; j < num; j++) {
+ CU_ASSERT(odp_event_is_valid(events[j]) == 1);
+ odp_event_free(events[j]);
+ }
+ } else {
+ odp_event_t ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
+ buf = odp_buffer_from_event(ev);
+ num = 1;
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ uint32_t ndx;
+ uint32_t ndx_max;
+ int rc;
+ odp_buffer_t buf_cpy;
+
+ ndx_max = odp_queue_lock_count(from);
+ CU_ASSERT_FATAL(ndx_max > 0);
+
+ qctx = odp_queue_context(from);
+ bctx = odp_buffer_addr(buf);
+ buf_cpy = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
+ bctx_cpy = odp_buffer_addr(buf_cpy);
+ memcpy(bctx_cpy, bctx, sizeof(buf_contents));
+ bctx_cpy->output_sequence = bctx_cpy->sequence;
+
+ rc = odp_queue_enq(qctx->pq_handle,
+ odp_buffer_to_event
+ (buf_cpy));
+ CU_ASSERT(rc == 0);
+
+ for (ndx = 0; ndx < ndx_max; ndx++) {
+ odp_schedule_order_lock(ndx);
+ CU_ASSERT(bctx->sequence ==
+ qctx->lock_sequence[ndx]);
+ qctx->lock_sequence[ndx] += num;
+ odp_schedule_order_unlock(ndx);
+ }
+ }
+
+ odp_buffer_free(buf);
+ }
+
+ if (args->enable_excl_atomic) {
+ /* Atomic context must be exclusive: the trylock must
+ * always succeed since no other thread can hold the
+ * same context simultaneously. */
+ locked = odp_spinlock_trylock(&globals->atomic_lock);
+ CU_ASSERT(locked != 0);
+ CU_ASSERT(from != ODP_QUEUE_INVALID);
+ if (locked) {
+ int cnt;
+ odp_time_t time = ODP_TIME_NULL;
+ /* Do some work here to keep the thread busy */
+ for (cnt = 0; cnt < 1000; cnt++)
+ time = odp_time_sum(time,
+ odp_time_local());
+
+ odp_spinlock_unlock(&globals->atomic_lock);
+ }
+ }
+
+ release_context(sync);
+ odp_ticketlock_lock(&globals->lock);
+
+ globals->buf_count -= num;
+
+ if (globals->buf_count < 0) {
+ odp_ticketlock_unlock(&globals->lock);
+ CU_FAIL_FATAL("Buffer counting failed");
+ }
+
+ odp_ticketlock_unlock(&globals->lock);
+ }
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ /* Exactly one thread (the trylock winner) verifies source ordering
+ * by replaying the plain queues filled above. */
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ locked = odp_ticketlock_trylock(&globals->lock);
+ else
+ locked = 0;
+
+ if (locked && globals->buf_count_cpy > 0) {
+ odp_event_t ev;
+ odp_queue_t pq;
+ uint64_t seq;
+ uint64_t bcount = 0;
+ int i, j;
+ char name[32];
+ uint64_t num_bufs = args->num_bufs;
+ uint64_t buf_count = globals->buf_count_cpy;
+
+ for (i = 0; i < args->num_prio; i++) {
+ for (j = 0; j < args->num_queues; j++) {
+ snprintf(name, sizeof(name),
+ "plain_%d_%d_o", i, j);
+ pq = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);
+
+ seq = 0;
+ while (1) {
+ ev = odp_queue_deq(pq);
+
+ if (ev == ODP_EVENT_INVALID) {
+ CU_ASSERT(seq == num_bufs);
+ break;
+ }
+
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event(ev));
+
+ CU_ASSERT(bctx->sequence == seq);
+ seq++;
+ bcount++;
+ odp_event_free(ev);
+ }
+ }
+ }
+ CU_ASSERT(bcount == buf_count);
+ globals->buf_count_cpy = 0;
+ }
+
+ if (locked)
+ odp_ticketlock_unlock(&globals->lock);
+
+ /* Clear scheduler atomic / ordered context between tests */
+ CU_ASSERT(drain_queues() == 0);
+
+ /* NOTE(review): num here holds only the count from the final loop
+ * iteration, not a running total — presumably intended as a leftover
+ * indicator after drain_queues(); verify against original intent. */
+ if (num)
+ printf("\nDROPPED %i events\n\n", num);
+
+ return 0;
+}
+
+/* Populate the pre-created scheduled queues for one test run.
+ *
+ * For each priority level and each queue index, looks up the queue by its
+ * sync-type-specific name ("sched_<prio>_<q>_{n|a|o}") and enqueues
+ * args->num_bufs buffers. For ORDERED queues each buffer is stamped with
+ * the queue's next input sequence number so schedule_common_() can verify
+ * ordering later. The total enqueued count is stored in globals->buf_count
+ * (and its copy buf_count_cpy for the post-run ordering check).
+ */
+static void fill_queues(thread_args_t *args)
+{
+ odp_schedule_sync_t sync;
+ int num_queues, num_prio;
+ odp_pool_t pool;
+ int i, j, k;
+ int buf_count = 0;
+ test_globals_t *globals;
+ char name[32];
+ int ret;
+ odp_buffer_t buf;
+ odp_event_t ev;
+
+ globals = args->globals;
+ sync = args->sync;
+ num_queues = args->num_queues;
+ num_prio = args->num_prio;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num_prio; i++) {
+ for (j = 0; j < num_queues; j++) {
+ odp_queue_t queue;
+
+ /* Queue name suffix encodes the sync type */
+ switch (sync) {
+ case ODP_SCHED_SYNC_PARALLEL:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_n", i, j);
+ break;
+ case ODP_SCHED_SYNC_ATOMIC:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_a", i, j);
+ break;
+ case ODP_SCHED_SYNC_ORDERED:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_o", i, j);
+ break;
+ default:
+ CU_ASSERT_FATAL(0);
+ break;
+ }
+
+ queue = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (k = 0; k < args->num_bufs; k++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ queue_context *qctx =
+ odp_queue_context(queue);
+ buf_contents *bctx =
+ odp_buffer_addr(buf);
+ bctx->sequence = qctx->sequence++;
+ }
+
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* NOTE(review): unreachable when ret != 0 —
+ * the fatal assert above already aborts;
+ * kept for defensive symmetry. */
+ if (ret)
+ odp_buffer_free(buf);
+ else
+ buf_count++;
+ }
+ }
+ }
+
+ globals->buf_count = buf_count;
+ globals->buf_count_cpy = buf_count;
+}
+
+/* Reset the per-queue ordering state of the ORDERED test queues so the
+ * next test pass starts from sequence zero.
+ *
+ * Clears qctx->sequence and every ordered-lock sequence counter for each
+ * "sched_<prio>_<q>_o" queue.
+ */
+static void reset_queues(thread_args_t *args)
+{
+ int i, j, k;
+ int num_prio = args->num_prio;
+ int num_queues = args->num_queues;
+ char name[32];
+
+ for (i = 0; i < num_prio; i++) {
+ for (j = 0; j < num_queues; j++) {
+ odp_queue_t queue;
+
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_o", i, j);
+ queue = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ /* NOTE(review): this k loop resets the same queue
+ * context num_bufs times — one pass would suffice;
+ * harmless but redundant. */
+ for (k = 0; k < args->num_bufs; k++) {
+ queue_context *qctx =
+ odp_queue_context(queue);
+ uint32_t ndx;
+ uint32_t ndx_max;
+
+ ndx_max = odp_queue_lock_count(queue);
+ CU_ASSERT_FATAL(ndx_max > 0);
+ qctx->sequence = 0;
+ for (ndx = 0; ndx < ndx_max; ndx++)
+ qctx->lock_sequence[ndx] = 0;
+ }
+ }
+ }
+}
+
+/* Run one single-threaded scheduling test: fill the queues, drain them on
+ * the calling thread via schedule_common_(), and (for ORDERED sync) reset
+ * the queue ordering state for the next pass.
+ *
+ * sync              queue synchronization type under test
+ * num_queues        queues per priority level
+ * num_prio          number of priority levels
+ * enable_schd_multi nonzero to use odp_schedule_multi() bursts
+ */
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+ int num_prio, int enable_schd_multi)
+{
+ thread_args_t args;
+ odp_shm_t shm;
+ test_globals_t *globals;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ memset(&args, 0, sizeof(thread_args_t));
+ args.globals = globals;
+ args.sync = sync;
+ args.num_queues = num_queues;
+ args.num_prio = num_prio;
+ args.num_bufs = BUFS_PER_QUEUE;
+ args.num_workers = 1;
+ args.enable_schd_multi = enable_schd_multi;
+ args.enable_excl_atomic = 0; /* Not needed with a single CPU */
+
+ fill_queues(&args);
+
+ schedule_common_(&args);
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ reset_queues(&args);
+}
+
+/* Run one multi-threaded scheduling test: fill the queues, launch
+ * globals->num_workers threads running schedule_common_(), join them, and
+ * (for ORDERED sync) reset queue ordering state for the next pass.
+ *
+ * sync               queue synchronization type under test
+ * num_queues         queues per priority level
+ * num_prio           number of priority levels
+ * enable_schd_multi  nonzero to use odp_schedule_multi() bursts
+ * enable_excl_atomic nonzero to verify atomic-context exclusivity (sizes
+ *                    the buffer count to the max scheduled queue depth)
+ */
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+ int num_prio, int enable_schd_multi,
+ int enable_excl_atomic)
+{
+ odp_shm_t shm;
+ test_globals_t *globals;
+ thread_args_t *args;
+ void *arg_ptr;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+
+ args->globals = globals;
+ args->sync = sync;
+ args->num_queues = num_queues;
+ args->num_prio = num_prio;
+ if (enable_excl_atomic)
+ args->num_bufs = globals->max_sched_queue_size;
+ else
+ args->num_bufs = BUFS_PER_QUEUE;
+ args->num_workers = globals->num_workers;
+ args->enable_schd_multi = enable_schd_multi;
+ args->enable_excl_atomic = enable_excl_atomic;
+
+ fill_queues(args);
+
+ if (globals->test_debug_print)
+ odp_schedule_print();
+
+ /* Create and launch worker threads */
+ arg_ptr = args;
+ odp_cunit_thread_create(globals->num_workers, schedule_common_, &arg_ptr, 0, 0);
+
+ /* Wait for worker threads to terminate */
+ odp_cunit_thread_join(globals->num_workers);
+
+ /* Cleanup ordered queues for next pass */
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ reset_queues(args);
+}
+
+/*
+ * Single-combination test wrappers. Each configures schedule_common()
+ * (single thread) or parallel_execute() (many threads) for one combination
+ * of: sync type (parallel/atomic/ordered), queue count (one vs. many),
+ * priority count (one vs. all), single vs. multi scheduling, and optional
+ * atomic-exclusivity checking.
+ */
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL */
+static void scheduler_test_1q_1t_n(void)
+{
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void scheduler_test_1q_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+static void scheduler_test_1q_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL */
+static void scheduler_test_mq_1t_n(void)
+{
+ /* Only one priority involved in these tests, but use
+ the same number of queues the more general case uses */
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio,
+ ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void scheduler_test_mq_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio,
+ ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+static void scheduler_test_mq_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio,
+ ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL */
+static void scheduler_test_mq_1t_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio, prio,
+ SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+static void scheduler_test_mq_1t_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+static void scheduler_test_mq_1t_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio, prio,
+ SCHD_ONE);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL */
+static void scheduler_test_mq_mt_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio,
+ prio, SCHD_ONE, DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+static void scheduler_test_mq_mt_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_ONE, DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+static void scheduler_test_mq_mt_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio, prio,
+ SCHD_ONE, DISABLE_EXCL_ATOMIC);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues */
+static void scheduler_test_1q_mt_a_excl(void)
+{
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
+ ENABLE_EXCL_ATOMIC);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL multi */
+static void scheduler_test_multi_1q_1t_n(void)
+{
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void scheduler_test_multi_1q_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void scheduler_test_multi_1q_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL multi */
+static void scheduler_test_multi_mq_1t_n(void)
+{
+ /* Only one priority involved in these tests, but use
+ the same number of queues the more general case uses */
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio,
+ ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+static void scheduler_test_multi_mq_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio,
+ ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
+static void scheduler_test_multi_mq_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio,
+ ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL multi */
+static void scheduler_test_multi_mq_1t_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio, prio,
+ SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void scheduler_test_multi_mq_1t_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
+static void scheduler_test_multi_mq_1t_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio, prio,
+ SCHD_MULTI);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL multi */
+static void scheduler_test_multi_mq_mt_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, globals->queues_per_prio,
+ prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
+static void scheduler_test_multi_mq_mt_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
+static void scheduler_test_multi_mq_mt_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ORDERED, globals->queues_per_prio, prio,
+ SCHD_MULTI, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues multi */
+static void scheduler_test_multi_1q_mt_a_excl(void)
+{
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
+ ENABLE_EXCL_ATOMIC);
+}
+
+/* Verify odp_schedule_pause()/odp_schedule_resume() semantics on a single
+ * thread: after pause, only locally stashed events may still be returned
+ * (at most NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE); after resume, all
+ * remaining events must become schedulable again.
+ */
+static void scheduler_test_pause_resume(void)
+{
+ odp_queue_param_t qp;
+ odp_queue_t queue;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_queue_t from;
+ odp_pool_t pool;
+ int i;
+ int local_bufs = 0;
+ int ret;
+
+ sched_queue_param_init(&qp);
+ queue = odp_queue_create("pause_resume", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Enqueue the full set of test buffers */
+ for (i = 0; i < NUM_BUFS_PAUSE; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ /* Consume some events before pausing */
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ }
+
+ odp_schedule_pause();
+
+ /* While paused, only events already stashed locally may appear */
+ while (1) {
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ local_bufs++;
+ }
+
+ CU_ASSERT(local_bufs <= NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
+
+ odp_schedule_resume();
+
+ /* After resume, the remaining events must all be schedulable.
+ * NOTE(review): ev is not checked against ODP_EVENT_INVALID here
+ * before conversion — presumably relying on ODP_SCHED_WAIT never
+ * returning an invalid event; confirm against the ODP spec. */
+ for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ }
+
+ CU_ASSERT(drain_queues() == 0);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Verify that enqueueing is legal while scheduling is paused, in both
+ * orders: pause -> enqueue -> schedule -> resume, and
+ * pause -> schedule -> enqueue -> resume. Finally all NUM_BUFS_PAUSE
+ * events must still be present and drainable.
+ */
+static void scheduler_test_pause_enqueue(void)
+{
+ odp_queue_param_t qp;
+ odp_queue_t queue;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_event_t ev_tbl[NUM_BUFS_BEFORE_PAUSE];
+ odp_queue_t from;
+ odp_pool_t pool;
+ int i;
+ int ret;
+ int local_bufs;
+
+ sched_queue_param_init(&qp);
+ queue = odp_queue_create("pause_enqueue", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < NUM_BUFS_PAUSE; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ /* Hold some scheduled events aside so they can be re-enqueued
+ * while paused */
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ ev_tbl[i] = ev;
+ }
+
+ /* Pause, enqueue, schedule, resume */
+ odp_schedule_pause();
+
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ ev = ev_tbl[i];
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ /* While paused, any stashed events are cycled back to the queue */
+ local_bufs = 0;
+ while (1) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+
+ local_bufs++;
+ CU_ASSERT_FATAL(local_bufs <= NUM_BUFS_PAUSE);
+ }
+
+ odp_schedule_resume();
+
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ ev_tbl[i] = ev;
+ }
+
+ /* Pause, schedule, enqueue, resume */
+ odp_schedule_pause();
+
+ local_bufs = 0;
+ while (1) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+
+ local_bufs++;
+ CU_ASSERT_FATAL(local_bufs <= NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
+ }
+
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ ev = ev_tbl[i];
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+ }
+
+ odp_schedule_resume();
+
+ /* Free all */
+ CU_ASSERT(drain_queues() == NUM_BUFS_PAUSE);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Basic, single threaded ordered lock API testing */
+static void scheduler_test_ordered_lock(void)
+{
+ odp_queue_param_t qp;
+ odp_queue_t queue;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_queue_t from;
+ odp_pool_t pool;
+ int i;
+ int ret;
+ uint32_t lock_count;
+ odp_schedule_capability_t sched_capa;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+
+ sched_queue_param_init(&qp);
+ qp.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ qp.sched.lock_count = sched_capa.max_ordered_locks;
+
+ queue = odp_queue_create("ordered_lock", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_type(queue) == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ORDERED);
+
+ lock_count = odp_queue_lock_count(queue);
+
+ if (lock_count == 0) {
+ printf(" NO ORDERED LOCKS. Ordered locks not tested.\n");
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ return;
+ }
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < BUFS_PER_QUEUE; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ odp_buffer_free(buf);
+ }
+
+ for (i = 0; i < BUFS_PER_QUEUE / 2; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_schedule_order_lock(0);
+ odp_schedule_order_unlock(0);
+ odp_buffer_free(buf);
+ }
+
+ if (lock_count < 2) {
+ printf(" ONLY ONE ORDERED LOCK. Unlock_lock not tested.\n");
+ CU_ASSERT(drain_queues() == BUFS_PER_QUEUE / 2);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ return;
+ }
+
+ for (i = 0; i < BUFS_PER_QUEUE / 2; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_schedule_order_lock(0);
+ odp_schedule_order_unlock_lock(0, 1);
+ odp_schedule_order_unlock(1);
+ odp_buffer_free(buf);
+ }
+
+ CU_ASSERT(drain_queues() == 0);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Allocate one buffer from the shared message pool and enqueue it to
+ * 'queue'. Any failure is fatal to the current test case. */
+static void enqueue_event(odp_queue_t queue)
+{
+ odp_pool_t pool;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ int ret;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+/* Smoke-test odp_schedule_order_wait() on a single thread: with only one
+ * thread holding an ordered context there is nothing to wait for, so the
+ * call must return promptly without hanging or crashing. */
+static void scheduler_test_order_wait_1_thread(void)
+{
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ odp_event_t ev;
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ queue = odp_queue_create("ordered queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_type(queue) == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ORDERED);
+
+ /* Set up an ordered scheduling context */
+ enqueue_event(queue);
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ odp_event_free(ev);
+
+ /* Check that order wait does not get stuck or crash */
+ odp_schedule_order_wait();
+
+ /* Release the context */
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Helper thread for the two-thread order-wait test.
+ *
+ * Grabs the first event from the ordered queue, signals the main thread
+ * (helper_ready/helper_active flags), then deliberately holds its ordered
+ * scheduling context for a full second so the main thread can queue up
+ * behind it, before clearing helper_active and releasing the context.
+ * Always returns 0.
+ */
+static int order_wait_helper(void *arg ODP_UNUSED)
+{
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL, odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
+
+ if (ev != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+
+ odp_atomic_store_rel_u32(&globals->order_wait.helper_active, 1);
+ odp_atomic_store_rel_u32(&globals->order_wait.helper_ready, 1);
+
+ /* Wait that the main thread can attempt to overtake us */
+ odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
+
+ odp_atomic_store_rel_u32(&globals->order_wait.helper_active, 0);
+ }
+
+ /* We are not interested in further events */
+ odp_schedule_pause();
+ /* Release context */
+ while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
+ != ODP_EVENT_INVALID) {
+ /* We got an event that was meant for the main thread */
+ odp_event_free(ev);
+ }
+
+ return 0;
+}
+
+/* Two-thread odp_schedule_order_wait() test.
+ *
+ * The helper thread takes the first event from an ordered queue and holds
+ * its context; this thread then takes a second event (so it is behind the
+ * helper in source-queue order) and calls odp_schedule_order_wait(). If
+ * the implementation reports order_wait capability, the helper must have
+ * released its context by the time the call returns. The test degrades
+ * gracefully (prints "SKIPPED"/"reduced test") when timing prevents the
+ * full race from being set up.
+ */
+static void scheduler_test_order_wait_2_threads(void)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ int ret;
+ odp_time_t start;
+ odp_event_t ev;
+ int num = 1;
+
+ CU_ASSERT(!odp_schedule_capability(&sched_capa));
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ queue = odp_queue_create("ordered queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_type(queue) == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ORDERED);
+
+ odp_atomic_init_u32(&globals->order_wait.helper_ready, 0);
+ odp_atomic_init_u32(&globals->order_wait.helper_active, 0);
+
+ ret = odp_cunit_thread_create(num, order_wait_helper, NULL, 0, 0);
+ CU_ASSERT_FATAL(ret == num);
+
+ /* Send an event to the helper thread */
+ enqueue_event(queue);
+
+ /* Wait that the helper thread gets the event */
+ start = odp_time_local();
+ while (!odp_atomic_load_acq_u32(&globals->order_wait.helper_ready)) {
+ odp_time_t now = odp_time_local();
+
+ if (odp_time_diff_ns(now, start) > ODP_TIME_SEC_IN_NS) {
+ CU_FAIL("Timeout waiting for helper\n");
+ break;
+ }
+ }
+
+ /* Try to send an event to ourselves */
+ enqueue_event(queue);
+ /*
+ * If ordered queues are implemented as atomic queues, the schedule
+ * call here will not return anything until the helper thread has
+ * released the scheduling context of the first event. So we have
+ * to wait long enough before giving up.
+ */
+ ev = odp_schedule(NULL, odp_schedule_wait_time(2 * ODP_TIME_SEC_IN_NS));
+ if (ev == ODP_EVENT_INVALID) {
+ /* Helper thread got the event. Give up. */
+ printf("SKIPPED...");
+ goto out;
+ }
+ odp_event_free(ev);
+
+ /*
+ * We are now in an ordered scheduling context and behind the helper
+ * thread in source queue order if the helper thread has not released
+ * the scheuduling context.
+ */
+
+ if (!odp_atomic_load_acq_u32(&globals->order_wait.helper_active)) {
+ /*
+ * Helper thread has released the context already.
+ * We cannot test order wait fully.
+ */
+ printf("reduced test...");
+ }
+
+ /*
+ * The function we are testing: Wait until there are no scheduling
+ * contexts that precede ours.
+ */
+ odp_schedule_order_wait();
+
+ /*
+ * If order wait is supported, we are now first in the source queue
+ * order, so the helper thread must have released its context.
+ */
+ if (sched_capa.order_wait)
+ CU_ASSERT(!odp_atomic_load_acq_u32(&globals->order_wait.helper_active));
+
+ /* Release the context */
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+out:
+ CU_ASSERT(odp_cunit_thread_join(num) == 0);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
/*
 * Worker: move events between a scheduled queue and a plain queue.
 *
 * Each round schedules an event from the scheduled queue, dequeues one event
 * from the plain queue, and enqueues both back to the tails of their queues.
 * With ordered sync, ordered lock 0 serializes the plain queue accesses so
 * they happen in source queue order. Always returns 0.
 */
static int sched_and_plain_thread(void *arg)
{
	odp_event_t ev1, ev2;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	odp_queue_t sched_queue = globals->sched_and_plain_q.sched;
	odp_queue_t plain_queue = globals->sched_and_plain_q.plain;
	odp_schedule_sync_t sync = odp_queue_sched_type(sched_queue);
	uint64_t i, wait;

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);

	/* Run the test */
	wait = odp_schedule_wait_time(10 * ODP_TIME_MSEC_IN_NS);
	for (i = 0; i < SCHED_AND_PLAIN_ROUNDS; i++) {
		uint32_t rand_val;

		/* Dequeue events from scheduled and plain queues */
		ev1 = odp_schedule(NULL, wait);
		if (ev1 == ODP_EVENT_INVALID)
			continue;

		/* Ordered lock keeps plain queue accesses in source order */
		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_order_lock(0);

		ev2 = odp_queue_deq(plain_queue);
		CU_ASSERT_FATAL(ev2 != ODP_EVENT_INVALID);

		/* Add random delay to stress scheduler implementation */
		odp_random_data((uint8_t *)&rand_val, sizeof(rand_val),
				ODP_RANDOM_BASIC);
		odp_time_wait_ns(rand_val % ODP_TIME_USEC_IN_NS);

		/* Enqueue events back to the end of the queues */
		CU_ASSERT_FATAL(!odp_queue_enq(plain_queue, ev2));

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_order_unlock(0);

		CU_ASSERT_FATAL(!odp_queue_enq(sched_queue, ev1));
	}

	/* Make sure scheduling context is released */
	odp_schedule_pause();
	while ((ev1 = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) != ODP_EVENT_INVALID) {
		/* Prescheduled events are recycled the same way as above */
		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_order_lock(0);

		ev2 = odp_queue_deq(plain_queue);
		CU_ASSERT_FATAL(ev2 != ODP_EVENT_INVALID);

		CU_ASSERT_FATAL(!odp_queue_enq(plain_queue, ev2));

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_order_unlock(0);

		CU_ASSERT_FATAL(!odp_queue_enq(sched_queue, ev1));
	}

	/* Don't resume scheduling until all threads have finished */
	odp_barrier_wait(&globals->barrier);
	odp_schedule_resume();

	return 0;
}
+
+static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
+{
+ thread_args_t *args;
+ test_globals_t *globals;
+ odp_queue_t sched_queue;
+ odp_queue_t plain_queue;
+ odp_pool_t pool;
+ odp_queue_param_t queue_param;
+ odp_pool_param_t pool_param;
+ odp_queue_capability_t queue_capa;
+ odp_schedule_capability_t sched_capa;
+ odp_shm_t shm;
+ odp_event_t ev;
+ uint32_t *buf_data;
+ uint32_t seq;
+ uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ uint32_t events_per_queue = BUFS_PER_QUEUE / 2;
+ uint32_t prev_seq;
+ int first;
+ void *arg_ptr;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+ CU_ASSERT_FATAL(!odp_queue_capability(&queue_capa))
+
+ if (sync == ODP_SCHED_SYNC_ORDERED &&
+ sched_capa.max_ordered_locks == 0) {
+ printf("\n NO ORDERED LOCKS. scheduler_test_ordered_and_plain skipped.\n");
+ return;
+ }
+
+ /* Set up the scheduling environment */
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+ args->globals = globals;
+
+ /* Make sure all events fit to queues */
+ if (sched_capa.max_queue_size &&
+ sched_capa.max_queue_size < events_per_queue)
+ events_per_queue = sched_capa.max_queue_size;
+ if (queue_capa.plain.max_size &&
+ queue_capa.plain.max_size < events_per_queue)
+ events_per_queue = queue_capa.plain.max_size;
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = sync;
+ queue_param.size = events_per_queue;
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ queue_param.sched.lock_count = 1;
+
+ sched_queue = odp_queue_create(NULL, &queue_param);
+ CU_ASSERT_FATAL(sched_queue != ODP_QUEUE_INVALID);
+ globals->sched_and_plain_q.sched = sched_queue;
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ queue_param.size = events_per_queue;
+
+ plain_queue = odp_queue_create(NULL, &queue_param);
+ CU_ASSERT_FATAL(sched_queue != ODP_QUEUE_INVALID);
+ globals->sched_and_plain_q.plain = plain_queue;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.size = 100;
+ pool_param.buf.num = 2 * events_per_queue;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("sched_to_plain_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Create and enq test events with sequential sequence numbers */
+ for (seq = 0; seq < events_per_queue; seq++) {
+ odp_buffer_t buf1, buf2;
+
+ buf1 = odp_buffer_alloc(pool);
+ if (buf1 == ODP_BUFFER_INVALID)
+ break;
+ buf2 = odp_buffer_alloc(pool);
+ if (buf2 == ODP_BUFFER_INVALID) {
+ odp_buffer_free(buf1);
+ break;
+ }
+ buf_data = odp_buffer_addr(buf1);
+ *buf_data = seq;
+ buf_data = odp_buffer_addr(buf2);
+ *buf_data = seq;
+
+ /* Events flow id is 0 by default */
+ CU_ASSERT_FATAL(!odp_queue_enq(sched_queue,
+ odp_buffer_to_event(buf1)));
+ CU_ASSERT_FATAL(!odp_queue_enq(plain_queue,
+ odp_buffer_to_event(buf2)));
+ }
+ CU_ASSERT_FATAL(seq > 2);
+
+ arg_ptr = args;
+ odp_cunit_thread_create(globals->num_workers, sched_and_plain_thread, &arg_ptr, 0, 0);
+
+ odp_cunit_thread_join(globals->num_workers);
+
+ /* Check plain queue sequence numbers and free events */
+ first = 1;
+ while (1) {
+ ev = odp_queue_deq(plain_queue);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ buf_data = odp_buffer_addr(odp_buffer_from_event(ev));
+ seq = *buf_data;
+
+ if (first) {
+ first = 0;
+ prev_seq = seq;
+ continue;
+ }
+
+ CU_ASSERT(seq == prev_seq + 1 || seq == 0)
+ prev_seq = seq;
+ odp_event_free(ev);
+ }
+
+ /* Check scheduled queue sequence numbers and free events */
+ first = 1;
+ while (1) {
+ ev = odp_schedule(NULL, wait);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ buf_data = odp_buffer_addr(odp_buffer_from_event(ev));
+ seq = *buf_data;
+
+ if (first) {
+ first = 0;
+ prev_seq = seq;
+ continue;
+ }
+
+ CU_ASSERT(seq == prev_seq + 1 || seq == 0)
+ prev_seq = seq;
+ odp_event_free(ev);
+ }
+
+ CU_ASSERT(!odp_queue_destroy(sched_queue));
+ CU_ASSERT(!odp_queue_destroy(plain_queue));
+ CU_ASSERT(!odp_pool_destroy(pool));
+}
+
/* Atomic scheduled queue used together with a plain queue */
static void scheduler_test_atomic_and_plain(void)
{
	scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ATOMIC);
}
+
/* Ordered scheduled queue used together with a plain queue */
static void scheduler_test_ordered_and_plain(void)
{
	scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ORDERED);
}
+
+static void scheduler_fifo_init(odp_schedule_sync_t sync, int multi, uint32_t num_thr)
+{
+ odp_queue_t queue;
+ odp_pool_t pool;
+ odp_buffer_t buf;
+ uint32_t *seq;
+ uint32_t i;
+ odp_queue_param_t queue_param;
+ odp_pool_param_t pool_param;
+ odp_schedule_capability_t sched_capa;
+ uint32_t num_events = FIFO_MAX_EVENTS;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+
+ /* Make sure events fit into the queue */
+ if (sched_capa.max_queue_size && num_events > sched_capa.max_queue_size)
+ num_events = sched_capa.max_queue_size;
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = sync;
+ queue_param.size = num_events;
+
+ queue = odp_queue_create("sched_fifo", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.size = 32;
+ pool_param.buf.num = num_events;
+
+ pool = odp_pool_create("sched_fifo", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num_events; i++) {
+ buf = odp_buffer_alloc(pool);
+ if (buf == ODP_BUFFER_INVALID)
+ break;
+
+ seq = odp_buffer_addr(buf);
+ *seq = i;
+ globals->fifo.event[i] = odp_buffer_to_event(buf);
+ }
+
+ CU_ASSERT_FATAL(i == num_events);
+
+ odp_barrier_init(&globals->fifo.barrier, num_thr);
+
+ globals->fifo.multi = multi;
+ globals->fifo.queue = queue;
+ globals->fifo.pool = pool;
+ globals->fifo.num_events = num_events;
+ globals->fifo.num_enq = 0;
+ globals->fifo.burst = 0;
+ globals->fifo.num_thr = num_thr;
+ odp_atomic_init_u32(&globals->fifo.cur_thr, 0);
+}
+
/*
 * Worker body for the FIFO tests. All threads first enqueue the
 * pre-allocated events in strict round robin order ('cur_thr' passes the
 * turn from thread to thread). Afterwards thread 0 schedules all events back
 * and verifies that their payload sequence numbers arrive in FIFO order,
 * then destroys the queue and the pool. The thread index is passed in 'arg'
 * as an integer. Returns 0 on success, -1 if too many events were enqueued.
 */
static int scheduler_fifo_test(void *arg)
{
	odp_queue_t from;
	odp_buffer_t buf;
	int ret;
	uint32_t *seq;
	uint32_t i, num, cur_thr;
	uint32_t num_enq = 0;
	uint32_t thr;
	uint16_t num_thr = globals->fifo.num_thr;
	uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
	int multi = globals->fifo.multi;
	odp_queue_t queue = globals->fifo.queue;
	odp_pool_t pool = globals->fifo.pool;
	uint32_t num_events = globals->fifo.num_events;
	odp_event_t events[num_events];

	/* Thread index as argument */
	thr = (uintptr_t)arg;

	odp_barrier_wait(&globals->fifo.barrier);

	/* Threads enqueue events in round robin */
	while (1) {
		/* Busy-wait until it is this thread's turn */
		cur_thr = odp_atomic_load_acq_u32(&globals->fifo.cur_thr);
		if (cur_thr != thr)
			continue;

		num_enq = globals->fifo.num_enq;

		if (num_enq >= num_events) {
			odp_atomic_store_u32(&globals->fifo.cur_thr, (cur_thr + 1) % num_thr);
			break;
		}

		if (multi) {
			/* Burst size cycles through 1..10 events */
			num = globals->fifo.burst + 1;
			globals->fifo.burst = num % 10;

			if (num > (num_events - num_enq))
				num = num_events - num_enq;

			ret = odp_queue_enq_multi(queue, &globals->fifo.event[num_enq], num);
			CU_ASSERT(ret > 0);
			CU_ASSERT_FATAL(ret <= (int)num);
		} else {
			ret = odp_queue_enq(queue, globals->fifo.event[num_enq]);
			CU_ASSERT(ret == 0);
			if (ret == 0)
				ret = 1;
		}

		if (ret > 0)
			globals->fifo.num_enq += ret;

		/* Release store publishes the updates and passes the turn on */
		odp_atomic_store_rel_u32(&globals->fifo.cur_thr, (cur_thr + 1) % num_thr);
	}

	odp_barrier_wait(&globals->fifo.barrier);

	if (thr != 0)
		return 0;

	/* Thread 0 checks event order and destroys queue/pool */
	CU_ASSERT(globals->fifo.num_enq == num_events);
	if (globals->fifo.num_enq > num_events)
		return -1;

	num_events = globals->fifo.num_enq;

	for (i = 0; i < num_events; i++)
		events[i] = ODP_EVENT_INVALID;

	num = 0;

	/* Collect all events back in scheduling order */
	while (1) {
		uint32_t num_recv;
		int max_num = 3;
		odp_event_t ev[max_num];

		from = ODP_QUEUE_INVALID;

		if (multi) {
			ret = odp_schedule_multi(&from, wait, ev, max_num);
			CU_ASSERT_FATAL(ret >= 0 && ret <= max_num);

			if (ret == 0)
				break;
		} else {
			ev[0] = odp_schedule(&from, wait);
			if (ev[0] == ODP_EVENT_INVALID)
				break;

			ret = 1;
		}

		num_recv = ret;
		CU_ASSERT(num < num_events);

		if (num >= num_events) {
			/* Drop extra events */
			odp_event_free_multi(ev, num_recv);
			continue;
		}

		for (i = 0; i < num_recv; i++) {
			CU_ASSERT(odp_event_type(ev[i]) == ODP_EVENT_BUFFER);
			events[num] = ev[i];
			num++;
		}

		CU_ASSERT(from == queue);
	}

	CU_ASSERT(num == num_events);

	/* Verify FIFO order of the collected payload sequence numbers */
	for (i = 0; i < num; i++) {
		buf = odp_buffer_from_event(events[i]);
		seq = odp_buffer_addr(buf);

		CU_ASSERT(*seq == i);

		if (*seq != i)
			ODPH_ERR("Bad sequence number %u, expected %u\n", *seq, i);

		odp_buffer_free(buf);
	}

	CU_ASSERT_FATAL(!odp_queue_destroy(queue));
	CU_ASSERT_FATAL(!odp_pool_destroy(pool));

	return 0;
}
+
/* FIFO order: parallel sync, single-event enqueue, one thread */
static void scheduler_fifo_parallel_single(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_PARALLEL, 0, 1);
	scheduler_fifo_test(0);
}
+
/* FIFO order: parallel sync, burst enqueue, one thread */
static void scheduler_fifo_parallel_multi(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_PARALLEL, 1, 1);
	scheduler_fifo_test(0);
}
+
/* FIFO order: atomic sync, single-event enqueue, one thread */
static void scheduler_fifo_atomic_single(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_ATOMIC, 0, 1);
	scheduler_fifo_test(0);
}
+
/* FIFO order: atomic sync, burst enqueue, one thread */
static void scheduler_fifo_atomic_multi(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_ATOMIC, 1, 1);
	scheduler_fifo_test(0);
}
+
/* FIFO order: ordered sync, single-event enqueue, one thread */
static void scheduler_fifo_ordered_single(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_ORDERED, 0, 1);
	scheduler_fifo_test(0);
}
+
/* FIFO order: ordered sync, burst enqueue, one thread */
static void scheduler_fifo_ordered_multi(void)
{
	scheduler_fifo_init(ODP_SCHED_SYNC_ORDERED, 1, 1);
	scheduler_fifo_test(0);
}
+
+static void scheduler_fifo_mt(odp_schedule_sync_t sync, int multi)
+{
+ uint32_t i;
+ uint32_t num_thr = globals->num_workers;
+ uintptr_t arg[num_thr];
+
+ scheduler_fifo_init(sync, multi, num_thr);
+
+ for (i = 0; i < num_thr; i++)
+ arg[i] = i;
+
+ odp_cunit_thread_create(num_thr, scheduler_fifo_test, (void **)&arg[0], 1, 0);
+
+ /* Wait for worker threads to terminate */
+ odp_cunit_thread_join(num_thr);
+}
+
/* Multi-thread FIFO order: parallel sync, single-event enqueue */
static void scheduler_fifo_mt_parallel_single(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_PARALLEL, 0);
}
+
/* Multi-thread FIFO order: parallel sync, burst enqueue */
static void scheduler_fifo_mt_parallel_multi(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_PARALLEL, 1);
}
+
/* Multi-thread FIFO order: atomic sync, single-event enqueue */
static void scheduler_fifo_mt_atomic_single(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_ATOMIC, 0);
}
+
/* Multi-thread FIFO order: atomic sync, burst enqueue */
static void scheduler_fifo_mt_atomic_multi(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_ATOMIC, 1);
}
+
/* Multi-thread FIFO order: ordered sync, single-event enqueue */
static void scheduler_fifo_mt_ordered_single(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_ORDERED, 0);
}
+
/* Multi-thread FIFO order: ordered sync, burst enqueue */
static void scheduler_fifo_mt_ordered_multi(void)
{
	scheduler_fifo_mt(ODP_SCHED_SYNC_ORDERED, 1);
}
+
/*
 * Worker for the atomicity test. Events come from a single atomic queue, so
 * at most one thread may hold the queue's scheduling context at a time. The
 * per-queue state (stored as queue context) must toggle cleanly 0 -> 1 -> 0;
 * a failing CAS means two threads were inside the atomic context at once.
 */
static int atomicity_test_run(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_event_t ev;
	odp_queue_t atomic_queue = args->globals->atomicity_q.handle;
	odp_queue_t from;
	odp_atomic_u32_t *state;
	uint32_t old;
	uint32_t num_processed = 0;

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (num_processed < ATOMICITY_ROUNDS) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			continue;

		CU_ASSERT(from == atomic_queue);
		if (from != atomic_queue) {
			odp_event_free(ev);
			continue;
		}

		state = odp_queue_context(from);
		CU_ASSERT_FATAL(state != NULL);

		/* Enter: state must be free (0), mark it busy (1) */
		old = 0;
		CU_ASSERT_FATAL(odp_atomic_cas_acq_rel_u32(state, &old, 1));

		/* Hold atomic context a while to better reveal possible atomicity bugs */
		odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);

		/* Leave: state must still be busy (1), mark it free (0) */
		old = 1;
		CU_ASSERT_FATAL(odp_atomic_cas_acq_rel_u32(state, &old, 0));

		CU_ASSERT_FATAL(odp_queue_enq(from, ev) == 0);

		num_processed++;
	}

	/* Release atomic context and get rid of possible prescheduled events */
	odp_schedule_pause();
	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) != ODP_EVENT_INVALID)
		CU_ASSERT_FATAL(odp_queue_enq(atomic_queue, ev) == 0);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	odp_schedule_resume();
	drain_queues();

	return 0;
}
+
+static void scheduler_test_atomicity(void)
+{
+ odp_shm_t shm;
+ test_globals_t *globals;
+ thread_args_t *args;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_queue_param_t queue_param;
+ int i;
+ void *arg_ptr;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(globals != NULL);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(args != NULL);
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.size = globals->max_sched_queue_size;
+ queue_param.context = &globals->atomicity_q.state;
+ queue_param.context_len = sizeof(globals->atomicity_q.state);
+
+ queue = odp_queue_create("atomicity_test", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (i = 0; i < BUFS_PER_QUEUE; i++) {
+ odp_buffer_t buf = odp_buffer_alloc(pool);
+
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ CU_ASSERT_FATAL(odp_queue_enq(queue, odp_buffer_to_event(buf)) == 0);
+ }
+ globals->atomicity_q.handle = queue;
+ odp_atomic_init_u32(&globals->atomicity_q.state, 0);
+
+ /* Create and launch worker threads */
+ args->num_workers = globals->num_workers;
+ arg_ptr = args;
+ odp_cunit_thread_create(globals->num_workers, atomicity_test_run, &arg_ptr, 0, 0);
+
+ /* Wait for worker threads to terminate */
+ odp_cunit_thread_join(globals->num_workers);
+
+ odp_queue_destroy(globals->atomicity_q.handle);
+}
+
+static int create_queues(test_globals_t *globals)
+{
+ int i, j, prios, rc;
+ odp_queue_capability_t queue_capa;
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t default_config;
+ odp_pool_t queue_ctx_pool;
+ odp_pool_param_t params;
+ odp_buffer_t queue_ctx_buf;
+ queue_context *qctx, *pqctx;
+ uint32_t ndx;
+ odp_queue_param_t p;
+ unsigned int num_sched;
+ unsigned int num_plain;
+ int queues_per_prio;
+ int sched_types;
+
+ if (odp_queue_capability(&queue_capa) < 0) {
+ ODPH_ERR("Queue capability query failed\n");
+ return -1;
+ }
+
+ if (odp_schedule_capability(&sched_capa) < 0) {
+ ODPH_ERR("Queue capability query failed\n");
+ return -1;
+ }
+
+ /* Limit to test maximum */
+ if (sched_capa.max_ordered_locks > MAX_ORDERED_LOCKS) {
+ sched_capa.max_ordered_locks = MAX_ORDERED_LOCKS;
+ printf("Testing only %u ordered locks\n",
+ sched_capa.max_ordered_locks);
+ }
+
+ globals->max_sched_queue_size = BUFS_PER_QUEUE_EXCL;
+ odp_schedule_config_init(&default_config);
+ if (default_config.queue_size &&
+ globals->max_sched_queue_size > default_config.queue_size) {
+ printf("Max sched queue size %u\n", default_config.queue_size);
+ globals->max_sched_queue_size = default_config.queue_size;
+ }
+
+ prios = odp_schedule_num_prio();
+
+ /* Adjust 'queues_per_prio' until all required queues can be created */
+ sched_types = 3;
+ queues_per_prio = QUEUES_PER_PRIO;
+ num_sched = (prios * queues_per_prio * sched_types) + CHAOS_NUM_QUEUES;
+ num_plain = (prios * queues_per_prio);
+ while ((num_sched > default_config.num_queues ||
+ num_plain > queue_capa.plain.max_num ||
+ num_sched + num_plain > queue_capa.max_queues) &&
+ queues_per_prio) {
+ queues_per_prio--;
+ num_sched = (prios * queues_per_prio * sched_types) +
+ CHAOS_NUM_QUEUES;
+ num_plain = (prios * queues_per_prio);
+ }
+ if (!queues_per_prio) {
+ ODPH_ERR("Not enough queues. At least %d scheduled queues and "
+ "%d plain queues required.\n",
+ ((prios * sched_types) + CHAOS_NUM_QUEUES), prios);
+ return -1;
+ }
+ globals->queues_per_prio = queues_per_prio;
+
+ odp_pool_param_init(&params);
+ params.buf.size = sizeof(queue_context);
+ params.buf.num = prios * queues_per_prio * 2;
+ params.type = ODP_POOL_BUFFER;
+
+ queue_ctx_pool = odp_pool_create(QUEUE_CTX_POOL_NAME, &params);
+
+ if (queue_ctx_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Pool creation failed (queue ctx)\n");
+ return -1;
+ }
+ globals->queue_ctx_pool = queue_ctx_pool;
+
+ for (i = 0; i < prios; i++) {
+ odp_queue_param_init(&p);
+ p.type = ODP_QUEUE_TYPE_SCHED;
+ p.sched.prio = i;
+
+ for (j = 0; j < queues_per_prio; j++) {
+ /* Per sched sync type */
+ char name[32];
+ odp_queue_t q, pq;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Parallel queue create failed\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ p.size = globals->max_sched_queue_size;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Atomic queue create failed\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ pq = odp_queue_create(name, NULL);
+ if (pq == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Plain queue create failed\n");
+ return -1;
+ }
+
+ queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
+
+ if (queue_ctx_buf == ODP_BUFFER_INVALID) {
+ ODPH_ERR("Cannot allocate plain queue ctx buf\n");
+ return -1;
+ }
+
+ pqctx = odp_buffer_addr(queue_ctx_buf);
+ pqctx->ctx_handle = queue_ctx_buf;
+ pqctx->sequence = 0;
+
+ rc = odp_queue_context_set(pq, pqctx, 0);
+
+ if (rc != 0) {
+ ODPH_ERR("Cannot set plain queue context\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ p.sched.lock_count = sched_capa.max_ordered_locks;
+ p.size = 0;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Ordered queue create failed\n");
+ return -1;
+ }
+ if (odp_queue_lock_count(q) !=
+ sched_capa.max_ordered_locks) {
+ printf("Queue %" PRIu64 " created with "
+ "%d locks instead of expected %d\n",
+ odp_queue_to_u64(q),
+ odp_queue_lock_count(q),
+ sched_capa.max_ordered_locks);
+ return -1;
+ }
+
+ queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
+
+ if (queue_ctx_buf == ODP_BUFFER_INVALID) {
+ ODPH_ERR("Cannot allocate queue ctx buf\n");
+ return -1;
+ }
+
+ qctx = odp_buffer_addr(queue_ctx_buf);
+ qctx->ctx_handle = queue_ctx_buf;
+ qctx->pq_handle = pq;
+ qctx->sequence = 0;
+
+ for (ndx = 0;
+ ndx < sched_capa.max_ordered_locks;
+ ndx++) {
+ qctx->lock_sequence[ndx] = 0;
+ }
+
+ rc = odp_queue_context_set(q, qctx, 0);
+
+ if (rc != 0) {
+ ODPH_ERR("Cannot set queue context\n");
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int destroy_queue(const char *name)
+{
+ odp_queue_t q;
+ queue_context *qctx;
+
+ q = odp_queue_lookup(name);
+
+ if (q == ODP_QUEUE_INVALID)
+ return -1;
+ qctx = odp_queue_context(q);
+ if (qctx)
+ odp_buffer_free(qctx->ctx_handle);
+
+ return odp_queue_destroy(q);
+}
+
+static int destroy_queues(void)
+{
+ int i, j, prios;
+
+ prios = odp_schedule_num_prio();
+
+ for (i = 0; i < prios; i++) {
+ for (j = 0; j < globals->queues_per_prio; j++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+ }
+ }
+
+ if (odp_pool_destroy(globals->queue_ctx_pool) != 0) {
+ ODPH_ERR("Failed to destroy queue ctx pool\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int check_flow_aware_support(void)
+{
+ if (globals->num_flows == 0) {
+ printf("\nTest: scheduler_test_flow_aware: SKIPPED\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
/*
 * Flow aware scheduling: tag events with flow ids in round robin, enqueue
 * them to a single scheduled queue, then schedule them back and verify with
 * per-flow counters that every event of every flow is delivered exactly
 * once. Repeated for parallel, atomic and ordered sync types.
 */
static void scheduler_test_flow_aware(void)
{
	odp_schedule_capability_t sched_capa;
	odp_schedule_config_t sched_config;
	odp_pool_param_t pool_param;
	odp_pool_t pool;
	odp_queue_param_t queue_param;
	odp_queue_t queue, from;
	uint32_t j, queue_size, num, num_flows, flow_id;
	odp_buffer_t buf;
	odp_event_t ev;
	int i, ret;
	uint32_t flow_stat[MAX_FLOWS];
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};

	/* Test should be skipped when no flows */
	CU_ASSERT_FATAL(globals->num_flows);
	CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);

	num_flows = globals->num_flows;

	queue_size = FLOW_TEST_NUM_EV;
	odp_schedule_config_init(&sched_config);
	if (sched_config.queue_size &&
	    queue_size > sched_config.queue_size)
		queue_size = sched_config.queue_size;

	odp_pool_param_init(&pool_param);
	pool_param.buf.size = 100;
	pool_param.buf.align = 0;
	pool_param.buf.num = FLOW_TEST_NUM_EV;
	pool_param.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("test_flow_aware", &pool_param);

	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	/* One pass per sync type in sync[] */
	for (i = 0; i < 3; i++) {
		memset(flow_stat, 0, sizeof(flow_stat));
		flow_id = 0;

		sched_queue_param_init(&queue_param);
		queue_param.sched.sync = sync[i];
		queue_param.size = queue_size;

		queue = odp_queue_create("test_flow_aware", &queue_param);

		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

		/* Tag and enqueue events, flow ids assigned in round robin */
		for (j = 0; j < queue_size; j++) {
			buf = odp_buffer_alloc(pool);
			CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

			ev = odp_buffer_to_event(buf);

			odp_event_flow_id_set(ev, flow_id);
			CU_ASSERT(odp_event_flow_id(ev) == flow_id);

			ret = odp_queue_enq(queue, ev);
			CU_ASSERT(ret == 0);

			if (ret) {
				odp_event_free(ev);
				continue;
			}

			flow_stat[flow_id]++;

			flow_id++;
			if (flow_id == num_flows)
				flow_id = 0;
		}

		/* Schedule everything back; each receive cancels one count */
		num = 0;
		for (j = 0; j < 100 * FLOW_TEST_NUM_EV; j++) {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID)
				continue;

			CU_ASSERT(from == queue);

			flow_id = odp_event_flow_id(ev);
			flow_stat[flow_id]--;

			odp_event_free(ev);
			num++;
		}

		CU_ASSERT(num == queue_size);

		for (j = 0; j < num_flows; j++) {
			CU_ASSERT(flow_stat[j] == 0);
			/* NOTE(review): flow_stat is uint32_t; PRIu32 would
			 * match the argument better than PRIi32 */
			if (flow_stat[j])
				printf("flow id %" PRIu32 ", missing %" PRIi32
				       " events\n", j, flow_stat[j]);
		}

		drain_queues();
		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
	}

	CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+
/* Queues created but no events: exercises the debug print path only */
static void scheduler_test_print(void)
{
	odp_schedule_print();
}
+
/* Queues with initial events enqueued: run an atomic multi-queue pass with
 * the debug print enabled via the global flag */
static void scheduler_test_mq_mt_prio_a_print(void)
{
	int prio = odp_schedule_num_prio();

	globals->test_debug_print = 1;

	parallel_execute(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
			 SCHD_ONE, DISABLE_EXCL_ATOMIC);

	globals->test_debug_print = 0;
}
+
+static int scheduler_test_global_init(void)
+{
+ odp_shm_t shm;
+ thread_args_t *args;
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ uint64_t num_flows;
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t sched_config;
+
+ shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+ sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared memory reserve failed (globals)\n");
+ return -1;
+ }
+
+ globals = odp_shm_addr(shm);
+
+ if (!globals) {
+ ODPH_ERR("Shared memory reserve failed (globals)\n");
+ return -1;
+ }
+
+ memset(globals, 0, sizeof(test_globals_t));
+ globals->shm_glb = shm;
+
+ globals->num_workers = odp_cpumask_default_worker(NULL, 0);
+ if (globals->num_workers > MAX_WORKERS)
+ globals->num_workers = MAX_WORKERS;
+
+ shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared memory reserve failed (args)\n");
+ return -1;
+ }
+
+ args = odp_shm_addr(shm);
+ globals->shm_args = shm;
+
+ if (!args) {
+ ODPH_ERR("Shared memory reserve failed (args)\n");
+ return -1;
+ }
+
+ memset(args, 0, sizeof(thread_args_t));
+
+ /* Barrier to sync test case execution */
+ odp_barrier_init(&globals->barrier, globals->num_workers);
+ odp_ticketlock_init(&globals->lock);
+ odp_spinlock_init(&globals->atomic_lock);
+
+ odp_pool_param_init(&params);
+ params.buf.size = BUF_SIZE;
+ params.buf.align = 0;
+ params.buf.num = MSG_POOL_SIZE;
+ params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create(MSG_POOL_NAME, &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Pool creation failed (msg)\n");
+ return -1;
+ }
+
+ globals->pool = pool;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("odp_schedule_capability() failed\n");
+ return -1;
+ }
+
+ num_flows = 0;
+ odp_schedule_config_init(&sched_config);
+
+ /* Enable flow aware scheduling */
+ if (sched_capa.max_flow_id > 0) {
+ num_flows = MAX_FLOWS;
+ if ((MAX_FLOWS - 1) > sched_capa.max_flow_id)
+ num_flows = sched_capa.max_flow_id + 1;
+
+ sched_config.max_flow_id = num_flows - 1;
+ }
+
+ globals->num_flows = num_flows;
+
+ /* Configure the scheduler. All test cases share the config. */
+ if (odp_schedule_config(&sched_config)) {
+ ODPH_ERR("odp_schedule_config() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int scheduler_multi_suite_init(void)
+{
+ /* Line feeds to separate output from basic suite prints */
+ printf("\n\n");
+
+ if (create_queues(globals) != 0)
+ return -1;
+
+ return 0;
+}
+
/* Suite term: tear down the shared queues and report inactive tests */
static int scheduler_multi_suite_term(void)
{
	if (destroy_queues()) {
		ODPH_ERR("Failed to destroy queues\n");
		return -1;
	}

	return odp_cunit_print_inactive() ? -1 : 0;
}
+
/* Basic suite needs no per-suite setup */
static int scheduler_basic_suite_init(void)
{
	return 0;
}
+
/* Suite term: report inactive (conditionally skipped) tests */
static int scheduler_basic_suite_term(void)
{
	return odp_cunit_print_inactive() ? -1 : 0;
}
+
+static int global_init(odp_instance_t *inst)
+{
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed.\n");
+ return -1;
+ }
+
+ if (scheduler_test_global_init()) {
+ ODPH_ERR("scheduler test global init failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int global_term(odp_instance_t inst)
+{
+ if (odp_pool_destroy(globals->pool))
+ ODPH_ERR("Failed to destroy pool\n");
+
+ if (odp_shm_free(globals->shm_args))
+ ODPH_ERR("Failed to free shm\n");
+
+ if (odp_shm_free(globals->shm_glb))
+ ODPH_ERR("Failed to free shm\n");
+
+ if (odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* Basic scheduler test suite. Entries run in array order. */
odp_testinfo_t scheduler_basic_suite[] = {
	ODP_TEST_INFO(scheduler_test_init),
	ODP_TEST_INFO(scheduler_test_capa),
	ODP_TEST_INFO(scheduler_test_wait_time),
	ODP_TEST_INFO(scheduler_test_num_prio),
	ODP_TEST_INFO(scheduler_test_queue_destroy),
	ODP_TEST_INFO(scheduler_test_wait),
	ODP_TEST_INFO(scheduler_test_queue_size),
	ODP_TEST_INFO(scheduler_test_full_queues),
	ODP_TEST_INFO(scheduler_test_max_queues_p),
	ODP_TEST_INFO(scheduler_test_max_queues_a),
	ODP_TEST_INFO(scheduler_test_max_queues_o),
	ODP_TEST_INFO(scheduler_test_order_ignore),
	ODP_TEST_INFO(scheduler_test_group_info_predef),
	ODP_TEST_INFO(scheduler_test_create_group),
	ODP_TEST_INFO(scheduler_test_create_max_groups),
	ODP_TEST_INFO(scheduler_test_groups),
	ODP_TEST_INFO(scheduler_test_pause_resume),
	ODP_TEST_INFO(scheduler_test_pause_enqueue),
	ODP_TEST_INFO(scheduler_test_ordered_lock),
	ODP_TEST_INFO(scheduler_test_order_wait_1_thread),
	ODP_TEST_INFO(scheduler_test_order_wait_2_threads),
	/* Active only when flow aware scheduling is supported */
	ODP_TEST_INFO_CONDITIONAL(scheduler_test_flow_aware,
				  check_flow_aware_support),
	ODP_TEST_INFO(scheduler_test_parallel),
	ODP_TEST_INFO(scheduler_test_atomic),
	ODP_TEST_INFO(scheduler_test_ordered),
	ODP_TEST_INFO(scheduler_test_atomic_and_plain),
	ODP_TEST_INFO(scheduler_test_ordered_and_plain),
	/* FIFO ordering tests: single thread and multi-thread variants */
	ODP_TEST_INFO(scheduler_fifo_parallel_single),
	ODP_TEST_INFO(scheduler_fifo_parallel_multi),
	ODP_TEST_INFO(scheduler_fifo_atomic_single),
	ODP_TEST_INFO(scheduler_fifo_atomic_multi),
	ODP_TEST_INFO(scheduler_fifo_ordered_single),
	ODP_TEST_INFO(scheduler_fifo_ordered_multi),
	ODP_TEST_INFO(scheduler_fifo_mt_parallel_single),
	ODP_TEST_INFO(scheduler_fifo_mt_parallel_multi),
	ODP_TEST_INFO(scheduler_fifo_mt_atomic_single),
	ODP_TEST_INFO(scheduler_fifo_mt_atomic_multi),
	ODP_TEST_INFO(scheduler_fifo_mt_ordered_single),
	ODP_TEST_INFO(scheduler_fifo_mt_ordered_multi),
	ODP_TEST_INFO(scheduler_test_atomicity),
	ODP_TEST_INFO_NULL
};
+
/* Scheduler test suite which runs events through hundreds of queues. Queues are created once
 * in suite init phase (see scheduler_multi_suite_init) and destroyed in the
 * term phase. Entries run in array order. */
odp_testinfo_t scheduler_multi_suite[] = {
	ODP_TEST_INFO(scheduler_test_print),
	ODP_TEST_INFO(scheduler_test_chaos),
	ODP_TEST_INFO(scheduler_test_1q_1t_n),
	ODP_TEST_INFO(scheduler_test_1q_1t_a),
	ODP_TEST_INFO(scheduler_test_1q_1t_o),
	ODP_TEST_INFO(scheduler_test_mq_1t_n),
	ODP_TEST_INFO(scheduler_test_mq_1t_a),
	ODP_TEST_INFO(scheduler_test_mq_1t_o),
	ODP_TEST_INFO(scheduler_test_mq_1t_prio_n),
	ODP_TEST_INFO(scheduler_test_mq_1t_prio_a),
	ODP_TEST_INFO(scheduler_test_mq_1t_prio_o),
	ODP_TEST_INFO(scheduler_test_mq_mt_prio_n),
	ODP_TEST_INFO(scheduler_test_mq_mt_prio_a),
	ODP_TEST_INFO(scheduler_test_mq_mt_prio_o),
	ODP_TEST_INFO(scheduler_test_1q_mt_a_excl),
	ODP_TEST_INFO(scheduler_test_multi_1q_1t_n),
	ODP_TEST_INFO(scheduler_test_multi_1q_1t_a),
	ODP_TEST_INFO(scheduler_test_multi_1q_1t_o),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_n),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_a),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_o),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_n),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_a),
	ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_o),
	ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_n),
	ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_a),
	ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_o),
	ODP_TEST_INFO(scheduler_test_multi_1q_mt_a_excl),
	ODP_TEST_INFO(scheduler_test_mq_mt_prio_a_print),
	ODP_TEST_INFO_NULL
};
+
/* All suites registered by main(); each with its init/term callbacks */
odp_suiteinfo_t scheduler_suites[] = {
	{"Scheduler basic",
	 scheduler_basic_suite_init, scheduler_basic_suite_term, scheduler_basic_suite
	},
	{"Scheduler multi",
	 scheduler_multi_suite_init, scheduler_multi_suite_term, scheduler_multi_suite
	},

	ODP_SUITE_INFO_NULL,
};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(global_init);
+ odp_cunit_register_global_term(global_term);
+
+ ret = odp_cunit_register(scheduler_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/scheduler/scheduler_no_predef_groups.c b/test/validation/api/scheduler/scheduler_no_predef_groups.c
new file mode 100644
index 000000000..ad6f6d3a2
--- /dev/null
+++ b/test/validation/api/scheduler/scheduler_no_predef_groups.c
@@ -0,0 +1,225 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include <odp/helper/odph_api.h>
+
+static int drain_queues(void)
+{
+ odp_event_t ev;
+ uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ int ret = 0;
+
+ while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+ ret++;
+ }
+
+ return ret;
+}
+
+static void scheduler_test_create_group(void)
+{
+ odp_thrmask_t mask;
+ odp_schedule_group_t group;
+ int thr_id;
+ odp_pool_t pool;
+ odp_pool_param_t pool_params;
+ odp_queue_t queue, from;
+ odp_queue_param_t qp;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint64_t wait_time;
+
+ thr_id = odp_thread_id();
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr_id);
+
+ group = odp_schedule_group_create("create_group", &mask);
+ CU_ASSERT_FATAL(group != ODP_SCHED_GROUP_INVALID);
+
+ odp_pool_param_init(&pool_params);
+ pool_params.buf.size = 100;
+ pool_params.buf.num = 2;
+ pool_params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("create_group", &pool_params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = odp_schedule_default_prio();
+ qp.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ qp.sched.group = group;
+
+ queue = odp_queue_create("create_group", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ ev = odp_buffer_to_event(buf);
+
+ CU_ASSERT_FATAL(odp_queue_enq(queue, ev) == 0);
+
+ wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ ev = odp_schedule(&from, wait_time);
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ /* Free schedule context */
+ drain_queues();
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(group) == 0);
+
+ /* Run scheduler after the group has been destroyed */
+ CU_ASSERT_FATAL(odp_schedule(NULL, wait_time) == ODP_EVENT_INVALID);
+}
+
+static void scheduler_test_create_max_groups(void)
+{
+ odp_thrmask_t mask;
+ int thr_id;
+ uint32_t i;
+ odp_queue_param_t queue_param;
+ odp_schedule_capability_t sched_capa;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+ uint32_t max_groups = sched_capa.max_groups;
+ odp_schedule_group_t group[max_groups];
+ odp_queue_t queue[max_groups];
+
+ CU_ASSERT_FATAL(max_groups > 0);
+ CU_ASSERT_FATAL(sched_capa.max_queues >= sched_capa.max_groups);
+
+ thr_id = odp_thread_id();
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr_id);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ for (i = 0; i < max_groups; i++) {
+ group[i] = odp_schedule_group_create("max_groups", &mask);
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("schedule group create %u failed\n", i);
+ break;
+ }
+
+ queue_param.sched.group = group[i];
+ queue[i] = odp_queue_create("max_groups", &queue_param);
+ CU_ASSERT_FATAL(queue[i] != ODP_QUEUE_INVALID);
+ }
+
+ CU_ASSERT(i == max_groups);
+ max_groups = i;
+
+ for (i = 0; i < max_groups; i++) {
+ CU_ASSERT_FATAL(odp_queue_destroy(queue[i]) == 0);
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(group[i]) == 0);
+ }
+}
+
+static int scheduler_suite_init(void)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t sched_config;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("odp_schedule_capability() failed\n");
+ return -1;
+ }
+
+ odp_schedule_config_init(&sched_config);
+
+ /* Disable all predefined groups */
+ sched_config.sched_group.all = false;
+ sched_config.sched_group.control = false;
+ sched_config.sched_group.worker = false;
+
+ /* Configure the scheduler. All test cases share the config. */
+ if (odp_schedule_config(&sched_config)) {
+ ODPH_ERR("odp_schedule_config() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int scheduler_suite_term(void)
+{
+ if (odp_cunit_print_inactive())
+ return -1;
+
+ return 0;
+}
+
+/* Default scheduler config */
+odp_testinfo_t scheduler_suite[] = {
+ ODP_TEST_INFO(scheduler_test_create_group),
+ ODP_TEST_INFO(scheduler_test_create_max_groups),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t scheduler_suites[] = {
+ {"Scheduler no predefined groups",
+ scheduler_suite_init, scheduler_suite_term, scheduler_suite
+ },
+ ODP_SUITE_INFO_NULL,
+};
+
+static int global_init(odp_instance_t *inst)
+{
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed.\n");
+ return -1;
+ }
+
+ if (odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(global_init);
+ ret = odp_cunit_register(scheduler_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/shmem/.gitignore b/test/validation/api/shmem/.gitignore
index 4d82fd53a..4d82fd53a 100644
--- a/test/common_plat/validation/api/shmem/.gitignore
+++ b/test/validation/api/shmem/.gitignore
diff --git a/test/validation/api/shmem/Makefile.am b/test/validation/api/shmem/Makefile.am
new file mode 100644
index 000000000..52e33fdca
--- /dev/null
+++ b/test/validation/api/shmem/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = shmem_main
+shmem_main_SOURCES = shmem.c
diff --git a/test/common_plat/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 0e757a708..9e91dab35 100644
--- a/test/common_plat/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,9 +7,10 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
-#include "shmem.h"
+#include <odp/helper/odph_api.h>
#include <stdlib.h>
+#define MAX_WORKERS 32
#define ALIGN_SIZE (128)
#define MEM_NAME "test_shmem"
#define NAME_LEN (sizeof(MEM_NAME) + 20)
@@ -20,6 +22,8 @@
#define STRESS_SIZE 32 /* power of 2 and <=256 */
#define STRESS_RANDOM_SZ 5
#define STRESS_ITERATION 5000
+#define MAX_SIZE_TESTED (100 * 1000000UL)
+#define MAX_ALIGN_TESTED (1024 * 1024)
typedef enum {
STRESS_FREE, /* entry is free and can be allocated */
@@ -69,6 +73,9 @@ typedef struct {
int data[BIG_MEM];
} shared_test_data_big_t;
+/* SHM capability saved at suite init phase */
+static odp_shm_capability_t _global_shm_capa;
+
/*
* thread part for the shmem_test_basic test
*/
@@ -78,6 +85,7 @@ static int run_test_basic_thread(void *arg ODP_UNUSED)
odp_shm_t shm;
shared_test_data_t *shared_test_data;
int thr;
+ int pagesz_match = 0;
thr = odp_thread_id();
printf("Thread %i starts\n", thr);
@@ -91,13 +99,32 @@ static int run_test_basic_thread(void *arg ODP_UNUSED)
odp_shm_print_all();
CU_ASSERT(TEST_SHARE_FOO == shared_test_data->foo);
CU_ASSERT(TEST_SHARE_BAR == shared_test_data->bar);
- CU_ASSERT(0 == odp_shm_info(shm, &info));
+ CU_ASSERT_FATAL(0 == odp_shm_info(shm, &info));
CU_ASSERT(0 == strcmp(MEM_NAME, info.name));
CU_ASSERT(0 == info.flags);
CU_ASSERT(shared_test_data == info.addr);
CU_ASSERT(sizeof(shared_test_data_t) <= info.size);
- CU_ASSERT((info.page_size == odp_sys_huge_page_size()) ||
- (info.page_size == odp_sys_page_size()))
+
+ if (info.page_size == odp_sys_page_size()) {
+ pagesz_match = 1;
+ } else {
+ int num = odp_sys_huge_page_size_all(NULL, 0);
+
+ if (num > 0) {
+ uint64_t pagesz_tbs[num];
+ int i;
+
+ num = odp_sys_huge_page_size_all(pagesz_tbs, num);
+ for (i = 0; i < num; i++) {
+ if (info.page_size == pagesz_tbs[i]) {
+ pagesz_match = 1;
+ break;
+ }
+ }
+ }
+ }
+ CU_ASSERT(pagesz_match == 1);
+
odp_shm_print_all();
fflush(stdout);
@@ -107,31 +134,64 @@ static int run_test_basic_thread(void *arg ODP_UNUSED)
/*
* test basic things: shmem creation, info, share, and free
*/
-void shmem_test_basic(void)
+static void shmem_test_multi_thread(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t shm2;
shared_test_data_t *shared_test_data;
- odp_cpumask_t unused;
+ int i, num;
+ char max_name[ODP_SHM_NAME_LEN];
+
+ for (i = 0; i < ODP_SHM_NAME_LEN; i++)
+ max_name[i] = 'A' + (i % 26);
+
+ max_name[ODP_SHM_NAME_LEN - 1] = 0;
+
+ /* NULL name */
+ shm = odp_shm_reserve(NULL,
+ sizeof(shared_test_data_t), ALIGN_SIZE, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ shared_test_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != shared_test_data);
+ shared_test_data->foo = 0;
+ CU_ASSERT(0 == odp_shm_free(shm));
+
+ /* Maximum length name */
+ shm = odp_shm_reserve(max_name,
+ sizeof(shared_test_data_t), ALIGN_SIZE, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ shm2 = odp_shm_lookup(max_name);
+ CU_ASSERT(ODP_SHM_INVALID != shm2);
+ CU_ASSERT(odp_shm_addr(shm) == odp_shm_addr(shm2));
+ shared_test_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != shared_test_data);
+ shared_test_data->foo = 0;
+ CU_ASSERT(0 == odp_shm_free(shm));
+ /* Non-unique name */
shm = odp_shm_reserve(MEM_NAME,
sizeof(shared_test_data_t), ALIGN_SIZE, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
CU_ASSERT(odp_shm_to_u64(shm) !=
odp_shm_to_u64(ODP_SHM_INVALID));
-
- /* also check that another reserve with same name is accepted: */
shm2 = odp_shm_reserve(MEM_NAME,
sizeof(shared_test_data_t), ALIGN_SIZE, 0);
CU_ASSERT(ODP_SHM_INVALID != shm2);
CU_ASSERT(odp_shm_to_u64(shm2) !=
odp_shm_to_u64(ODP_SHM_INVALID));
+ CU_ASSERT(odp_shm_addr(shm) != odp_shm_addr(shm2));
+ shared_test_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != shared_test_data);
+ shared_test_data->foo = 0;
+ shared_test_data = odp_shm_addr(shm2);
+ CU_ASSERT_FATAL(NULL != shared_test_data);
+ shared_test_data->foo = 0;
CU_ASSERT(0 == odp_shm_free(shm));
CU_ASSERT(0 == odp_shm_free(shm2));
CU_ASSERT(ODP_SHM_INVALID == odp_shm_lookup(MEM_NAME));
+ /* Share with multiple threads */
shm = odp_shm_reserve(MEM_NAME,
sizeof(shared_test_data_t), ALIGN_SIZE, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
@@ -141,18 +201,378 @@ void shmem_test_basic(void)
shared_test_data->foo = TEST_SHARE_FOO;
shared_test_data->bar = TEST_SHARE_BAR;
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ num = odp_cpumask_default_worker(NULL, 0);
+
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ odp_barrier_init(&shared_test_data->test_barrier1, num);
+ odp_cunit_thread_create(num, run_test_basic_thread, NULL, 0, 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
- odp_barrier_init(&shared_test_data->test_barrier1, thrdarg.numthrds);
- odp_cunit_thread_create(run_test_basic_thread, &thrdarg);
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ odp_shm_print(shm);
CU_ASSERT(0 == odp_shm_free(shm));
}
+static void shmem_test_capability(void)
+{
+ odp_shm_capability_t capa;
+
+ CU_ASSERT_FATAL(odp_shm_capability(&capa) == 0);
+
+ CU_ASSERT(capa.max_blocks);
+
+ printf("\nSHM capability\n--------------\n");
+
+ printf(" max_blocks: %u\n", capa.max_blocks);
+ printf(" max_size: %" PRIu64 "\n", capa.max_size);
+ printf(" max_align: %" PRIu64 "\n", capa.max_align);
+ printf(" flags: ");
+ if (capa.flags & ODP_SHM_PROC)
+ printf("ODP_SHM_PROC ");
+ if (capa.flags & ODP_SHM_SINGLE_VA)
+ printf("ODP_SHM_SINGLE_VA ");
+ if (capa.flags & ODP_SHM_EXPORT)
+ printf("ODP_SHM_EXPORT ");
+ if (capa.flags & ODP_SHM_HP)
+ printf("ODP_SHM_HP ");
+ if (capa.flags & ODP_SHM_HW_ACCESS)
+ printf("ODP_SHM_HW_ACCESS ");
+ if (capa.flags & ODP_SHM_NO_HP)
+ printf("ODP_SHM_NO_HP ");
+ printf("\n\n");
+}
+
+static void shmem_test_reserve(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static void shmem_test_info(void)
+{
+ odp_shm_t shm;
+ void *addr;
+ int ret;
+ uint32_t i;
+ uint64_t sum_len;
+ uintptr_t next;
+ odp_shm_info_t info;
+ const char *name = "info_test";
+ uint32_t num_seg = 32;
+ uint64_t size = 4 * 1024 * 1024;
+ uint64_t align = 64;
+ int support_pa = 0;
+ int support_iova = 0;
+
+ if (_global_shm_capa.max_size && _global_shm_capa.max_size < size)
+ size = _global_shm_capa.max_size;
+
+ if (_global_shm_capa.max_align < align)
+ align = _global_shm_capa.max_align;
+
+ shm = odp_shm_reserve(name, size, align, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, size);
+
+ memset(&info, 0, sizeof(odp_shm_info_t));
+ ret = odp_shm_info(shm, &info);
+
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT(strcmp(name, info.name) == 0);
+ CU_ASSERT(info.addr == addr);
+ CU_ASSERT(info.size == size);
+ CU_ASSERT(info.page_size > 0);
+ CU_ASSERT(info.flags == 0);
+ CU_ASSERT(info.num_seg > 0);
+
+ /* Limit number of segments as it may get large with small page sizes */
+ if (info.num_seg < num_seg)
+ num_seg = info.num_seg;
+
+ /* all segments */
+ odp_shm_segment_info_t seginfo_a[num_seg];
+
+ memset(seginfo_a, 0, num_seg * sizeof(odp_shm_segment_info_t));
+
+ ret = odp_shm_segment_info(shm, 0, num_seg, seginfo_a);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT(seginfo_a[0].addr == (uintptr_t)addr);
+
+ sum_len = 0;
+ next = 0;
+
+ printf("\n\n");
+ printf("SHM segment info\n");
+ printf("%3s %16s %16s %16s %16s\n", "idx", "addr", "iova", "pa", "len");
+
+ for (i = 0; i < num_seg; i++) {
+ printf("%3u %16" PRIxPTR " %16" PRIx64 " %16" PRIx64 " %16" PRIu64 "\n",
+ i, seginfo_a[i].addr, seginfo_a[i].iova, seginfo_a[i].pa, seginfo_a[i].len);
+
+ CU_ASSERT(seginfo_a[i].addr != 0);
+ CU_ASSERT(seginfo_a[i].len > 0);
+
+ if (next) {
+ CU_ASSERT(seginfo_a[i].addr == next);
+ next += seginfo_a[i].len;
+ } else {
+ next = seginfo_a[i].addr + seginfo_a[i].len;
+ }
+
+ if (seginfo_a[i].iova != ODP_SHM_IOVA_INVALID)
+ support_iova = 1;
+
+ if (seginfo_a[i].pa != ODP_SHM_PA_INVALID)
+ support_pa = 1;
+
+ sum_len += seginfo_a[i].len;
+ }
+
+ printf("\n");
+ printf("IOVA: %s, PA: %s\n\n", support_iova ? "supported" : "not supported",
+ support_pa ? "supported" : "not supported");
+
+ CU_ASSERT(sum_len == size);
+
+ if (num_seg > 1) {
+ /* all, except the first one */
+ odp_shm_segment_info_t seginfo_b[num_seg];
+
+ memset(seginfo_b, 0xff, num_seg * sizeof(odp_shm_segment_info_t));
+
+ ret = odp_shm_segment_info(shm, 1, num_seg - 1, &seginfo_b[1]);
+ CU_ASSERT_FATAL(ret == 0);
+
+ for (i = 1; i < num_seg; i++) {
+ CU_ASSERT(seginfo_a[i].addr == seginfo_b[i].addr);
+ CU_ASSERT(seginfo_a[i].iova == seginfo_b[i].iova);
+ CU_ASSERT(seginfo_a[i].pa == seginfo_b[i].pa);
+ CU_ASSERT(seginfo_a[i].len == seginfo_b[i].len);
+ }
+ }
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_hp(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_HP)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+/*
+ * test reserving memory from huge pages
+ */
+static void shmem_test_flag_hp(void)
+{
+ odp_shm_t shm;
+ odp_shm_info_t info;
+ int i;
+ int num_sizes = odp_sys_huge_page_size_all(NULL, 0);
+
+ CU_ASSERT_FATAL(num_sizes >= 0);
+
+ shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t),
+ ALIGN_SIZE, ODP_SHM_HP);
+ if (shm == ODP_SHM_INVALID) {
+ printf(" No huge pages available\n");
+ return;
+ }
+
+ /* Make sure that the memory is reserved from huge pages */
+
+ CU_ASSERT_FATAL(num_sizes > 0);
+ CU_ASSERT_FATAL(odp_shm_info(shm, &info) == 0);
+
+ uint64_t hp_sizes[num_sizes];
+
+ CU_ASSERT_FATAL(odp_sys_huge_page_size_all(hp_sizes, num_sizes) ==
+ num_sizes);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (hp_sizes[i] == info.page_size)
+ break;
+ }
+
+ CU_ASSERT(i < num_sizes);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_no_hp(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_NO_HP)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+/*
+ * Test reserving memory from normal pages
+ */
+static void shmem_test_flag_no_hp(void)
+{
+ odp_shm_t shm;
+ odp_shm_info_t info;
+
+ shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t), 0,
+ ODP_SHM_NO_HP);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ /* Make sure that the memory is reserved from normal pages */
+ CU_ASSERT_FATAL(odp_shm_info(shm, &info) == 0);
+
+ CU_ASSERT(info.page_size == odp_sys_page_size());
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_proc(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_PROC)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+static void shmem_test_flag_proc(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_PROC);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_export(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_EXPORT)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+static void shmem_test_flag_export(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_EXPORT);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_hw_access(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_HW_ACCESS)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+static void shmem_test_flag_hw_access(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE,
+ ODP_SHM_HW_ACCESS);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+/*
+ * maximum size reservation
+ */
+static void shmem_test_max_reserve(void)
+{
+ odp_shm_capability_t capa;
+ odp_shm_t shm;
+ uint64_t size, align;
+ uint8_t *data;
+ uint64_t i;
+
+ memset(&capa, 0, sizeof(odp_shm_capability_t));
+ CU_ASSERT_FATAL(odp_shm_capability(&capa) == 0);
+
+ CU_ASSERT(capa.max_blocks > 0);
+
+ size = capa.max_size;
+ align = capa.max_align;
+
+ /* Assuming that system has at least MAX_SIZE_TESTED bytes available */
+ if (capa.max_size == 0 || capa.max_size > MAX_SIZE_TESTED)
+ size = MAX_SIZE_TESTED;
+
+ if (capa.max_align == 0 || capa.max_align > MAX_ALIGN_TESTED)
+ align = MAX_ALIGN_TESTED;
+
+ printf("\n size: %" PRIu64 "\n", size);
+ printf(" align: %" PRIu64 "\n", align);
+
+ shm = odp_shm_reserve("test_max_reserve", size, align, 0);
+ CU_ASSERT(shm != ODP_SHM_INVALID);
+
+ data = odp_shm_addr(shm);
+ CU_ASSERT(data != NULL);
+
+ if (data) {
+ memset(data, 0xde, size);
+ for (i = 0; i < size; i++) {
+ if (data[i] != 0xde) {
+ printf(" data error i:%" PRIu64 ", data %x"
+ "\n", i, data[i]);
+ CU_FAIL("Data error");
+ break;
+ }
+ }
+ }
+
+ if (shm != ODP_SHM_INVALID)
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
/*
* thread part for the shmem_test_reserve_after_fork
*/
@@ -217,8 +637,8 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
}
/* print block address */
- printf("In thread: Block index: %d mapped at %lx\n",
- thr_index, (long int)odp_shm_addr(shm));
+ printf("In thread: Block index: %d mapped at %p\n",
+ thr_index, odp_shm_addr(shm));
odp_barrier_wait(&glob_data->test_barrier1);
odp_barrier_wait(&glob_data->test_barrier2);
@@ -230,16 +650,12 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
/*
* test sharing memory reserved after odp_thread creation (e.g. fork()):
*/
-void shmem_test_reserve_after_fork(void)
+static void shmem_test_reserve_after_fork(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t thr_shm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
- int thr_index;
- int i;
- void *address;
+ int thr_index, i, num;
shared_test_data_small_t *pattern_small;
shared_test_data_medium_t *pattern_medium;
shared_test_data_big_t *pattern_big;
@@ -249,27 +665,27 @@ void shmem_test_reserve_after_fork(void)
glob_data = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier1, num + 1);
+ odp_barrier_init(&glob_data->test_barrier2, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(run_test_reserve_after_fork, &thrdarg);
+ odp_cunit_thread_create(num, run_test_reserve_after_fork, NULL, 0, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
/* perform a lookup of all memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
}
/* check that the patterns are correct: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
switch (thr_index % 3) {
case 0:
pattern_small =
@@ -298,23 +714,21 @@ void shmem_test_reserve_after_fork(void)
/*
* print the mapping address of the blocks
*/
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
- address = odp_shm_addr(glob_data->shm[thr_index]);
- printf("In main Block index: %d mapped at %lx\n",
- thr_index, (long int)address);
- }
+ for (thr_index = 0; thr_index < num; thr_index++)
+ printf("In main Block index: %d mapped at %p\n",
+ thr_index, odp_shm_addr(glob_data->shm[thr_index]));
/* unblock the threads and let them terminate (no free is done): */
odp_barrier_wait(&glob_data->test_barrier2);
/* at the same time, (race),free of all memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = glob_data->shm[thr_index];
CU_ASSERT(odp_shm_free(thr_shm) == 0);
}
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* just glob_data should remain: */
@@ -357,7 +771,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_small_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_small = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_small);
@@ -369,7 +783,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_medium_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_medium = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_medium);
@@ -381,7 +795,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_big_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_big = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_big);
@@ -392,8 +806,8 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
}
/* print block address */
- printf("In thread: Block index: %d mapped at %lx\n",
- thr_index, (long int)odp_shm_addr(shm));
+ printf("In thread: Block index: %d mapped at %p\n",
+ thr_index, odp_shm_addr(shm));
odp_barrier_wait(&glob_data->test_barrier1);
odp_barrier_wait(&glob_data->test_barrier2);
@@ -415,19 +829,23 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
return CU_get_number_of_failures();
}
+static int shmem_check_flag_single_va(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_SINGLE_VA)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
/*
* test sharing memory reserved after odp_thread creation (e.g. fork()):
* with single VA flag.
*/
-void shmem_test_singleva_after_fork(void)
+static void shmem_test_singleva_after_fork(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t thr_shm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
- int thr_index;
- int i;
+ int thr_index, i, num;
void *address;
shared_test_data_small_t *pattern_small;
shared_test_data_medium_t *pattern_medium;
@@ -439,49 +857,49 @@ void shmem_test_singleva_after_fork(void)
glob_data = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 3);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- glob_data->nb_threads = thrdarg.numthrds;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier3, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier4, thrdarg.numthrds + 1);
+ glob_data->nb_threads = num;
+ odp_barrier_init(&glob_data->test_barrier1, num + 1);
+ odp_barrier_init(&glob_data->test_barrier2, num + 1);
+ odp_barrier_init(&glob_data->test_barrier3, num + 1);
+ odp_barrier_init(&glob_data->test_barrier4, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(run_test_singleva_after_fork, &thrdarg);
+ odp_cunit_thread_create(num, run_test_singleva_after_fork, NULL, 0, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
/* perform a lookup of all memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
}
/* check that the patterns are correct: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
switch (thr_index % 3) {
case 0:
pattern_small =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_small);
for (i = 0; i < SMALL_MEM; i++)
CU_ASSERT(pattern_small->data[i] == i);
break;
case 1:
pattern_medium =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_medium);
for (i = 0; i < MEDIUM_MEM; i++)
CU_ASSERT(pattern_medium->data[i] == (i << 2));
break;
case 2:
pattern_big =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_big);
for (i = 0; i < BIG_MEM; i++)
CU_ASSERT(pattern_big->data[i] == (i >> 2));
break;
@@ -491,7 +909,7 @@ void shmem_test_singleva_after_fork(void)
/*
* check that the mapping address is common to all (SINGLE_VA):
*/
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
address = odp_shm_addr(glob_data->shm[thr_index]);
CU_ASSERT(glob_data->address[thr_index] == address);
}
@@ -506,7 +924,7 @@ void shmem_test_singleva_after_fork(void)
odp_barrier_wait(&glob_data->test_barrier4);
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* just glob_data should remain: */
@@ -563,7 +981,9 @@ static int run_test_stress(void *arg ODP_UNUSED)
/* we just play with the VA flag. randomly setting
* the mlock flag may exceed user ulimit -l
*/
- flags = random_bytes[2] & ODP_SHM_SINGLE_VA;
+ flags = (_global_shm_capa.flags & ODP_SHM_SINGLE_VA) ?
+ (random_bytes[2] & ODP_SHM_SINGLE_VA) : 0;
+
align = (random_bytes[3] + 1) << 6;/* up to 16Kb */
data = random_bytes[4];
@@ -659,14 +1079,13 @@ static int run_test_stress(void *arg ODP_UNUSED)
/*
* stress tests
*/
-void shmem_test_stress(void)
+static void shmem_test_stress(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t globshm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
uint32_t i;
+ int num;
globshm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t),
0, 0);
@@ -674,12 +1093,12 @@ void shmem_test_stress(void)
glob_data = odp_shm_addr(globshm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- glob_data->nb_threads = thrdarg.numthrds;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds);
+ glob_data->nb_threads = num;
+ odp_barrier_init(&glob_data->test_barrier1, num);
odp_spinlock_init(&glob_data->stress_lock);
/* before starting the threads, mark all entries as free: */
@@ -687,10 +1106,10 @@ void shmem_test_stress(void)
glob_data->stress[i].state = STRESS_FREE;
/* create threads */
- odp_cunit_thread_create(run_test_stress, &thrdarg);
+ odp_cunit_thread_create(num, run_test_stress, NULL, 0, 0);
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* release left overs: */
for (i = 0; i < STRESS_SIZE; i++) {
@@ -708,25 +1127,43 @@ void shmem_test_stress(void)
/* check that no memory is left over: */
}
+static int shm_suite_init(void)
+{
+ if (odp_shm_capability(&_global_shm_capa)) {
+ ODPH_ERR("Failed to read SHM capability\n");
+ return -1;
+ }
+ return 0;
+}
+
odp_testinfo_t shmem_suite[] = {
- ODP_TEST_INFO(shmem_test_basic),
+ ODP_TEST_INFO(shmem_test_capability),
+ ODP_TEST_INFO(shmem_test_reserve),
+ ODP_TEST_INFO(shmem_test_info),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hp, shmem_check_flag_hp),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_no_hp, shmem_check_flag_no_hp),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_proc, shmem_check_flag_proc),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_export, shmem_check_flag_export),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hw_access, shmem_check_flag_hw_access),
+ ODP_TEST_INFO(shmem_test_max_reserve),
+ ODP_TEST_INFO(shmem_test_multi_thread),
ODP_TEST_INFO(shmem_test_reserve_after_fork),
- ODP_TEST_INFO(shmem_test_singleva_after_fork),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_singleva_after_fork, shmem_check_flag_single_va),
ODP_TEST_INFO(shmem_test_stress),
ODP_TEST_INFO_NULL,
};
odp_suiteinfo_t shmem_suites[] = {
- {"Shared Memory", NULL, NULL, shmem_suite},
+ {"Shared Memory", shm_suite_init, NULL, shmem_suite},
ODP_SUITE_INFO_NULL,
};
-int shmem_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
int ret;
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
ret = odp_cunit_register(shmem_suites);
diff --git a/test/validation/api/stash/.gitignore b/test/validation/api/stash/.gitignore
new file mode 100644
index 000000000..577dc61c9
--- /dev/null
+++ b/test/validation/api/stash/.gitignore
@@ -0,0 +1 @@
+stash_main
diff --git a/test/validation/api/stash/Makefile.am b/test/validation/api/stash/Makefile.am
new file mode 100644
index 000000000..5ff3a7533
--- /dev/null
+++ b/test/validation/api/stash/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = stash_main
+stash_main_SOURCES = stash.c
diff --git a/test/validation/api/stash/stash.c b/test/validation/api/stash/stash.c
new file mode 100644
index 000000000..162697ba9
--- /dev/null
+++ b/test/validation/api/stash/stash.c
@@ -0,0 +1,1397 @@
+/* Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "odp_cunit_common.h"
+
+#include <string.h>
+
+#define MAGIC_U64 0x8b7438fa56c82e96
+#define MAGIC_U32 0x74a13b94
+#define MAGIC_U16 0x25bf
+#define MAGIC_U8 0xab
+
+#define VAL_U64 0x6b89f0742a672c34
+#define VAL_U32 0x713d847b
+#define VAL_U16 0xb587
+#define VAL_U8 0x9d
+
+#define NUM_U64 1024
+#define NUM_U32 1024
+#define NUM_U16 1024
+#define NUM_U8 256
+
+#define CACHE_SIZE 8
+#define BURST 32
+#define BATCH 16
+#define MAX_RETRY 1024
+#define RETRY_MSEC 100
+
+/* Which put/get API flavor a test exercises */
+typedef enum stash_op_t {
+	STASH_GEN,	/* generic odp_stash_put()/get() */
+	STASH_U32,	/* odp_stash_put_u32()/get_u32() */
+	STASH_U64,	/* odp_stash_put_u64()/get_u64() */
+	STASH_PTR	/* odp_stash_put_ptr()/get_ptr() */
+} stash_op_t;
+
+/* Object counts to use per object size (clamped to capability limits) */
+typedef struct num_obj_t {
+	uint32_t u64;
+	uint32_t u32;
+	uint32_t u16;
+	uint32_t u8;
+
+} num_obj_t;
+
+/* Test-global state filled in by stash_suite_init() */
+typedef struct global_t {
+	odp_stash_capability_t capa_default;
+	odp_stash_capability_t capa_fifo;
+	int fifo_supported;	/* non-zero when FIFO-type stashes exist */
+	num_obj_t num_default;
+	num_obj_t num_fifo;
+	uint32_t cache_size_default;
+
+} global_t;
+
+static global_t global;
+
+/* Suite init: read default and FIFO stash capabilities and derive the
+ * per-size object counts, clamped to max_num_obj. FIFO support is optional:
+ * max_stashes == 0 for the FIFO type means "not supported". */
+static int stash_suite_init(void)
+{
+	odp_stash_capability_t *capa_default = &global.capa_default;
+	odp_stash_capability_t *capa_fifo = &global.capa_fifo;
+
+	if (odp_stash_capability(capa_default, ODP_STASH_TYPE_DEFAULT)) {
+		ODPH_ERR("Stash capability failed for the default type\n");
+		return -1;
+	}
+
+	global.num_default.u64 = NUM_U64;
+	global.num_default.u32 = NUM_U32;
+	global.num_default.u16 = NUM_U16;
+	global.num_default.u8 = NUM_U8;
+
+	/* Clamp each count to what the implementation supports */
+	if (global.num_default.u64 > capa_default->max_num_obj)
+		global.num_default.u64 = capa_default->max_num_obj;
+	if (global.num_default.u32 > capa_default->max_num_obj)
+		global.num_default.u32 = capa_default->max_num_obj;
+	if (global.num_default.u16 > capa_default->max_num_obj)
+		global.num_default.u16 = capa_default->max_num_obj;
+	if (global.num_default.u8 > capa_default->max_num_obj)
+		global.num_default.u8 = capa_default->max_num_obj;
+
+	global.cache_size_default = CACHE_SIZE;
+	if (global.cache_size_default > capa_default->max_cache_size)
+		global.cache_size_default = capa_default->max_cache_size;
+
+	global.fifo_supported = 0;
+	if (odp_stash_capability(capa_fifo, ODP_STASH_TYPE_FIFO) == 0) {
+		if (capa_fifo->max_stashes)
+			global.fifo_supported = 1;
+	}
+
+	if (global.fifo_supported) {
+		global.num_fifo.u64 = NUM_U64;
+		global.num_fifo.u32 = NUM_U32;
+		global.num_fifo.u16 = NUM_U16;
+		global.num_fifo.u8 = NUM_U8;
+
+		if (global.num_fifo.u64 > capa_fifo->max_num_obj)
+			global.num_fifo.u64 = capa_fifo->max_num_obj;
+		if (global.num_fifo.u32 > capa_fifo->max_num_obj)
+			global.num_fifo.u32 = capa_fifo->max_num_obj;
+		if (global.num_fifo.u16 > capa_fifo->max_num_obj)
+			global.num_fifo.u16 = capa_fifo->max_num_obj;
+		if (global.num_fifo.u8 > capa_fifo->max_num_obj)
+			global.num_fifo.u8 = capa_fifo->max_num_obj;
+	}
+
+	return 0;
+}
+
+/* Sanity-check capability values for both stash types: minimum guaranteed
+ * limits, and per-size max_num counts consistent with max_obj_size
+ * (u64/u128 counts must be zero when objects that large are unsupported). */
+static void stash_capability(void)
+{
+	odp_stash_capability_t capa;
+
+	memset(&capa, 0, sizeof(odp_stash_capability_t));
+	CU_ASSERT_FATAL(odp_stash_capability(&capa, ODP_STASH_TYPE_DEFAULT) == 0);
+	CU_ASSERT(capa.max_stashes_any_type > 0);
+	CU_ASSERT(capa.max_stashes > 0);
+	CU_ASSERT(capa.max_num_obj > 0);
+	CU_ASSERT(capa.max_obj_size >= sizeof(uint32_t));
+	CU_ASSERT(capa.max_get_batch >= 1);
+	CU_ASSERT(capa.max_put_batch >= 1);
+
+	CU_ASSERT(capa.max_num.u8 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.u16 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.u32 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.max_obj_size >= capa.max_num_obj);
+	if (capa.max_obj_size >= 8)
+		CU_ASSERT(capa.max_num.u64 >= capa.max_num_obj);
+	if (capa.max_obj_size < 8)
+		CU_ASSERT(capa.max_num.u64 == 0);
+	if (capa.max_obj_size >= 16)
+		CU_ASSERT(capa.max_num.u128 >= capa.max_num_obj);
+	if (capa.max_obj_size < 16)
+		CU_ASSERT(capa.max_num.u128 == 0);
+
+	memset(&capa, 0, sizeof(odp_stash_capability_t));
+	CU_ASSERT_FATAL(odp_stash_capability(&capa, ODP_STASH_TYPE_FIFO) == 0);
+	CU_ASSERT(capa.max_stashes_any_type > 0);
+
+	/* FIFO type is optional: zero stashes means not supported */
+	if (capa.max_stashes == 0)
+		return;
+
+	CU_ASSERT(capa.max_num_obj > 0);
+	CU_ASSERT(capa.max_obj_size >= sizeof(uint32_t));
+	CU_ASSERT(capa.max_get_batch >= 1);
+	CU_ASSERT(capa.max_put_batch >= 1);
+
+	CU_ASSERT(capa.max_num.u8 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.u16 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.u32 >= capa.max_num_obj);
+	CU_ASSERT(capa.max_num.max_obj_size >= capa.max_num_obj);
+	if (capa.max_obj_size >= 8)
+		CU_ASSERT(capa.max_num.u64 >= capa.max_num_obj);
+	if (capa.max_obj_size < 8)
+		CU_ASSERT(capa.max_num.u64 == 0);
+	if (capa.max_obj_size >= 16)
+		CU_ASSERT(capa.max_num.u128 >= capa.max_num_obj);
+	if (capa.max_obj_size < 16)
+		CU_ASSERT(capa.max_num.u128 == 0);
+}
+
+/* Pre-fill the param struct with 'fill' bytes, then verify that
+ * odp_stash_param_init() overwrites every field with its documented default */
+static void param_defaults(uint8_t fill)
+{
+	odp_stash_param_t param;
+
+	memset(&param, fill, sizeof(param));
+	odp_stash_param_init(&param);
+	CU_ASSERT(param.type == ODP_STASH_TYPE_DEFAULT);
+	CU_ASSERT(param.put_mode == ODP_STASH_OP_MT);
+	CU_ASSERT(param.get_mode == ODP_STASH_OP_MT);
+	CU_ASSERT(param.cache_size == 0);
+	CU_ASSERT(param.stats.all == 0);
+	CU_ASSERT(param.stats.bit.count == 0);
+	CU_ASSERT(param.stats.bit.cache_count == 0);
+	CU_ASSERT(param.strict_size == 0);
+}
+
+/* Defaults must be set regardless of the garbage initially in the struct */
+static void stash_param_defaults(void)
+{
+	param_defaults(0);
+	param_defaults(0xff);
+}
+
+/* Create a u64-object stash, verify lookup by name finds it (and an unknown
+ * name does not), then destroy it */
+static void stash_create_u64(void)
+{
+	odp_stash_t stash, lookup;
+	odp_stash_param_t param;
+	uint32_t num = global.num_default.u64;
+
+	odp_stash_param_init(&param);
+	param.num_obj = num;
+	param.obj_size = sizeof(uint64_t);
+
+	stash = odp_stash_create("test_stash_u64", &param);
+
+	CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+	printf("\n    Stash handle: 0x%" PRIx64 "\n", odp_stash_to_u64(stash));
+
+	lookup = odp_stash_lookup("test_stash_u64");
+	CU_ASSERT(lookup != ODP_STASH_INVALID);
+	CU_ASSERT(stash == lookup);
+	CU_ASSERT(odp_stash_lookup("foo") == ODP_STASH_INVALID);
+
+	CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
+/* Create a u32-object stash, exercise odp_stash_print(), verify name lookup,
+ * then destroy it */
+static void stash_create_u32(void)
+{
+	odp_stash_t stash, lookup;
+	odp_stash_param_t param;
+	uint32_t num = global.num_default.u32;
+
+	odp_stash_param_init(&param);
+	param.num_obj = num;
+	param.obj_size = sizeof(uint32_t);
+
+	stash = odp_stash_create("test_stash_u32", &param);
+
+	CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+	printf("\n    Stash handle: 0x%" PRIx64 "\n", odp_stash_to_u64(stash));
+
+	printf("\n--- Stash print ----\n");
+	odp_stash_print(stash);
+
+	lookup = odp_stash_lookup("test_stash_u32");
+	CU_ASSERT(lookup != ODP_STASH_INVALID);
+	CU_ASSERT(stash == lookup);
+	CU_ASSERT(odp_stash_lookup("foo") == ODP_STASH_INVALID);
+
+	CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
+/* Create the maximum number of u64-object stashes simultaneously (all with
+ * the same name: name sharing is allowed), put one distinct value into each,
+ * and verify it can be read back from the right stash. A retry loop with a
+ * millisecond delay tolerates HW put()->get() latency. */
+static void stash_create_u64_all(void)
+{
+	odp_stash_param_t param;
+	uint64_t input, output;
+	uint32_t i, retry;
+	int32_t ret;
+	/* Use the u64 object count for a u64 stash (was .u32: the values are
+	 * equal after clamping, but .u64 matches stash_create_fifo_u64_all) */
+	uint32_t num_obj = global.num_default.u64;
+	uint32_t num_stash = global.capa_default.max_stashes;
+	odp_stash_t stash[num_stash];
+
+	CU_ASSERT_FATAL(num_stash > 0);
+
+	odp_stash_param_init(&param);
+	param.num_obj = num_obj;
+	param.obj_size = sizeof(uint64_t);
+
+	for (i = 0; i < num_stash; i++) {
+		stash[i] = odp_stash_create("test_stash_u64_all", &param);
+		CU_ASSERT_FATAL(stash[i] != ODP_STASH_INVALID);
+		/* A freshly created stash must be empty */
+		CU_ASSERT_FATAL(odp_stash_get(stash[i], &output, 1) == 0);
+
+		input = i;
+		CU_ASSERT(odp_stash_put(stash[i], &input, 1) == 1);
+	}
+
+	for (i = 0; i < num_stash; i++) {
+		ret = 0;
+
+		for (retry = 0; retry < RETRY_MSEC; retry++) {
+			/* Extra delay allows HW latency from put() to get() */
+			odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+			ret = odp_stash_get(stash[i], &output, 1);
+			if (ret)
+				break;
+		}
+
+		CU_ASSERT(ret == 1);
+		CU_ASSERT(output == i);
+	}
+
+	for (i = 0; i < num_stash; i++)
+		CU_ASSERT_FATAL(odp_stash_destroy(stash[i]) == 0);
+}
+
+/* Same as stash_create_u64_all but with u32 objects: create max_stashes
+ * stashes, store one distinct value per stash, read each back (with retry
+ * for HW latency), then destroy all */
+static void stash_create_u32_all(void)
+{
+	odp_stash_param_t param;
+	uint32_t i, retry, input, output;
+	int32_t ret;
+	uint32_t num_obj = global.num_default.u32;
+	uint32_t num_stash = global.capa_default.max_stashes;
+	odp_stash_t stash[num_stash];
+
+	CU_ASSERT_FATAL(num_stash > 0);
+
+	odp_stash_param_init(&param);
+	param.num_obj = num_obj;
+	param.obj_size = sizeof(uint32_t);
+
+	for (i = 0; i < num_stash; i++) {
+		stash[i] = odp_stash_create("test_stash_u32_all", &param);
+		CU_ASSERT_FATAL(stash[i] != ODP_STASH_INVALID);
+		/* A freshly created stash must be empty */
+		CU_ASSERT_FATAL(odp_stash_get(stash[i], &output, 1) == 0);
+
+		input = i;
+		CU_ASSERT(odp_stash_put(stash[i], &input, 1) == 1);
+	}
+
+	for (i = 0; i < num_stash; i++) {
+		ret = 0;
+
+		for (retry = 0; retry < RETRY_MSEC; retry++) {
+			/* Extra delay allows HW latency from put() to get() */
+			odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+			ret = odp_stash_get(stash[i], &output, 1);
+			if (ret)
+				break;
+		}
+
+		CU_ASSERT(ret == 1);
+		CU_ASSERT(output == i);
+	}
+
+	for (i = 0; i < num_stash; i++)
+		CU_ASSERT_FATAL(odp_stash_destroy(stash[i]) == 0);
+}
+
+/* Create the maximum number of FIFO-type u64 stashes at once (unnamed:
+ * NULL name), store one distinct value per stash and read it back, with a
+ * retry loop tolerating HW put()->get() latency */
+static void stash_create_fifo_u64_all(void)
+{
+	odp_stash_param_t param;
+	uint64_t input, output;
+	uint32_t i, retry;
+	int32_t ret;
+	uint32_t num_obj = global.num_fifo.u64;
+	uint32_t num_stash = global.capa_fifo.max_stashes;
+	odp_stash_t stash[num_stash];
+
+	CU_ASSERT_FATAL(num_stash > 0);
+
+	odp_stash_param_init(&param);
+	param.type = ODP_STASH_TYPE_FIFO;
+	param.num_obj = num_obj;
+	param.obj_size = sizeof(uint64_t);
+
+	for (i = 0; i < num_stash; i++) {
+		stash[i] = odp_stash_create(NULL, &param);
+		CU_ASSERT_FATAL(stash[i] != ODP_STASH_INVALID);
+		/* A freshly created stash must be empty */
+		CU_ASSERT_FATAL(odp_stash_get(stash[i], &output, 1) == 0);
+
+		input = i;
+		CU_ASSERT(odp_stash_put(stash[i], &input, 1) == 1);
+	}
+
+	for (i = 0; i < num_stash; i++) {
+		ret = 0;
+
+		for (retry = 0; retry < RETRY_MSEC; retry++) {
+			/* Extra delay allows HW latency from put() to get() */
+			odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+			ret = odp_stash_get(stash[i], &output, 1);
+			if (ret)
+				break;
+		}
+
+		CU_ASSERT(ret == 1);
+		CU_ASSERT(output == i);
+	}
+
+	for (i = 0; i < num_stash; i++)
+		CU_ASSERT_FATAL(odp_stash_destroy(stash[i]) == 0);
+}
+
+/* Same as stash_create_fifo_u64_all but with u32 objects */
+static void stash_create_fifo_u32_all(void)
+{
+	odp_stash_param_t param;
+	uint32_t i, retry, input, output;
+	int32_t ret;
+	uint32_t num_obj = global.num_fifo.u32;
+	uint32_t num_stash = global.capa_fifo.max_stashes;
+	odp_stash_t stash[num_stash];
+
+	CU_ASSERT_FATAL(num_stash > 0);
+
+	odp_stash_param_init(&param);
+	param.type = ODP_STASH_TYPE_FIFO;
+	param.num_obj = num_obj;
+	param.obj_size = sizeof(uint32_t);
+
+	for (i = 0; i < num_stash; i++) {
+		stash[i] = odp_stash_create(NULL, &param);
+		CU_ASSERT_FATAL(stash[i] != ODP_STASH_INVALID);
+		/* A freshly created stash must be empty */
+		CU_ASSERT_FATAL(odp_stash_get(stash[i], &output, 1) == 0);
+
+		input = i;
+		CU_ASSERT(odp_stash_put(stash[i], &input, 1) == 1);
+	}
+
+	for (i = 0; i < num_stash; i++) {
+		ret = 0;
+
+		for (retry = 0; retry < RETRY_MSEC; retry++) {
+			/* Extra delay allows HW latency from put() to get() */
+			odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+			ret = odp_stash_get(stash[i], &output, 1);
+			if (ret)
+				break;
+		}
+
+		CU_ASSERT(ret == 1);
+		CU_ASSERT(output == i);
+	}
+
+	for (i = 0; i < num_stash; i++)
+		CU_ASSERT_FATAL(odp_stash_destroy(stash[i]) == 0);
+}
+
+/* Exercise odp_stash_stats(): counters start at zero, and after num puts the
+ * supported counters (count / cache_count) never exceed the number stored.
+ * When both counters are supported, their sum equals the number stored.
+ * Unsupported counters must read back as zero. */
+static void stash_stats_u32(void)
+{
+	odp_stash_t stash;
+	odp_stash_param_t param;
+	odp_stash_stats_t stats;
+	int capa_count, capa_cache_count;
+	uint32_t i, input, output;
+	uint32_t max_num = 10;
+	uint32_t num = max_num / 2;
+	uint32_t num_put = 0;
+
+	capa_count = global.capa_default.stats.bit.count;
+	capa_cache_count = global.capa_default.stats.bit.cache_count;
+
+	odp_stash_param_init(&param);
+	param.num_obj = max_num;
+	param.obj_size = sizeof(uint32_t);
+	/* Enable only the counters the implementation supports */
+	param.stats.bit.count = capa_count;
+	param.stats.bit.cache_count = capa_cache_count;
+
+	stash = odp_stash_create("test_stats_u32", &param);
+	CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+	/* Poison the struct to catch fields odp_stash_stats() fails to set */
+	memset(&stats, 0xff, sizeof(odp_stash_stats_t));
+
+	CU_ASSERT_FATAL(odp_stash_stats(stash, &stats) == 0);
+	CU_ASSERT(stats.count == 0);
+	CU_ASSERT(stats.cache_count == 0);
+
+	for (i = 0; i < num; i++) {
+		input = i;
+		if (odp_stash_put_u32(stash, &input, 1) == 1)
+			num_put++;
+	}
+
+	CU_ASSERT(num_put == num);
+
+	memset(&stats, 0xff, sizeof(odp_stash_stats_t));
+
+	CU_ASSERT_FATAL(odp_stash_stats(stash, &stats) == 0);
+
+	if (capa_count) {
+		/* CU_ASSERT needs extra brackets */
+		CU_ASSERT(stats.count <= num_put);
+	} else {
+		CU_ASSERT(stats.count == 0);
+	}
+
+	if (capa_cache_count) {
+		/* CU_ASSERT needs extra brackets */
+		CU_ASSERT(stats.cache_count <= num_put);
+	} else {
+		CU_ASSERT(stats.cache_count == 0);
+	}
+
+	if (capa_count && capa_cache_count)
+		CU_ASSERT((stats.count + stats.cache_count) == num_put);
+
+	/* Drain the stash; order is unspecified for the default type */
+	for (i = 0; i < num_put; i++) {
+		output = -1;
+		CU_ASSERT(odp_stash_get_u32(stash, &output, 1) == 1);
+		CU_ASSERT(output < num);
+	}
+
+	CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
+/* Core put/get test for the default stash type.
+ *
+ * size:        object size in bytes (1/2/4/8 select the u8/u16/u32/u64 path)
+ * burst:       objects per put/get call (clamped to batch capability limits
+ *              when 'batch' is set)
+ * op:          which API flavor to use (generic / u32 / u64 / ptr)
+ * batch:       non-zero selects the *_batch() variants (all-or-nothing)
+ * strict_size: stash param; when false, try to store 'burst' extra objects
+ *              beyond num_obj to probe the non-strict size behavior
+ *
+ * Fills the stash with a constant per-size marker value, drains it back and
+ * checks every value. Output arrays have one guard element at each end,
+ * initialized to a MAGIC value and re-checked after each get() to detect
+ * buffer under-/overflow. Retry counters bound how long an empty/full
+ * condition is tolerated before a fatal assert. */
+static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op, int batch,
+			      odp_bool_t strict_size)
+{
+	odp_stash_t stash;
+	odp_stash_param_t param;
+	int32_t i, ret, retry, num_left;
+	int32_t num, max_burst, num_stashed;
+	void *input, *output;
+
+	if (batch) {
+		CU_ASSERT_FATAL(global.capa_default.max_get_batch >= 1);
+		CU_ASSERT_FATAL(global.capa_default.max_put_batch >= 1);
+
+		if (burst > (int32_t)global.capa_default.max_get_batch)
+			burst = global.capa_default.max_get_batch;
+		if (burst > (int32_t)global.capa_default.max_put_batch)
+			burst = global.capa_default.max_put_batch;
+	}
+
+	/* Output arrays carry a guard element at index 0 and burst + 1 */
+	uint64_t input_u64[burst];
+	uint64_t output_u64[burst + 2];
+	uint32_t input_u32[burst];
+	uint32_t output_u32[burst + 2];
+	uint16_t input_u16[burst];
+	uint16_t output_u16[burst + 2];
+	uint8_t input_u8[burst];
+	uint8_t output_u8[burst + 2];
+
+	if (size == sizeof(uint64_t)) {
+		num = global.num_default.u64;
+		input = input_u64;
+		output = &output_u64[1];
+	} else if (size == sizeof(uint32_t)) {
+		num = global.num_default.u32;
+		input = input_u32;
+		output = &output_u32[1];
+	} else if (size == sizeof(uint16_t)) {
+		num = global.num_default.u16;
+		input = input_u16;
+		output = &output_u16[1];
+	} else {
+		num = global.num_default.u8;
+		input = input_u8;
+		output = &output_u8[1];
+	}
+
+	for (i = 0; i < burst; i++) {
+		input_u64[i] = VAL_U64;
+		input_u32[i] = VAL_U32;
+		input_u16[i] = VAL_U16;
+		input_u8[i] = VAL_U8;
+	}
+
+	odp_stash_param_init(&param);
+	param.num_obj = num;
+	param.obj_size = size;
+	param.cache_size = global.cache_size_default;
+	param.strict_size = strict_size;
+
+	stash = odp_stash_create("test_stash_default", &param);
+
+	CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+	/* Stash is empty */
+	CU_ASSERT_FATAL(odp_stash_get(stash, output, 1) == 0);
+
+	retry = MAX_RETRY;
+	num_left = num;
+	max_burst = burst;
+	num_stashed = 0;
+
+	/* Try to store extra objects if strict mode is not enabled */
+	if (!strict_size)
+		num_left += burst;
+
+	while (num_left > 0) {
+		if (op == STASH_GEN) {
+			if (batch)
+				ret = odp_stash_put_batch(stash, input, burst);
+			else
+				ret = odp_stash_put(stash, input, burst);
+		} else if (op == STASH_U32) {
+			if (batch)
+				ret = odp_stash_put_u32_batch(stash, input_u32, burst);
+			else
+				ret = odp_stash_put_u32(stash, input_u32, burst);
+		} else if (op == STASH_U64) {
+			if (batch)
+				ret = odp_stash_put_u64_batch(stash, input_u64, burst);
+			else
+				ret = odp_stash_put_u64(stash, input_u64, burst);
+		} else if (op == STASH_PTR) {
+			if (batch)
+				ret = odp_stash_put_ptr_batch(stash, input, burst);
+			else
+				ret = odp_stash_put_ptr(stash, input, burst);
+		} else {
+			ret = -1;
+		}
+		CU_ASSERT_FATAL(ret >= 0);
+		CU_ASSERT_FATAL(ret <= burst);
+
+		num_stashed += ret;
+
+		if (batch) {
+			/* Batch calls are all-or-nothing */
+			CU_ASSERT(ret == 0 || ret == burst);
+			if (num_left - ret < burst)
+				burst = num_left - ret;
+		}
+
+		if (ret) {
+			num_left -= ret;
+			retry = MAX_RETRY;
+		} else {
+			/* Stash full */
+			if (num_stashed >= num)
+				break;
+			retry--;
+			CU_ASSERT_FATAL(retry > 0);
+		}
+	}
+
+	burst = max_burst;
+	retry = MAX_RETRY;
+	num_left = num_stashed;
+	while (num_left > 0) {
+		memset(output, 0, burst * size);
+
+		/* Init first and last array element for under-/overflow checking */
+		if (size == sizeof(uint64_t)) {
+			output_u64[0] = MAGIC_U64;
+			output_u64[burst + 1] = MAGIC_U64;
+		} else if (size == sizeof(uint32_t)) {
+			output_u32[0] = MAGIC_U32;
+			output_u32[burst + 1] = MAGIC_U32;
+		} else if (size == sizeof(uint16_t)) {
+			output_u16[0] = MAGIC_U16;
+			output_u16[burst + 1] = MAGIC_U16;
+		} else {
+			output_u8[0] = MAGIC_U8;
+			output_u8[burst + 1] = MAGIC_U8;
+		}
+		if (op == STASH_GEN) {
+			if (batch)
+				ret = odp_stash_get_batch(stash, output, burst);
+			else
+				ret = odp_stash_get(stash, output, burst);
+		} else if (op == STASH_U32) {
+			if (batch)
+				ret = odp_stash_get_u32_batch(stash, &output_u32[1], burst);
+			else
+				ret = odp_stash_get_u32(stash, &output_u32[1], burst);
+		} else if (op == STASH_U64) {
+			if (batch)
+				ret = odp_stash_get_u64_batch(stash, &output_u64[1], burst);
+			else
+				ret = odp_stash_get_u64(stash, &output_u64[1], burst);
+		} else if (op == STASH_PTR) {
+			if (batch)
+				ret = odp_stash_get_ptr_batch(stash, output, burst);
+			else
+				ret = odp_stash_get_ptr(stash, output, burst);
+		} else {
+			ret = -1;
+		}
+		CU_ASSERT_FATAL(ret >= 0);
+		CU_ASSERT_FATAL(ret <= burst);
+
+		/* Guard elements must be untouched by the get() call */
+		if (size == sizeof(uint64_t)) {
+			CU_ASSERT_FATAL(output_u64[0] == MAGIC_U64);
+			CU_ASSERT_FATAL(output_u64[burst + 1] == MAGIC_U64);
+		} else if (size == sizeof(uint32_t)) {
+			CU_ASSERT_FATAL(output_u32[0] == MAGIC_U32);
+			CU_ASSERT_FATAL(output_u32[burst + 1] == MAGIC_U32);
+		} else if (size == sizeof(uint16_t)) {
+			CU_ASSERT_FATAL(output_u16[0] == MAGIC_U16);
+			CU_ASSERT_FATAL(output_u16[burst + 1] == MAGIC_U16);
+		} else {
+			CU_ASSERT_FATAL(output_u8[0] == MAGIC_U8);
+			CU_ASSERT_FATAL(output_u8[burst + 1] == MAGIC_U8);
+		}
+
+		if (batch) {
+			CU_ASSERT(ret == 0 || ret == burst);
+			if (num_left - ret < burst)
+				burst = num_left - ret;
+		}
+
+		if (ret) {
+			for (i = 0; i < ret; i++) {
+				if (size == sizeof(uint64_t)) {
+					/* CU_ASSERT needs brackets around it */
+					CU_ASSERT(output_u64[i + 1] == VAL_U64);
+				} else if (size == sizeof(uint32_t)) {
+					CU_ASSERT(output_u32[i + 1] == VAL_U32);
+				} else if (size == sizeof(uint16_t)) {
+					CU_ASSERT(output_u16[i + 1] == VAL_U16);
+				} else {
+					CU_ASSERT(output_u8[i + 1] == VAL_U8);
+				}
+			}
+
+			num_left -= ret;
+			retry = MAX_RETRY;
+		} else {
+			retry--;
+			CU_ASSERT_FATAL(retry > 0);
+		}
+	}
+
+	/* Stash is empty again */
+	CU_ASSERT(odp_stash_get(stash, output, 1) == 0);
+	CU_ASSERT(odp_stash_flush_cache(stash) == 0);
+
+	CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
+/* Core put/get test for the FIFO stash type. Parameters as in
+ * stash_default_put(). Unlike the default-type test, each stored object
+ * carries a unique value derived from its position (VAL_* + num_left - i),
+ * and the drain loop recomputes the expected value per element — verifying
+ * FIFO ordering, not just content. Same guard-element scheme as
+ * stash_default_put() for under-/overflow detection. */
+static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op, int batch,
+			   odp_bool_t strict_size)
+{
+	odp_stash_t stash;
+	odp_stash_param_t param;
+	int32_t i, ret, retry, num_left;
+	int32_t num, max_burst, num_stashed;
+	void *input, *output;
+
+	if (batch) {
+		CU_ASSERT_FATAL(global.capa_fifo.max_get_batch >= 1);
+		CU_ASSERT_FATAL(global.capa_fifo.max_put_batch >= 1);
+
+		if (burst > (int32_t)global.capa_fifo.max_get_batch)
+			burst = global.capa_fifo.max_get_batch;
+		if (burst > (int32_t)global.capa_fifo.max_put_batch)
+			burst = global.capa_fifo.max_put_batch;
+	}
+
+	/* Output arrays carry a guard element at index 0 and burst + 1 */
+	uint64_t input_u64[burst];
+	uint64_t output_u64[burst + 2];
+	uint32_t input_u32[burst];
+	uint32_t output_u32[burst + 2];
+	uint16_t input_u16[burst];
+	uint16_t output_u16[burst + 2];
+	uint8_t input_u8[burst];
+	uint8_t output_u8[burst + 2];
+
+	if (size == sizeof(uint64_t)) {
+		num = global.num_fifo.u64;
+		input = input_u64;
+		output = &output_u64[1];
+	} else if (size == sizeof(uint32_t)) {
+		num = global.num_fifo.u32;
+		input = input_u32;
+		output = &output_u32[1];
+	} else if (size == sizeof(uint16_t)) {
+		num = global.num_fifo.u16;
+		input = input_u16;
+		output = &output_u16[1];
+	} else {
+		num = global.num_fifo.u8;
+		input = input_u8;
+		output = &output_u8[1];
+	}
+
+	odp_stash_param_init(&param);
+	param.type = ODP_STASH_TYPE_FIFO;
+	param.num_obj = num;
+	param.obj_size = size;
+	param.strict_size = strict_size;
+
+	stash = odp_stash_create("test_stash_fifo", &param);
+
+	CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+	/* Stash is empty */
+	CU_ASSERT_FATAL(odp_stash_get(stash, output, 1) == 0);
+
+	retry = MAX_RETRY;
+	num_left = num;
+	max_burst = burst;
+	num_stashed = 0;
+
+	/* Try to store extra objects if strict mode is not enabled */
+	if (!strict_size)
+		num_left += burst;
+
+	while (num_left > 0) {
+		/* Unique per-position values so FIFO order can be verified */
+		for (i = 0; i < burst; i++) {
+			if (size == sizeof(uint64_t))
+				input_u64[i] = VAL_U64 + num_left - i;
+			else if (size == sizeof(uint32_t))
+				input_u32[i] = VAL_U32 + num_left - i;
+			else if (size == sizeof(uint16_t))
+				input_u16[i] = VAL_U16 + num_left - i;
+			else
+				input_u8[i] = VAL_U8 + num_left - i;
+		}
+		if (op == STASH_GEN) {
+			if (batch)
+				ret = odp_stash_put_batch(stash, input, burst);
+			else
+				ret = odp_stash_put(stash, input, burst);
+		} else if (op == STASH_U32) {
+			if (batch)
+				ret = odp_stash_put_u32_batch(stash, input_u32, burst);
+			else
+				ret = odp_stash_put_u32(stash, input_u32, burst);
+		} else if (op == STASH_U64) {
+			if (batch)
+				ret = odp_stash_put_u64_batch(stash, input_u64, burst);
+			else
+				ret = odp_stash_put_u64(stash, input_u64, burst);
+
+		} else if (op == STASH_PTR) {
+			if (batch)
+				ret = odp_stash_put_ptr_batch(stash, input, burst);
+			else
+				ret = odp_stash_put_ptr(stash, input, burst);
+		} else {
+			ret = -1;
+		}
+		CU_ASSERT_FATAL(ret >= 0);
+		CU_ASSERT_FATAL(ret <= burst);
+
+		num_stashed += ret;
+
+		if (batch) {
+			/* Batch calls are all-or-nothing */
+			CU_ASSERT(ret == 0 || ret == burst);
+			if (num_left - ret < burst)
+				burst = num_left - ret;
+		}
+
+		if (ret) {
+			num_left -= ret;
+			retry = MAX_RETRY;
+		} else {
+			/* Stash full */
+			if (num_stashed >= num)
+				break;
+			retry--;
+			CU_ASSERT_FATAL(retry > 0);
+		}
+	}
+
+	burst = max_burst;
+	retry = MAX_RETRY;
+	num_left = num_stashed;
+	while (num_left > 0) {
+		memset(output, 0, burst * size);
+
+		/* Init first and last array element for under-/overflow checking */
+		if (size == sizeof(uint64_t)) {
+			output_u64[0] = MAGIC_U64;
+			output_u64[burst + 1] = MAGIC_U64;
+		} else if (size == sizeof(uint32_t)) {
+			output_u32[0] = MAGIC_U32;
+			output_u32[burst + 1] = MAGIC_U32;
+		} else if (size == sizeof(uint16_t)) {
+			output_u16[0] = MAGIC_U16;
+			output_u16[burst + 1] = MAGIC_U16;
+		} else {
+			output_u8[0] = MAGIC_U8;
+			output_u8[burst + 1] = MAGIC_U8;
+		}
+
+		if (op == STASH_GEN) {
+			if (batch)
+				ret = odp_stash_get_batch(stash, output, burst);
+			else
+				ret = odp_stash_get(stash, output, burst);
+		} else if (op == STASH_U32) {
+			if (batch)
+				ret = odp_stash_get_u32_batch(stash, &output_u32[1], burst);
+			else
+				ret = odp_stash_get_u32(stash, &output_u32[1], burst);
+		} else if (op == STASH_U64) {
+			if (batch)
+				ret = odp_stash_get_u64_batch(stash, &output_u64[1], burst);
+			else
+				ret = odp_stash_get_u64(stash, &output_u64[1], burst);
+		} else if (op == STASH_PTR) {
+			if (batch)
+				ret = odp_stash_get_ptr_batch(stash, output, burst);
+			else
+				ret = odp_stash_get_ptr(stash, output, burst);
+		} else {
+			ret = -1;
+		}
+		CU_ASSERT_FATAL(ret >= 0);
+		CU_ASSERT_FATAL(ret <= burst);
+
+		/* Guard elements must be untouched by the get() call */
+		if (size == sizeof(uint64_t)) {
+			CU_ASSERT_FATAL(output_u64[0] == MAGIC_U64);
+			CU_ASSERT_FATAL(output_u64[burst + 1] == MAGIC_U64);
+		} else if (size == sizeof(uint32_t)) {
+			CU_ASSERT_FATAL(output_u32[0] == MAGIC_U32);
+			CU_ASSERT_FATAL(output_u32[burst + 1] == MAGIC_U32);
+		} else if (size == sizeof(uint16_t)) {
+			CU_ASSERT_FATAL(output_u16[0] == MAGIC_U16);
+			CU_ASSERT_FATAL(output_u16[burst + 1] == MAGIC_U16);
+		} else {
+			CU_ASSERT_FATAL(output_u8[0] == MAGIC_U8);
+			CU_ASSERT_FATAL(output_u8[burst + 1] == MAGIC_U8);
+		}
+
+		if (batch) {
+			CU_ASSERT(ret == 0 || ret == burst);
+			if (num_left - ret < burst)
+				burst = num_left - ret;
+		}
+
+		if (ret) {
+			/* FIFO: values must come out in the order stored */
+			for (i = 0; i < ret; i++) {
+				if (size == sizeof(uint64_t)) {
+					uint64_t val = VAL_U64 + num_left - i;
+
+					CU_ASSERT(output_u64[i + 1] == val);
+				} else if (size == sizeof(uint32_t)) {
+					uint32_t val = VAL_U32 + num_left - i;
+
+					CU_ASSERT(output_u32[i + 1] == val);
+				} else if (size == sizeof(uint16_t)) {
+					uint16_t val = VAL_U16 + num_left - i;
+
+					CU_ASSERT(output_u16[i + 1] == val);
+				} else {
+					uint8_t val = VAL_U8 + num_left - i;
+
+					CU_ASSERT(output_u8[i + 1] == val);
+				}
+			}
+
+			num_left -= ret;
+			retry = MAX_RETRY;
+		} else {
+			retry--;
+			CU_ASSERT_FATAL(retry > 0);
+		}
+	}
+
+	/* Stash is empty again */
+	CU_ASSERT(odp_stash_get(stash, output, 1) == 0);
+	CU_ASSERT(odp_stash_flush_cache(stash) == 0);
+
+	CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
+/* Conditional-test predicates: activate a test only when the implementation
+ * supports the required object size (and stash type) */
+static int check_support_64(void)
+{
+	if (global.capa_default.max_obj_size >= sizeof(uint64_t))
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+static int check_support_ptr(void)
+{
+	if (global.capa_default.max_obj_size >= sizeof(uintptr_t))
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+static int check_support_fifo_64(void)
+{
+	if (global.fifo_supported &&
+	    global.capa_fifo.max_obj_size >= sizeof(uint64_t))
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+static int check_support_fifo_ptr(void)
+{
+	if (global.fifo_supported &&
+	    global.capa_fifo.max_obj_size >= sizeof(uintptr_t))
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+static int check_support_fifo(void)
+{
+	if (global.fifo_supported)
+		return ODP_TEST_ACTIVE;
+
+	return ODP_TEST_INACTIVE;
+}
+
+/* Test-case wrappers for the default stash type: each invokes
+ * stash_default_put() twice (strict_size off, then on) for one combination
+ * of object size, burst length, API flavor and plain/batch mode.
+ * Naming: _1 = burst of one, _n = BURST/BATCH objects per call. */
+static void stash_default_put_u64_1(void)
+{
+	stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u64_n(void)
+{
+	stash_default_put(sizeof(uint64_t), BURST, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint64_t), BURST, STASH_GEN, 0, true);
+}
+
+static void stash_default_u64_put_u64_1(void)
+{
+	stash_default_put(sizeof(uint64_t), 1, STASH_U64, 0, false);
+	stash_default_put(sizeof(uint64_t), 1, STASH_U64, 0, true);
+}
+
+static void stash_default_u64_put_u64_n(void)
+{
+	stash_default_put(sizeof(uint64_t), BURST, STASH_U64, 0, false);
+	stash_default_put(sizeof(uint64_t), BURST, STASH_U64, 0, true);
+}
+
+static void stash_default_put_ptr_1(void)
+{
+	stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 0, false);
+	stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 0, true);
+}
+
+static void stash_default_put_ptr_n(void)
+{
+	stash_default_put(sizeof(uintptr_t), BURST, STASH_PTR, 0, false);
+	stash_default_put(sizeof(uintptr_t), BURST, STASH_PTR, 0, true);
+}
+
+static void stash_default_put_u64_1_batch(void)
+{
+	stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 1, true);
+}
+
+static void stash_default_put_u64_n_batch(void)
+{
+	stash_default_put(sizeof(uint64_t), BATCH, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint64_t), BATCH, STASH_GEN, 1, true);
+}
+
+static void stash_default_u64_put_u64_1_batch(void)
+{
+	stash_default_put(sizeof(uint64_t), 1, STASH_U64, 1, false);
+	stash_default_put(sizeof(uint64_t), 1, STASH_U64, 1, true);
+}
+
+static void stash_default_u64_put_u64_n_batch(void)
+{
+	stash_default_put(sizeof(uint64_t), BATCH, STASH_U64, 1, false);
+	stash_default_put(sizeof(uint64_t), BATCH, STASH_U64, 1, true);
+}
+
+static void stash_default_put_ptr_1_batch(void)
+{
+	stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 1, false);
+	stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 1, true);
+}
+
+static void stash_default_put_ptr_n_batch(void)
+{
+	stash_default_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1, false);
+	stash_default_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1, true);
+}
+
+static void stash_default_put_u32_1(void)
+{
+	stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u32_n(void)
+{
+	stash_default_put(sizeof(uint32_t), BURST, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint32_t), BURST, STASH_GEN, 0, true);
+}
+
+static void stash_default_u32_put_u32_1(void)
+{
+	stash_default_put(sizeof(uint32_t), 1, STASH_U32, 0, false);
+	stash_default_put(sizeof(uint32_t), 1, STASH_U32, 0, true);
+}
+
+static void stash_default_u32_put_u32_n(void)
+{
+	stash_default_put(sizeof(uint32_t), BURST, STASH_U32, 0, false);
+	stash_default_put(sizeof(uint32_t), BURST, STASH_U32, 0, true);
+}
+
+static void stash_default_put_u16_1(void)
+{
+	stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u16_n(void)
+{
+	stash_default_put(sizeof(uint16_t), BURST, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint16_t), BURST, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u8_1(void)
+{
+	stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u8_n(void)
+{
+	stash_default_put(sizeof(uint8_t), BURST, STASH_GEN, 0, false);
+	stash_default_put(sizeof(uint8_t), BURST, STASH_GEN, 0, true);
+}
+
+static void stash_default_put_u32_1_batch(void)
+{
+	stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 1, true);
+}
+
+static void stash_default_put_u32_n_batch(void)
+{
+	stash_default_put(sizeof(uint32_t), BATCH, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint32_t), BATCH, STASH_GEN, 1, true);
+}
+
+static void stash_default_u32_put_u32_1_batch(void)
+{
+	stash_default_put(sizeof(uint32_t), 1, STASH_U32, 1, false);
+	stash_default_put(sizeof(uint32_t), 1, STASH_U32, 1, true);
+}
+
+static void stash_default_u32_put_u32_n_batch(void)
+{
+	stash_default_put(sizeof(uint32_t), BATCH, STASH_U32, 1, false);
+	stash_default_put(sizeof(uint32_t), BATCH, STASH_U32, 1, true);
+}
+
+static void stash_default_put_u16_1_batch(void)
+{
+	stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 1, true);
+}
+
+static void stash_default_put_u16_n_batch(void)
+{
+	stash_default_put(sizeof(uint16_t), BATCH, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint16_t), BATCH, STASH_GEN, 1, true);
+}
+
+static void stash_default_put_u8_1_batch(void)
+{
+	stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 1, true);
+}
+
+static void stash_default_put_u8_n_batch(void)
+{
+	stash_default_put(sizeof(uint8_t), BATCH, STASH_GEN, 1, false);
+	stash_default_put(sizeof(uint8_t), BATCH, STASH_GEN, 1, true);
+}
+
/* FIFO-mode stash tests, non-batch variants. Each wrapper calls
 * stash_fifo_put(size, num, type, batch=0) twice: once with single-element
 * gets (false) and once with batch gets (true). Single-element tests use
 * num=1, burst tests use num=BURST. */
static void stash_fifo_put_u64_1(void)
{
	stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 0, true);
}

static void stash_fifo_put_u64_n(void)
{
	stash_fifo_put(sizeof(uint64_t), BURST, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint64_t), BURST, STASH_GEN, 0, true);
}

static void stash_fifo_u64_put_u64_1(void)
{
	/* u64-typed stash (put_u64/get_u64 API) instead of generic */
	stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 0, false);
	stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 0, true);
}

static void stash_fifo_u64_put_u64_n(void)
{
	stash_fifo_put(sizeof(uint64_t), BURST, STASH_U64, 0, false);
	stash_fifo_put(sizeof(uint64_t), BURST, STASH_U64, 0, true);
}

static void stash_fifo_put_ptr_1(void)
{
	/* pointer-typed stash (put_ptr/get_ptr API) */
	stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 0, false);
	stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 0, true);
}

static void stash_fifo_put_ptr_n(void)
{
	stash_fifo_put(sizeof(uintptr_t), BURST, STASH_PTR, 0, false);
	stash_fifo_put(sizeof(uintptr_t), BURST, STASH_PTR, 0, true);
}

static void stash_fifo_put_u32_1(void)
{
	stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 0, true);
}

static void stash_fifo_put_u32_n(void)
{
	stash_fifo_put(sizeof(uint32_t), BURST, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint32_t), BURST, STASH_GEN, 0, true);
}

static void stash_fifo_u32_put_u32_1(void)
{
	stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 0, false);
	stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 0, true);
}

static void stash_fifo_u32_put_u32_n(void)
{
	stash_fifo_put(sizeof(uint32_t), BURST, STASH_U32, 0, false);
	stash_fifo_put(sizeof(uint32_t), BURST, STASH_U32, 0, true);
}

static void stash_fifo_put_u16_1(void)
{
	stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 0, true);
}

static void stash_fifo_put_u16_n(void)
{
	stash_fifo_put(sizeof(uint16_t), BURST, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint16_t), BURST, STASH_GEN, 0, true);
}

static void stash_fifo_put_u8_1(void)
{
	stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 0, true);
}

static void stash_fifo_put_u8_n(void)
{
	stash_fifo_put(sizeof(uint8_t), BURST, STASH_GEN, 0, false);
	stash_fifo_put(sizeof(uint8_t), BURST, STASH_GEN, 0, true);
}
+
/* FIFO-mode stash tests, batch variants (batch arg = 1). Same pattern as the
 * non-batch wrappers above, but multi-element tests use num=BATCH. */
static void stash_fifo_put_u64_1_batch(void)
{
	stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 1, true);
}

static void stash_fifo_put_u64_n_batch(void)
{
	stash_fifo_put(sizeof(uint64_t), BATCH, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint64_t), BATCH, STASH_GEN, 1, true);
}

static void stash_fifo_u64_put_u64_1_batch(void)
{
	/* u64-typed stash (put_u64_batch/get_u64_batch API) */
	stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 1, false);
	stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 1, true);
}

static void stash_fifo_u64_put_u64_n_batch(void)
{
	stash_fifo_put(sizeof(uint64_t), BATCH, STASH_U64, 1, false);
	stash_fifo_put(sizeof(uint64_t), BATCH, STASH_U64, 1, true);
}

static void stash_fifo_put_ptr_1_batch(void)
{
	stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 1, false);
	stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 1, true);
}

static void stash_fifo_put_ptr_n_batch(void)
{
	stash_fifo_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1, false);
	stash_fifo_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1, true);
}

static void stash_fifo_put_u32_1_batch(void)
{
	stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 1, true);
}

static void stash_fifo_put_u32_n_batch(void)
{
	stash_fifo_put(sizeof(uint32_t), BATCH, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint32_t), BATCH, STASH_GEN, 1, true);
}

static void stash_fifo_u32_put_u32_1_batch(void)
{
	stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 1, false);
	stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 1, true);
}

static void stash_fifo_u32_put_u32_n_batch(void)
{
	stash_fifo_put(sizeof(uint32_t), BATCH, STASH_U32, 1, false);
	stash_fifo_put(sizeof(uint32_t), BATCH, STASH_U32, 1, true);
}

static void stash_fifo_put_u16_1_batch(void)
{
	stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 1, true);
}

static void stash_fifo_put_u16_n_batch(void)
{
	stash_fifo_put(sizeof(uint16_t), BATCH, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint16_t), BATCH, STASH_GEN, 1, true);
}

static void stash_fifo_put_u8_1_batch(void)
{
	stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 1, true);
}

static void stash_fifo_put_u8_n_batch(void)
{
	stash_fifo_put(sizeof(uint8_t), BATCH, STASH_GEN, 1, false);
	stash_fifo_put(sizeof(uint8_t), BATCH, STASH_GEN, 1, true);
}
+
/* Stash test case table. Conditional entries are skipped at run time when the
 * implementation does not support the required stash type/order: 64-bit
 * stashes (check_support_64), pointer stashes (check_support_ptr) and their
 * FIFO-ordered counterparts (check_support_fifo*). */
odp_testinfo_t stash_suite[] = {
	ODP_TEST_INFO(stash_capability),
	ODP_TEST_INFO(stash_param_defaults),
	ODP_TEST_INFO_CONDITIONAL(stash_create_u64, check_support_64),
	ODP_TEST_INFO(stash_create_u32),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_1, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_n, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_1, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_n, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_1, check_support_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_n, check_support_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_1_batch, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_n_batch, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_1_batch, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_n_batch, check_support_64),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_1_batch, check_support_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_n_batch, check_support_ptr),
	ODP_TEST_INFO(stash_default_put_u32_1),
	ODP_TEST_INFO(stash_default_put_u32_n),
	ODP_TEST_INFO(stash_default_u32_put_u32_1),
	ODP_TEST_INFO(stash_default_u32_put_u32_n),
	ODP_TEST_INFO(stash_default_put_u16_1),
	ODP_TEST_INFO(stash_default_put_u16_n),
	ODP_TEST_INFO(stash_default_put_u8_1),
	ODP_TEST_INFO(stash_default_put_u8_n),
	ODP_TEST_INFO(stash_default_put_u32_1_batch),
	ODP_TEST_INFO(stash_default_put_u32_n_batch),
	ODP_TEST_INFO(stash_default_u32_put_u32_1_batch),
	ODP_TEST_INFO(stash_default_u32_put_u32_n_batch),
	ODP_TEST_INFO(stash_default_put_u16_1_batch),
	ODP_TEST_INFO(stash_default_put_u16_n_batch),
	ODP_TEST_INFO(stash_default_put_u8_1_batch),
	ODP_TEST_INFO(stash_default_put_u8_n_batch),
	ODP_TEST_INFO_CONDITIONAL(stash_create_u64_all, check_support_64),
	ODP_TEST_INFO(stash_create_u32_all),
	ODP_TEST_INFO(stash_stats_u32),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_1, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_n, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_1, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_n, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_1, check_support_fifo_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_n, check_support_fifo_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_1, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_n, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_1, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_n, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_1, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_n, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_1, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_n, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_1_batch, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_n_batch, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_1_batch, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_n_batch, check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_1_batch, check_support_fifo_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_n_batch, check_support_fifo_ptr),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_1_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_n_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_1_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_n_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_1_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_n_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_1_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_n_batch, check_support_fifo),
	ODP_TEST_INFO_CONDITIONAL(stash_create_fifo_u64_all,
				  check_support_fifo_64),
	ODP_TEST_INFO_CONDITIONAL(stash_create_fifo_u32_all,
				  check_support_fifo),
	ODP_TEST_INFO_NULL
};

/* Suite table: single "Stash" suite with a shared init function. */
odp_suiteinfo_t stash_suites[] = {
	{"Stash", stash_suite_init, NULL, stash_suite},
	ODP_SUITE_INFO_NULL
};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(stash_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/std/.gitignore b/test/validation/api/std/.gitignore
new file mode 100644
index 000000000..51fbc1d95
--- /dev/null
+++ b/test/validation/api/std/.gitignore
@@ -0,0 +1 @@
+std_main
diff --git a/test/validation/api/std/Makefile.am b/test/validation/api/std/Makefile.am
new file mode 100644
index 000000000..7cebadb83
--- /dev/null
+++ b/test/validation/api/std/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = std_main
+std_main_SOURCES = std.c
diff --git a/test/common_plat/validation/api/std_clib/std_clib.c b/test/validation/api/std/std.c
index 7f089eabb..161ee87cf 100644
--- a/test/common_plat/validation/api/std_clib/std_clib.c
+++ b/test/validation/api/std/std.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,13 +6,12 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
-#include "std_clib.h"
#include <string.h>
#define PATTERN 0x5e
-static void std_clib_test_memcpy(void)
+static void std_test_memcpy(void)
{
uint8_t src[] = {0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15};
@@ -28,7 +27,7 @@ static void std_clib_test_memcpy(void)
CU_ASSERT(ret == 0);
}
-static void std_clib_test_memset(void)
+static void std_test_memset(void)
{
uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15};
@@ -44,7 +43,7 @@ static void std_clib_test_memset(void)
CU_ASSERT(ret == 0);
}
-static void std_clib_test_memcmp(void)
+static void std_test_memcmp(void)
{
uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
@@ -81,27 +80,27 @@ static void std_clib_test_memcmp(void)
}
}
-odp_testinfo_t std_clib_suite[] = {
- ODP_TEST_INFO(std_clib_test_memcpy),
- ODP_TEST_INFO(std_clib_test_memset),
- ODP_TEST_INFO(std_clib_test_memcmp),
+odp_testinfo_t std_suite[] = {
+ ODP_TEST_INFO(std_test_memcpy),
+ ODP_TEST_INFO(std_test_memset),
+ ODP_TEST_INFO(std_test_memcmp),
ODP_TEST_INFO_NULL,
};
-odp_suiteinfo_t std_clib_suites[] = {
- {"Std C library", NULL, NULL, std_clib_suite},
+odp_suiteinfo_t std_suites[] = {
+ {"Std", NULL, NULL, std_suite},
ODP_SUITE_INFO_NULL
};
-int std_clib_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
int ret;
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
- ret = odp_cunit_register(std_clib_suites);
+ ret = odp_cunit_register(std_suites);
if (ret == 0)
ret = odp_cunit_run();
diff --git a/test/common_plat/validation/api/system/.gitignore b/test/validation/api/system/.gitignore
index 347b1ee21..347b1ee21 100644
--- a/test/common_plat/validation/api/system/.gitignore
+++ b/test/validation/api/system/.gitignore
diff --git a/test/validation/api/system/Makefile.am b/test/validation/api/system/Makefile.am
new file mode 100644
index 000000000..8090b0b5a
--- /dev/null
+++ b/test/validation/api/system/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = system_main
+system_main_SOURCES = system.c
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
new file mode 100644
index 000000000..3f7e0497d
--- /dev/null
+++ b/test/validation/api/system/system.c
@@ -0,0 +1,699 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <ctype.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "odp_cunit_common.h"
+#include "test_common_macros.h"
+
+#define PERIODS_100_MSEC 160
+#define RES_TRY_NUM 10
+#define PAGESZ_NUM 10
+
+#define GIGA_HZ 1000000000ULL
+#define KILO_HZ 1000ULL
+
+/* 10 usec wait time assumes >100kHz resolution on CPU cycles counter */
+#define WAIT_TIME (10 * ODP_TIME_USEC_IN_NS)
+
/* Verify that odp_version_api_str() returns a string containing only digits
 * and dots (e.g. "1.43.0"). Fails on the first other character. */
static void test_version_api_str(void)
{
	int char_ok = 0;
	char version_string[128];
	char *s = version_string;

	/* snprintf() always NUL-terminates, unlike the strncpy() it replaces,
	 * which left the (uninitialized) buffer unterminated whenever the
	 * source string was 127 chars or longer. */
	snprintf(version_string, sizeof(version_string), "%s",
		 odp_version_api_str());

	while (*s) {
		/* Cast via unsigned char: passing a possibly negative char
		 * value to isdigit() is undefined behavior. */
		if (isdigit((unsigned char)*s) || (strncmp(s, ".", 1) == 0)) {
			char_ok = 1;
			s++;
		} else {
			char_ok = 0;
			ODPH_DBG("\nBAD VERSION=%s\n", version_string);
			break;
		}
	}
	CU_ASSERT(char_ok);
}
+
/* Print all version strings; purely informational, no assertions. */
static void test_version_str(void)
{
	printf("\nAPI version:\n");
	printf("%s\n\n", odp_version_api_str());

	printf("Implementation name:\n");
	printf("%s\n\n", odp_version_impl_name());

	printf("Implementation details:\n");
	printf("%s\n\n", odp_version_impl_str());
}

/* Check that ODP_VERSION_API_NUM() orders (generation, major, minor) tuples
 * correctly and is consistent with the ODP_VERSION_API_* macros. */
static void test_version_macro(void)
{
	/* Strict ordering: minor < major < generation significance */
	CU_ASSERT(ODP_VERSION_API_NUM(0, 0, 0) < ODP_VERSION_API_NUM(0, 0, 1));
	CU_ASSERT(ODP_VERSION_API_NUM(0, 0, 1) < ODP_VERSION_API_NUM(0, 1, 0));
	CU_ASSERT(ODP_VERSION_API_NUM(0, 1, 0) < ODP_VERSION_API_NUM(1, 0, 0));
	CU_ASSERT(ODP_VERSION_API_NUM(1, 90, 0) <
		  ODP_VERSION_API_NUM(1, 90, 1));

	/* Current version must equal the number built from its components */
	CU_ASSERT(ODP_VERSION_API_NUM(ODP_VERSION_API_GENERATION,
				      ODP_VERSION_API_MAJOR,
				      ODP_VERSION_API_MINOR) ==
		  ODP_VERSION_API);

	CU_ASSERT(ODP_VERSION_API_NUM(ODP_VERSION_API_GENERATION,
				      ODP_VERSION_API_MAJOR, 0) <=
		  ODP_VERSION_API);

	CU_ASSERT(ODP_VERSION_API_NUM(ODP_VERSION_API_GENERATION,
				      ODP_VERSION_API_MAJOR + 1, 0) >
		  ODP_VERSION_API);
}
+
/* odp_cpu_count() must report at least one usable CPU. */
static void system_test_odp_cpu_count(void)
{
	int num_cpus = odp_cpu_count();

	CU_ASSERT(num_cpus > 0);
}
+
/* Basic CPU cycle counter sanity: after a short wait the counter value must
 * change, and the observed difference must be small relative to the counter
 * range (i.e. the counter must not run backwards). */
static void system_test_cpu_cycles(void)
{
	uint64_t c2, c1, diff, max;

	c1 = odp_cpu_cycles();
	odp_time_wait_ns(WAIT_TIME);
	c2 = odp_cpu_cycles();

	CU_ASSERT(c2 != c1);

	max = odp_cpu_cycles_max();

	/* With 10 usec delay, diff should be small compared to the maximum.
	 * Otherwise, counter is going backwards. */
	if (c2 > c1) {
		diff = c2 - c1;
		CU_ASSERT(diff < (max - diff));
	}

	/* Same applies also when there was a wrap. */
	if (c2 < c1) {
		diff = max - c1 + c2;
		CU_ASSERT(diff < (max - diff));
	}
}

/* odp_cpu_cycles_max() must be large (>= UINT32_MAX/2), stable over time,
 * and an upper bound for all counter samples. */
static void system_test_cpu_cycles_max(void)
{
	uint64_t c2, c1;
	uint64_t max1, max2;

	max1 = odp_cpu_cycles_max();
	odp_time_wait_ns(WAIT_TIME);
	max2 = odp_cpu_cycles_max();

	CU_ASSERT(max1 >= UINT32_MAX / 2);
	CU_ASSERT(max1 == max2);

	c1 = odp_cpu_cycles();
	odp_time_wait_ns(WAIT_TIME);
	c2 = odp_cpu_cycles();

	CU_ASSERT(c1 <= max1 && c2 <= max1);
}

/* Cycle counter resolution must be nonzero, much smaller than the counter
 * range, and a 10 msec wait must advance the counter by at least one
 * resolution step (when the CPU frequency makes that measurable). */
static void system_test_cpu_cycles_resolution(void)
{
	int i;
	uint64_t res;
	uint64_t c2, c1, max;
	uint64_t test_cycles = odp_cpu_hz() / 100; /* CPU cycles in 10 msec */

	max = odp_cpu_cycles_max();

	res = odp_cpu_cycles_resolution();
	CU_ASSERT(res != 0);
	CU_ASSERT(res < max / 1024);

	for (i = 0; i < RES_TRY_NUM; i++) {
		c1 = odp_cpu_cycles();
		/* Wait time varies slightly (+i) per iteration */
		odp_time_wait_ns(10 * ODP_TIME_MSEC_IN_NS + i);
		c2 = odp_cpu_cycles();

		/* Diff may be zero with low resolution */
		if (test_cycles && test_cycles > res) {
			uint64_t diff = odp_cpu_cycles_diff(c2, c1);

			CU_ASSERT(diff >= res);
		}

	}
}
+
/* Verify odp_cpu_cycles_diff() arithmetic for plain, zero, wrapped and
 * reversed argument cases. Wrap cases use synthetic counter values near
 * odp_cpu_cycles_max(). */
static void system_test_cpu_cycles_diff(void)
{
	uint64_t c2, c1, max;
	uint64_t tmp, diff, res;

	res = odp_cpu_cycles_resolution();
	max = odp_cpu_cycles_max();

	/* Simple forward difference of synthetic values */
	c1 = res;
	c2 = 2 * res;
	diff = odp_cpu_cycles_diff(c2, c1);
	CU_ASSERT(diff == res);

	/* Real samples: diff positive and small relative to the range */
	c1 = odp_cpu_cycles();
	odp_time_wait_ns(WAIT_TIME);
	c2 = odp_cpu_cycles();
	diff = odp_cpu_cycles_diff(c2, c1);
	CU_ASSERT(diff > 0);
	CU_ASSERT(diff < (max - diff));

	/* check resolution for wrap: pick c1 near max, then sample until the
	 * live counter reads at or below it (usually immediately) */
	c1 = max - 2 * res;
	do
		c2 = odp_cpu_cycles();
	while (c1 < c2);

	diff = odp_cpu_cycles_diff(c1, c1);
	CU_ASSERT(diff == 0);

	/* wrap: c2 < c1, expected diff crosses max (plus one res step) */
	tmp = c2 + (max - c1) + res;
	diff = odp_cpu_cycles_diff(c2, c1);
	CU_ASSERT(diff == tmp);

	/* no wrap, revert args */
	tmp = c1 - c2;
	diff = odp_cpu_cycles_diff(c1, c2);
	CU_ASSERT(diff == tmp);
}

/* Sample the cycle counter over many 100 msec periods and verify each diff
 * against a manual wrap-aware computation. Stops early once an actual wrap
 * has been observed and verified; shortens the run when a wrap cannot happen
 * within the test duration. */
static void system_test_cpu_cycles_long_period(void)
{
	int i;
	int periods = PERIODS_100_MSEC;
	uint64_t max_period_duration = 100 * ODP_TIME_MSEC_IN_NS + periods - 1;
	uint64_t c2, c1, c3, max;
	uint64_t tmp, diff, res;

	res = odp_cpu_cycles_resolution();
	max = odp_cpu_cycles_max();

	c3 = odp_cpu_cycles();

	CU_ASSERT(c3 <= max);
	/*
	 * If the cycle counter is not close to wrapping around during
	 * the test, then speed up the test by not trying to see the wrap
	 * around too hard. Assume cycle counter frequency of less than 10 GHz.
	 */
	CU_ASSERT(odp_cpu_hz_max() < 10ULL * ODP_TIME_SEC_IN_NS);
	if (max - c3 > 10 * periods * max_period_duration)
		periods = 10;

	printf("\n    Testing CPU cycles for %i seconds... ", periods / 10);

	for (i = 0; i < periods; i++) {
		c1 = odp_cpu_cycles();
		odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
		c2 = odp_cpu_cycles();

		CU_ASSERT(c2 != c1);
		CU_ASSERT(c1 <= max && c2 <= max);

		/* Expected diff, computed manually with wrap handling */
		if (c2 > c1)
			tmp = c2 - c1;
		else
			tmp = c2 + (max - c1) + res;

		diff = odp_cpu_cycles_diff(c2, c1);
		CU_ASSERT(diff == tmp);

		/* wrap is detected and verified */
		if (c2 < c1)
			break;
	}

	/* wrap was detected, no need to continue */
	if (i < periods) {
		printf("wrap was detected.\n");
		return;
	}

	/* wrap has to be detected if possible */
	CU_ASSERT(max > UINT32_MAX);
	CU_ASSERT((max - c3) > UINT32_MAX);

	printf("wrap was not detected.\n");
}
+
/* Cache line size must be a nonzero power of two both at run time and as the
 * compile-time ODP_CACHE_LINE_SIZE macro; also checks the ROUNDUP macro. */
static void system_test_odp_sys_cache_line_size(void)
{
	uint64_t cache_size;

	cache_size = odp_sys_cache_line_size();
	CU_ASSERT(0 < cache_size);
	CU_ASSERT(0 < ODP_CACHE_LINE_SIZE);
	CU_ASSERT(TEST_CHECK_POW2(cache_size));
	CU_ASSERT(TEST_CHECK_POW2(ODP_CACHE_LINE_SIZE));
	/* Mismatch is only a warning: the macro is a compile-time value */
	if (ODP_CACHE_LINE_SIZE != cache_size)
		printf("WARNING: ODP_CACHE_LINE_SIZE and odp_sys_cache_line_size() not matching\n");

	CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(0) == 0);
	CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(1) == ODP_CACHE_LINE_SIZE);
	CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(ODP_CACHE_LINE_SIZE) ==
		  ODP_CACHE_LINE_SIZE);
	CU_ASSERT(ODP_CACHE_LINE_ROUNDUP(ODP_CACHE_LINE_SIZE + 1) ==
		  2 * ODP_CACHE_LINE_SIZE);
}
+
+static void system_test_odp_cpu_model_str(void)
+{
+ char model[128];
+
+ snprintf(model, 128, "%s", odp_cpu_model_str());
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+}
+
+static void system_test_odp_cpu_model_str_id(void)
+{
+ char model[128];
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
/* Page size must be nonzero and match the ODP_PAGE_SIZE macro. */
static void system_test_odp_sys_page_size(void)
{
	uint64_t page;

	page = odp_sys_page_size();
	CU_ASSERT(0 < page);
	CU_ASSERT(ODP_PAGE_SIZE == page);
}

/* Default huge page size: zero means "not supported" (logged, not failed);
 * otherwise it must be a multiple of the normal page size. */
static void system_test_odp_sys_huge_page_size(void)
{
	uint64_t page;

	page = odp_sys_huge_page_size();
	if (page == 0)
		/* Not an error, but just to be sure to hit logs */
		ODPH_ERR("Huge pages do not seem to be supported\n");
	else
		CU_ASSERT(page % ODP_PAGE_SIZE == 0);
}

/* odp_sys_huge_page_size_all() must tolerate a NULL/zero query and return
 * page sizes in strictly ascending order, all nonzero. */
static void system_test_odp_sys_huge_page_size_all(void)
{
	uint64_t pagesz_tbs[PAGESZ_NUM];
	uint64_t prev_pagesz = 0;
	int num;
	int i;

	/* Count-only query must be accepted */
	num = odp_sys_huge_page_size_all(NULL, 0);
	CU_ASSERT(num >= 0);

	num = odp_sys_huge_page_size_all(pagesz_tbs, PAGESZ_NUM);
	CU_ASSERT(num >= 0);
	for (i = 0; i < num && i < PAGESZ_NUM; i++) {
		CU_ASSERT(pagesz_tbs[i] > 0);
		/* Sizes must be strictly increasing */
		CU_ASSERT(pagesz_tbs[i] > prev_pagesz);
		prev_pagesz = pagesz_tbs[i];
	}
}
+
/* Conditional-test gate: skip cycle counter tests when not supported
 * (odp_cpu_cycles_max() == 0 indicates no counter). */
static int system_check_cycle_counter(void)
{
	if (odp_cpu_cycles_max() == 0) {
		printf("Cycle counter is not supported, skipping test\n");
		return ODP_TEST_INACTIVE;
	}

	return ODP_TEST_ACTIVE;
}

/* Conditional-test gate: skip CPU frequency test when odp_cpu_hz()
 * reports zero (unsupported). */
static int system_check_odp_cpu_hz(void)
{
	if (odp_cpu_hz() == 0) {
		printf("odp_cpu_hz() is not supported, skipping test\n");
		return ODP_TEST_INACTIVE;
	}

	return ODP_TEST_ACTIVE;
}

/* Current CPU frequency sanity: between 1 kHz and 10 GHz. */
static void system_test_odp_cpu_hz(void)
{
	uint64_t hz = odp_cpu_hz();

	/* Test value sanity: less than 10GHz */
	CU_ASSERT(hz < 10 * GIGA_HZ);

	/* larger than 1kHz */
	CU_ASSERT(hz > 1 * KILO_HZ);
}
+
/* Conditional-test gate: the per-CPU frequency test runs only if
 * odp_cpu_hz_id() is supported on every available CPU. */
static int system_check_odp_cpu_hz_id(void)
{
	uint64_t hz;
	odp_cpumask_t mask;
	int i, num, cpu;

	num = odp_cpumask_all_available(&mask);
	cpu = odp_cpumask_first(&mask);

	for (i = 0; i < num; i++) {
		hz = odp_cpu_hz_id(cpu);
		if (hz == 0) {
			printf("odp_cpu_hz_id() is not supported by CPU %d, skipping test\n", cpu);
			return ODP_TEST_INACTIVE;
		}
		cpu = odp_cpumask_next(&mask, cpu);
	}

	return ODP_TEST_ACTIVE;
}

/* Per-CPU current frequency sanity: 1 kHz < hz < 10 GHz for every CPU. */
static void system_test_odp_cpu_hz_id(void)
{
	uint64_t hz;
	odp_cpumask_t mask;
	int i, num, cpu;

	num = odp_cpumask_all_available(&mask);
	cpu = odp_cpumask_first(&mask);

	for (i = 0; i < num; i++) {
		hz = odp_cpu_hz_id(cpu);
		/* Test value sanity: less than 10GHz */
		CU_ASSERT(hz < 10 * GIGA_HZ);
		/* larger than 1kHz */
		CU_ASSERT(hz > 1 * KILO_HZ);
		cpu = odp_cpumask_next(&mask, cpu);
	}
}
+
/* Conditional-test gate: skip max frequency test when unsupported. */
static int system_check_odp_cpu_hz_max(void)
{
	if (odp_cpu_hz_max() == 0) {
		printf("odp_cpu_hz_max() is not supported, skipping test\n");
		return ODP_TEST_INACTIVE;
	}
	return ODP_TEST_ACTIVE;
}

/* Maximum CPU frequency sanity: between 1 kHz and 20 GHz. */
static void system_test_odp_cpu_hz_max(void)
{
	uint64_t hz = odp_cpu_hz_max();

	/* Sanity check value */
	CU_ASSERT(hz > 1 * KILO_HZ);
	CU_ASSERT(hz < 20 * GIGA_HZ);
}
+
/* Conditional-test gate: the per-CPU max frequency test runs only if
 * odp_cpu_hz_max_id() is supported on every available CPU. */
static int system_check_odp_cpu_hz_max_id(void)
{
	uint64_t hz;
	odp_cpumask_t mask;
	int i, num, cpu;

	num = odp_cpumask_all_available(&mask);
	cpu = odp_cpumask_first(&mask);

	for (i = 0; i < num; i++) {
		hz = odp_cpu_hz_max_id(cpu);
		if (hz == 0) {
			printf("odp_cpu_hz_max_id() is not supported by CPU %d, skipping test\n",
			       cpu);
			return ODP_TEST_INACTIVE;
		}
		cpu = odp_cpumask_next(&mask, cpu);
	}

	return ODP_TEST_ACTIVE;
}

/* Per-CPU maximum frequency sanity: 1 kHz < hz < 20 GHz for every CPU. */
static void system_test_odp_cpu_hz_max_id(void)
{
	uint64_t hz;
	odp_cpumask_t mask;
	int i, num, cpu;

	num = odp_cpumask_all_available(&mask);
	cpu = odp_cpumask_first(&mask);

	for (i = 0; i < num; i++) {
		hz = odp_cpu_hz_max_id(cpu);
		/* Sanity check value */
		CU_ASSERT(hz > 1 * KILO_HZ);
		CU_ASSERT(hz < 20 * GIGA_HZ);
		cpu = odp_cpumask_next(&mask, cpu);
	}
}
+
/* Smoke test: odp_sys_info_print() must run without crashing. */
static void system_test_info_print(void)
{
	printf("\n\nCalling system info print...\n");
	odp_sys_info_print();
	printf("...done. ");
}

/* Smoke test: odp_sys_config_print() must run without crashing. */
static void system_test_config_print(void)
{
	printf("\n\nCalling system config print...\n");
	odp_sys_config_print();
	printf("...done. ");
}
+
/* Verify odp_system_info(): results must be stable across calls regardless
 * of the caller's initial memset pattern, the CPU architecture must be a
 * valid enum value, and for x86/ARM the software ISA must not exceed the
 * hardware ISA (when hardware ISA is known). Recognized ISA values are
 * printed for the logs. */
static void system_test_info(void)
{
	odp_system_info_t info;
	odp_cpu_arch_t cpu_arch;

	/* First call with 0xff-filled struct, second with zero-filled:
	 * output must match, proving the API fully overwrites the struct */
	memset(&info, 0xff, sizeof(odp_system_info_t));
	CU_ASSERT(odp_system_info(&info) == 0);
	cpu_arch = info.cpu_arch;

	memset(&info, 0, sizeof(odp_system_info_t));
	CU_ASSERT(odp_system_info(&info) == 0);

	CU_ASSERT(info.cpu_arch == cpu_arch);
	CU_ASSERT(info.cpu_arch >= ODP_CPU_ARCH_UNKNOWN && info.cpu_arch <= ODP_CPU_ARCH_X86);

	if (info.cpu_arch == ODP_CPU_ARCH_X86) {
		printf("\n        ODP_CPU_ARCH_X86\n");
		CU_ASSERT(info.cpu_isa_sw.x86 != ODP_CPU_ARCH_X86_UNKNOWN);

		if (info.cpu_isa_sw.x86 == ODP_CPU_ARCH_X86_64)
			printf("        ODP_CPU_ARCH_X86_64\n");
		else if (info.cpu_isa_sw.x86 == ODP_CPU_ARCH_X86_I686)
			printf("        ODP_CPU_ARCH_X86_I686\n");

		/* SW ISA (build target) must not exceed the HW ISA */
		if (info.cpu_isa_hw.x86 != ODP_CPU_ARCH_X86_UNKNOWN)
			CU_ASSERT(info.cpu_isa_sw.x86 <= info.cpu_isa_hw.x86);
	}

	if (info.cpu_arch == ODP_CPU_ARCH_ARM) {
		printf("\n        ODP_CPU_ARCH_ARM\n");
		CU_ASSERT(info.cpu_isa_sw.arm != ODP_CPU_ARCH_ARM_UNKNOWN);

		/* Print the SW ISA version; an unrecognized value fails */
		if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV6)
			printf("        ODP_CPU_ARCH_ARMV6\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV7)
			printf("        ODP_CPU_ARCH_ARMV7\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_0)
			printf("        ODP_CPU_ARCH_ARMV8_0\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_1)
			printf("        ODP_CPU_ARCH_ARMV8_1\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_2)
			printf("        ODP_CPU_ARCH_ARMV8_2\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_3)
			printf("        ODP_CPU_ARCH_ARMV8_3\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_4)
			printf("        ODP_CPU_ARCH_ARMV8_4\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_5)
			printf("        ODP_CPU_ARCH_ARMV8_5\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_6)
			printf("        ODP_CPU_ARCH_ARMV8_6\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_7)
			printf("        ODP_CPU_ARCH_ARMV8_7\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_8)
			printf("        ODP_CPU_ARCH_ARMV8_8\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV8_9)
			printf("        ODP_CPU_ARCH_ARMV8_9\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV9_0)
			printf("        ODP_CPU_ARCH_ARMV9_0\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV9_1)
			printf("        ODP_CPU_ARCH_ARMV9_1\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV9_2)
			printf("        ODP_CPU_ARCH_ARMV9_2\n");
		else if (info.cpu_isa_sw.arm == ODP_CPU_ARCH_ARMV9_3)
			printf("        ODP_CPU_ARCH_ARMV9_3\n");
		else
			CU_FAIL("Unknown CPU ISA SW ARCH found!");

		if (info.cpu_isa_hw.arm != ODP_CPU_ARCH_ARM_UNKNOWN)
			CU_ASSERT(info.cpu_isa_sw.arm <= info.cpu_isa_hw.arm);

		/* Print the HW ISA version; unknown is tolerated here */
		if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV6)
			printf("        ODP_CPU_ARCH_ARMV6\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV7)
			printf("        ODP_CPU_ARCH_ARMV7\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_0)
			printf("        ODP_CPU_ARCH_ARMV8_0\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_1)
			printf("        ODP_CPU_ARCH_ARMV8_1\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_2)
			printf("        ODP_CPU_ARCH_ARMV8_2\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_3)
			printf("        ODP_CPU_ARCH_ARMV8_3\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_4)
			printf("        ODP_CPU_ARCH_ARMV8_4\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_5)
			printf("        ODP_CPU_ARCH_ARMV8_5\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_6)
			printf("        ODP_CPU_ARCH_ARMV8_6\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_7)
			printf("        ODP_CPU_ARCH_ARMV8_7\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_8)
			printf("        ODP_CPU_ARCH_ARMV8_8\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV8_9)
			printf("        ODP_CPU_ARCH_ARMV8_9\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV9_0)
			printf("        ODP_CPU_ARCH_ARMV9_0\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV9_1)
			printf("        ODP_CPU_ARCH_ARMV9_1\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV9_2)
			printf("        ODP_CPU_ARCH_ARMV9_2\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARMV9_3)
			printf("        ODP_CPU_ARCH_ARMV9_3\n");
		else if (info.cpu_isa_hw.arm == ODP_CPU_ARCH_ARM_UNKNOWN)
			printf("        ODP_CPU_ARCH_ARM_UNKNOWN\n");
		else
			CU_FAIL("Unknown CPU ISA HW ARCH found!");

	}
}
+
/* Verify odp_system_meminfo(): totals must be identical with and without
 * per-block output, and must be internally consistent. Prints the first
 * blocks for the logs. */
static void system_test_meminfo(void)
{
	/* NOTE(review): block[] is technically a VLA since max_num is a
	 * const-qualified variable, not a constant expression in C. */
	const int32_t max_num = 128;
	odp_system_meminfo_t info, info_0;
	int32_t ret, ret_0, num, i;
	odp_system_memblock_t block[max_num];

	/* Meminfo without blocks */
	ret_0 = odp_system_meminfo(&info_0, NULL, 0);
	CU_ASSERT_FATAL(ret_0 >= 0);

	ret = odp_system_meminfo(&info, block, max_num);
	CU_ASSERT_FATAL(ret >= 0);

	/* Totals should match independent of per block output */
	CU_ASSERT(ret == ret_0);
	CU_ASSERT(info_0.total_mapped == info.total_mapped);
	CU_ASSERT(info_0.total_used == info.total_used);
	CU_ASSERT(info_0.total_overhead == info.total_overhead);

	CU_ASSERT(info.total_mapped >= info.total_used);
	CU_ASSERT(info.total_used >= info.total_overhead);

	/* ret is the total number of blocks; print at most max_num */
	num = ret;
	if (ret > max_num)
		num = max_num;

	printf("\n\n");
	printf("System meminfo contain %i blocks, printing %i blocks:\n", ret, num);

	printf("  %s %-32s %16s %14s %14s %12s\n", "index", "name", "addr",
	       "used", "overhead", "page_size");

	for (i = 0; i < num; i++) {
		printf("  [%3i] %-32s %16" PRIxPTR " %14" PRIu64 " %14" PRIu64 " %12" PRIu64 "\n",
		       i, block[i].name, block[i].addr, block[i].used, block[i].overhead,
		       block[i].page_size);
	}

	printf("\n");
	printf("Total mapped:   %" PRIu64 "\n", info.total_mapped);
	printf("Total used:     %" PRIu64 "\n", info.total_used);
	printf("Total overhead: %" PRIu64 "\n\n", info.total_overhead);
}
+
/* System info test case table. Cycle counter and CPU frequency tests are
 * conditional on run-time support reported by the matching check function. */
odp_testinfo_t system_suite[] = {
	ODP_TEST_INFO(test_version_api_str),
	ODP_TEST_INFO(test_version_str),
	ODP_TEST_INFO(test_version_macro),
	ODP_TEST_INFO(system_test_odp_cpu_count),
	ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
	ODP_TEST_INFO(system_test_odp_cpu_model_str),
	ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
	ODP_TEST_INFO(system_test_odp_sys_page_size),
	ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
	ODP_TEST_INFO(system_test_odp_sys_huge_page_size_all),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
				  system_check_odp_cpu_hz),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
				  system_check_odp_cpu_hz_id),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_max,
				  system_check_odp_cpu_hz_max),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_max_id,
				  system_check_odp_cpu_hz_max_id),
	ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles,
				  system_check_cycle_counter),
	ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_max,
				  system_check_cycle_counter),
	ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_resolution,
				  system_check_cycle_counter),
	ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_diff,
				  system_check_cycle_counter),
	ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_long_period,
				  system_check_cycle_counter),
	ODP_TEST_INFO(system_test_info),
	ODP_TEST_INFO(system_test_meminfo),
	ODP_TEST_INFO(system_test_info_print),
	ODP_TEST_INFO(system_test_config_print),
	ODP_TEST_INFO_NULL,
};

/* Suite table: single "System Info" suite, no init/term functions. */
odp_suiteinfo_t system_suites[] = {
	{"System Info", NULL, NULL, system_suite},
	ODP_SUITE_INFO_NULL,
};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(system_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/thread/.gitignore b/test/validation/api/thread/.gitignore
index ab1787d97..ab1787d97 100644
--- a/test/common_plat/validation/api/thread/.gitignore
+++ b/test/validation/api/thread/.gitignore
diff --git a/test/validation/api/thread/Makefile.am b/test/validation/api/thread/Makefile.am
new file mode 100644
index 000000000..cbd8b2a7b
--- /dev/null
+++ b/test/validation/api/thread/Makefile.am
@@ -0,0 +1,6 @@
+include ../Makefile.inc
+
+test_PROGRAMS = thread_main
+thread_main_CPPFLAGS = $(AM_CPPFLAGS) -DTEST_THRMASK
+thread_main_SOURCES = thread.c
+LDADD += $(LIBTHRMASK_COMMON)
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
new file mode 100644
index 000000000..ad9ffa745
--- /dev/null
+++ b/test/validation/api/thread/thread.c
@@ -0,0 +1,270 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <odp_cunit_common.h>
+#include <mask_common.h>
+
+#define GLOBAL_SHM_NAME "GlobalThreadTest"
+
+/* State shared by the control thread and all test worker threads.
+ * Placed in ODP shared memory so workers can look it up by name. */
+typedef struct {
+ /* Test thread entry and exit synchronization barriers */
+ odp_barrier_t bar_entry;
+ odp_barrier_t bar_exit;
+
+ /* Storage for thread ID assignment order test */
+ int thread_id[ODP_THREAD_COUNT_MAX];
+} global_shared_mem_t;
+
+/* Mapped address of the shared block above (set in thread_global_init) */
+static global_shared_mem_t *global_mem;
+
+/*
+ * CUnit global init hook: bring up ODP (global + control-thread local)
+ * and reserve/zero the shared memory used by the tests.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int thread_global_init(odp_instance_t *inst)
+{
+ odp_shm_t global_shm;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (global_shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ return 0;
+}
+
+/*
+ * CUnit global term hook: free the named shared memory block and
+ * tear down ODP in the reverse order of init. Returns 0 on success.
+ */
+static int thread_global_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* odp_cpu_id() must return a valid (non-negative) CPU index */
+static void thread_test_odp_cpu_id(void)
+{
+ CU_ASSERT(odp_cpu_id() >= 0);
+}
+
+/* The initial (control) thread gets ID 0, within all documented ID limits */
+static void thread_test_odp_thread_id(void)
+{
+ int id = odp_thread_id();
+
+ /* First thread which called odp_init_local() */
+ CU_ASSERT(id == 0);
+
+ CU_ASSERT(id >= 0);
+ CU_ASSERT(id < odp_thread_count_max());
+ CU_ASSERT(id < ODP_THREAD_COUNT_MAX);
+}
+
+/* With no workers started yet, exactly one (control) thread is counted */
+static void thread_test_odp_thread_count(void)
+{
+ int count = odp_thread_count();
+
+ /* One control thread running */
+ CU_ASSERT(count == 1);
+ CU_ASSERT(odp_thread_control_count() == 1);
+ CU_ASSERT(odp_thread_control_count() <= odp_thread_control_count_max());
+ CU_ASSERT(odp_thread_worker_count() == 0);
+
+ CU_ASSERT(count >= 1);
+ CU_ASSERT(count <= odp_thread_count_max());
+ CU_ASSERT(count <= ODP_THREAD_COUNT_MAX);
+}
+
+/* Max thread counts are positive and each category fits within the total */
+static void thread_test_odp_thread_count_max(void)
+{
+ int max_threads = odp_thread_count_max();
+ int max_control = odp_thread_control_count_max();
+ int max_worker = odp_thread_worker_count_max();
+
+ CU_ASSERT(max_threads > 0);
+ CU_ASSERT(max_threads <= ODP_THREAD_COUNT_MAX);
+
+ CU_ASSERT(max_control >= 0);
+ CU_ASSERT(max_control <= max_threads);
+
+ CU_ASSERT(max_worker >= 0);
+ CU_ASSERT(max_worker <= max_threads);
+}
+
+/*
+ * Worker thread body: records its own thread ID into the int pointed to
+ * by arg, checks it is a valid worker ID, and synchronizes with the
+ * control thread via the entry/exit barriers. Returns the CUnit failure
+ * count so the harness can detect assertion failures in workers.
+ */
+static int thread_func(void *arg)
+{
+ int *id_ptr = arg;
+
+ /* Indicate that thread has started */
+ odp_barrier_wait(&global_mem->bar_entry);
+
+ /* Record thread identifier for ID assignment order check */
+ *id_ptr = odp_thread_id();
+
+ /* ID 0 belongs to the control thread, so workers must be > 0 */
+ CU_ASSERT(*id_ptr > 0);
+ CU_ASSERT(*id_ptr < odp_thread_count_max());
+
+ CU_ASSERT(odp_thread_type() == ODP_THREAD_WORKER);
+
+ /* Wait for indication that we can exit */
+ odp_barrier_wait(&global_mem->bar_exit);
+
+ return CU_get_number_of_failures();
+}
+
+/*
+ * Start worker threads and verify that odp_thrmask_worker() and
+ * odp_thread_worker_count() track them, and that thread IDs are
+ * assigned sequentially in creation order (control thread holds ID 0).
+ */
+static void thread_test_odp_thrmask_worker(void)
+{
+ odp_thrmask_t mask;
+ int ret;
+ int num = odp_cpumask_default_worker(NULL, 0);
+
+ CU_ASSERT_FATAL(num > 0);
+ CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);
+
+ /* Control and worker threads may share CPUs */
+ if (num > 1)
+ num--;
+
+ void *args[num];
+
+ for (int i = 0; i < num; i++) {
+ global_mem->thread_id[i] = -1;
+ args[i] = &global_mem->thread_id[i];
+ }
+
+ /* num workers plus this control thread rendezvous at each barrier */
+ odp_barrier_init(&global_mem->bar_entry, num + 1);
+ odp_barrier_init(&global_mem->bar_exit, num + 1);
+
+ /* should start out with 0 worker threads */
+ ret = odp_thrmask_worker(&mask);
+ CU_ASSERT(ret == odp_thrmask_count(&mask));
+ CU_ASSERT(ret == 0);
+
+ /* start the test thread(s) */
+ ret = odp_cunit_thread_create(num, thread_func, args, 1, 1);
+ CU_ASSERT(ret == num);
+
+ if (ret != num)
+ return;
+
+ /* wait for thread(s) to start */
+ odp_barrier_wait(&global_mem->bar_entry);
+
+ ret = odp_thrmask_worker(&mask);
+ CU_ASSERT(ret == odp_thrmask_count(&mask));
+ CU_ASSERT(ret == num);
+ CU_ASSERT(ret == odp_thread_worker_count());
+ CU_ASSERT(ret <= odp_thread_count_max());
+ CU_ASSERT(ret <= odp_thread_worker_count_max());
+
+ /* allow thread(s) to exit */
+ odp_barrier_wait(&global_mem->bar_exit);
+
+ /* Thread ID 0 is used by this control thread */
+ for (int i = 0; i < num; i++)
+ CU_ASSERT(global_mem->thread_id[i] == i + 1);
+
+ odp_cunit_thread_join(num);
+}
+
+/* With no workers running, the control mask contains exactly this thread */
+static void thread_test_odp_thrmask_control(void)
+{
+ odp_thrmask_t mask;
+ int ret;
+
+ CU_ASSERT(odp_thread_type() == ODP_THREAD_CONTROL);
+
+ /* Should start out with 1 control thread */
+ ret = odp_thrmask_control(&mask);
+ CU_ASSERT(ret == odp_thrmask_count(&mask));
+ CU_ASSERT(ret == odp_thread_control_count());
+ CU_ASSERT(ret == 1);
+}
+
+/* Test list: thrmask helpers (from mask_common) plus the local tests above */
+odp_testinfo_t thread_suite[] = {
+ ODP_TEST_INFO(thread_test_odp_cpu_id),
+ ODP_TEST_INFO(thread_test_odp_thread_id),
+ ODP_TEST_INFO(thread_test_odp_thread_count),
+ ODP_TEST_INFO(thread_test_odp_thread_count_max),
+ ODP_TEST_INFO(thread_test_odp_thrmask_to_from_str),
+ ODP_TEST_INFO(thread_test_odp_thrmask_equal),
+ ODP_TEST_INFO(thread_test_odp_thrmask_zero),
+ ODP_TEST_INFO(thread_test_odp_thrmask_set),
+ ODP_TEST_INFO(thread_test_odp_thrmask_clr),
+ ODP_TEST_INFO(thread_test_odp_thrmask_isset),
+ ODP_TEST_INFO(thread_test_odp_thrmask_count),
+ ODP_TEST_INFO(thread_test_odp_thrmask_and),
+ ODP_TEST_INFO(thread_test_odp_thrmask_or),
+ ODP_TEST_INFO(thread_test_odp_thrmask_xor),
+ ODP_TEST_INFO(thread_test_odp_thrmask_copy),
+ ODP_TEST_INFO(thread_test_odp_thrmask_first),
+ ODP_TEST_INFO(thread_test_odp_thrmask_last),
+ ODP_TEST_INFO(thread_test_odp_thrmask_next),
+ ODP_TEST_INFO(thread_test_odp_thrmask_worker),
+ ODP_TEST_INFO(thread_test_odp_thrmask_control),
+ ODP_TEST_INFO_NULL,
+};
+
+/* Suite registry: single "thread" suite, no per-suite init/term hooks */
+odp_suiteinfo_t thread_suites[] = {
+ {"thread", NULL, NULL, thread_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+/* Entry point: parse options, install init/term hooks, register and run. */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(thread_global_init);
+ odp_cunit_register_global_term(thread_global_term);
+
+ ret = odp_cunit_register(thread_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/time/.gitignore b/test/validation/api/time/.gitignore
index 0ef3e6162..0ef3e6162 100644
--- a/test/common_plat/validation/api/time/.gitignore
+++ b/test/validation/api/time/.gitignore
diff --git a/test/validation/api/time/Makefile.am b/test/validation/api/time/Makefile.am
new file mode 100644
index 000000000..9b0392eb9
--- /dev/null
+++ b/test/validation/api/time/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = time_main
+time_main_SOURCES = time.c
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
new file mode 100644
index 000000000..cfef7f619
--- /dev/null
+++ b/test/validation/api/time/time.c
@@ -0,0 +1,1031 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <time.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#define BUSY_LOOP_CNT 30000000 /* used for t > min resolution */
+#define MIN_TIME_RATE 32000
+#define MAX_TIME_RATE 15000000000
+#define DELAY_TOLERANCE 40000000 /* deviation for delay */
+#define WAIT_SECONDS 3
+#define MAX_WORKERS 32
+#define TIME_SAMPLES 2
+#define TIME_TOLERANCE_NS 1000000
+#define TIME_TOLERANCE_CI_NS 40000000
+#define TIME_TOLERANCE_1CPU_NS 40000000
+#define GLOBAL_SHM_NAME "GlobalTimeTest"
+#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
+
+/* Clock resolutions in nsec, filled by the *_res tests and reused by
+ * later conversion/diff/sum tests as an arithmetic tolerance. */
+static uint64_t local_res;
+static uint64_t global_res;
+
+/* Callback types so one test body can exercise both local and global clocks */
+typedef odp_time_t time_cb(void);
+typedef uint64_t time_res_cb(void);
+typedef odp_time_t time_from_ns_cb(uint64_t ns);
+typedef uint64_t time_nsec_cb(void);
+
+/* Shared state for the multi-thread global time sync test */
+typedef struct {
+ uint32_t num_threads;
+ odp_barrier_t test_barrier;
+ odp_time_t time[MAX_WORKERS + 1][TIME_SAMPLES];
+} global_shared_mem_t;
+
+/* Mapped address of the shared block, and the ODP instance handle used
+ * when launching helper threads in the sync test */
+static global_shared_mem_t *global_mem;
+static odp_instance_t *instance;
+
+/*
+ * CUnit global init hook: bring up ODP, reserve the shared memory used by
+ * the global sync test, and clamp the requested thread count to the number
+ * of available worker CPUs. Returns 0 on success, -1 on failure.
+ */
+static int time_global_init(odp_instance_t *inst)
+{
+ odp_shm_t global_shm;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+ uint32_t workers_count, max_threads;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (global_shm == ODP_SHM_INVALID) {
+ /* Fixed message wording ("Unable reserve") for consistency with
+ * the identical check in the other test mains */
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->num_threads = MAX_WORKERS;
+
+ workers_count = odp_cpumask_default_worker(NULL, 0);
+
+ /* Use at most MAX_WORKERS threads, and never more than worker CPUs */
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->num_threads,
+ max_threads);
+ global_mem->num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->num_threads);
+
+ instance = inst;
+
+ return 0;
+}
+
+/*
+ * CUnit global term hook: free the named shared memory block and tear
+ * down ODP in the reverse order of init. Returns 0 on success.
+ */
+static int time_global_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Verify the fixed ratios between the ODP time unit constants */
+static void time_test_constants(void)
+{
+ uint64_t ns;
+
+ CU_ASSERT(ODP_TIME_USEC_IN_NS == 1000);
+
+ ns = ODP_TIME_HOUR_IN_NS;
+ CU_ASSERT(ns == 60 * ODP_TIME_MIN_IN_NS);
+ ns = ODP_TIME_MIN_IN_NS;
+ CU_ASSERT(ns == 60 * ODP_TIME_SEC_IN_NS);
+ ns = ODP_TIME_SEC_IN_NS;
+ CU_ASSERT(ns == 1000 * ODP_TIME_MSEC_IN_NS);
+ ns = ODP_TIME_MSEC_IN_NS;
+ CU_ASSERT(ns == 1000 * ODP_TIME_USEC_IN_NS);
+
+ ns = ODP_TIME_SEC_IN_NS / 1000;
+ CU_ASSERT(ns == ODP_TIME_MSEC_IN_NS);
+ ns /= 1000;
+ CU_ASSERT(ns == ODP_TIME_USEC_IN_NS);
+}
+
+/*
+ * Startup time must be far from the 64-bit wrap point (>= 10 years of
+ * headroom), current global time must be past it, and adding a 10 year
+ * offset must not wrap. Values are printed to aid debugging.
+ */
+static void time_test_startup_time(void)
+{
+ odp_time_startup_t startup;
+ uint64_t ns1, ns2, ns3;
+ odp_time_t time;
+
+ memset(&startup, 0, sizeof(odp_time_startup_t));
+
+ odp_time_startup(&startup);
+ ns1 = startup.global_ns;
+ ns2 = odp_time_to_ns(startup.global);
+
+ CU_ASSERT(UINT64_MAX - ns1 >= 10 * YEAR_IN_NS);
+ CU_ASSERT(UINT64_MAX - ns2 >= 10 * YEAR_IN_NS);
+
+ time = odp_time_global();
+ ns3 = odp_time_to_ns(time);
+ CU_ASSERT(odp_time_cmp(time, startup.global) > 0);
+
+ time = odp_time_global_from_ns(10 * YEAR_IN_NS);
+ time = odp_time_sum(startup.global, time);
+ CU_ASSERT(odp_time_cmp(time, startup.global) > 0);
+
+ printf("\n");
+ printf(" Startup time in nsec: %" PRIu64 "\n", ns1);
+ printf(" Startup time to nsec: %" PRIu64 "\n", ns2);
+ printf(" Nsec since startup: %" PRIu64 "\n\n", ns3 - startup.global_ns);
+}
+
+/*
+ * Check the clock rate is within sane bounds and store its resolution
+ * (nsec per tick, rounded up) into *res for later tolerance checks.
+ */
+static void time_test_res(time_res_cb time_res, uint64_t *res)
+{
+ uint64_t rate;
+
+ rate = time_res();
+ CU_ASSERT(rate > MIN_TIME_RATE);
+ CU_ASSERT(rate < MAX_TIME_RATE);
+
+ /* Round up when the rate does not divide one second evenly */
+ *res = ODP_TIME_SEC_IN_NS / rate;
+ if (ODP_TIME_SEC_IN_NS % rate)
+ (*res)++;
+}
+
+/* Wrappers binding the generic resolution test to each clock */
+static void time_test_local_res(void)
+{
+ time_test_res(odp_time_local_res, &local_res);
+}
+
+static void time_test_global_res(void)
+{
+ time_test_res(odp_time_global_res, &global_res);
+}
+
+/* check that related conversions come back to the same value */
+static void time_test_conversion(time_from_ns_cb time_from_ns, uint64_t res)
+{
+ uint64_t ns1, ns2;
+ odp_time_t time;
+ uint64_t upper_limit, lower_limit;
+
+ ns1 = 100;
+ time = time_from_ns(ns1);
+
+ ns2 = odp_time_to_ns(time);
+
+ /* need to check within arithmetic tolerance that the same
+ * value in ns is returned after conversions */
+ upper_limit = ns1 + res;
+ lower_limit = ns1 - res;
+ CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
+
+ ns1 = 60 * 11 * ODP_TIME_SEC_IN_NS;
+ time = time_from_ns(ns1);
+
+ ns2 = odp_time_to_ns(time);
+
+ /* need to check within arithmetic tolerance that the same
+ * value in ns is returned after conversions */
+ upper_limit = ns1 + res;
+ lower_limit = ns1 - res;
+ CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
+
+ /* test on 0 */
+ ns1 = odp_time_to_ns(ODP_TIME_NULL);
+ CU_ASSERT(ns1 == 0);
+}
+
+/* Wrappers binding the generic conversion test to each clock */
+static void time_test_local_conversion(void)
+{
+ time_test_conversion(odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_conversion(void)
+{
+ time_test_conversion(odp_time_global_from_ns, global_res);
+}
+
+/*
+ * Sample all eight time APIs (local/global x stamp/strict x raw/nsec)
+ * at three points separated by busy loops, then check each source is
+ * strictly increasing, far from 64-bit wrap, and that stamp and nsec
+ * variants share a time base within 100 msec. Sampling order within
+ * each group is significant, so statements must not be reordered.
+ */
+static void time_test_monotony(void)
+{
+ volatile uint64_t count = 0;
+ odp_time_t l_t1, l_t2, l_t3;
+ odp_time_t ls_t1, ls_t2, ls_t3;
+ odp_time_t g_t1, g_t2, g_t3;
+ odp_time_t gs_t1, gs_t2, gs_t3;
+ uint64_t l_ns1, l_ns2, l_ns3;
+ uint64_t ls_ns1, ls_ns2, ls_ns3;
+ uint64_t g_ns1, g_ns2, g_ns3;
+ uint64_t gs_ns1, gs_ns2, gs_ns3;
+ uint64_t ns1, ns2, ns3;
+ uint64_t s_ns1, s_ns2, s_ns3;
+ uint64_t limit;
+
+ l_t1 = odp_time_local();
+ ls_t1 = odp_time_local_strict();
+ l_ns1 = odp_time_local_ns();
+ ls_ns1 = odp_time_local_strict_ns();
+
+ g_t1 = odp_time_global();
+ gs_t1 = odp_time_global_strict();
+ g_ns1 = odp_time_global_ns();
+ gs_ns1 = odp_time_global_strict_ns();
+
+ while (count < BUSY_LOOP_CNT) {
+ count++;
+ };
+
+ l_t2 = odp_time_local();
+ ls_t2 = odp_time_local_strict();
+ l_ns2 = odp_time_local_ns();
+ ls_ns2 = odp_time_local_strict_ns();
+
+ g_t2 = odp_time_global();
+ gs_t2 = odp_time_global_strict();
+ g_ns2 = odp_time_global_ns();
+ gs_ns2 = odp_time_global_strict_ns();
+
+ count = 0;
+ while (count < BUSY_LOOP_CNT) {
+ count++;
+ };
+
+ l_t3 = odp_time_local();
+ ls_t3 = odp_time_local_strict();
+ l_ns3 = odp_time_local_ns();
+ ls_ns3 = odp_time_local_strict_ns();
+
+ g_t3 = odp_time_global();
+ gs_t3 = odp_time_global_strict();
+ g_ns3 = odp_time_global_ns();
+ gs_ns3 = odp_time_global_strict_ns();
+
+ /* Local time tests
+ * ---------------- */
+
+ ns1 = odp_time_to_ns(l_t1);
+ ns2 = odp_time_to_ns(l_t2);
+ ns3 = odp_time_to_ns(l_t3);
+
+ s_ns1 = odp_time_to_ns(ls_t1);
+ s_ns2 = odp_time_to_ns(ls_t2);
+ s_ns3 = odp_time_to_ns(ls_t3);
+
+ /* Time should not wrap in at least 10 years from ODP start. Ignoring delay from start up
+ * and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ CU_ASSERT(UINT64_MAX - ns1 > limit);
+ CU_ASSERT(UINT64_MAX - s_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - l_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - ls_ns1 > limit);
+
+ /* Time stamp */
+ CU_ASSERT(ns2 > ns1);
+ CU_ASSERT(ns3 > ns2);
+
+ /* Strict time stamp */
+ CU_ASSERT(s_ns2 > s_ns1);
+ CU_ASSERT(s_ns3 > s_ns2);
+
+ /* Nsec time */
+ CU_ASSERT(l_ns2 > l_ns1);
+ CU_ASSERT(l_ns3 > l_ns2);
+
+ /* Strict nsec time */
+ CU_ASSERT(ls_ns2 > ls_ns1);
+ CU_ASSERT(ls_ns3 > ls_ns2);
+
+ /* Strict time stamp order is maintained */
+ CU_ASSERT(ls_ns1 >= s_ns1);
+ CU_ASSERT(ls_ns2 >= s_ns2);
+ CU_ASSERT(ls_ns3 >= s_ns3);
+
+ /* Time in nanoseconds have the same time base. Allow less than 100 msec error
+ * between time stamp converted to nsec and nsec time. */
+ CU_ASSERT((ls_ns1 - s_ns1) < (100 * ODP_TIME_MSEC_IN_NS));
+ CU_ASSERT((ls_ns2 - s_ns2) < (100 * ODP_TIME_MSEC_IN_NS));
+ CU_ASSERT((ls_ns3 - s_ns3) < (100 * ODP_TIME_MSEC_IN_NS));
+
+ /* Global time tests
+ * ----------------- */
+
+ ns1 = odp_time_to_ns(g_t1);
+ ns2 = odp_time_to_ns(g_t2);
+ ns3 = odp_time_to_ns(g_t3);
+
+ s_ns1 = odp_time_to_ns(gs_t1);
+ s_ns2 = odp_time_to_ns(gs_t2);
+ s_ns3 = odp_time_to_ns(gs_t3);
+
+ /* Time should not wrap in at least 10 years from ODP start. Ignoring delay from start up
+ * and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ CU_ASSERT(UINT64_MAX - ns1 > limit);
+ CU_ASSERT(UINT64_MAX - s_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - g_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - gs_ns1 > limit);
+
+ /* Time stamp */
+ CU_ASSERT(ns2 > ns1);
+ CU_ASSERT(ns3 > ns2);
+
+ /* Strict time stamp */
+ CU_ASSERT(s_ns2 > s_ns1);
+ CU_ASSERT(s_ns3 > s_ns2);
+
+ /* Nsec time */
+ CU_ASSERT(g_ns2 > g_ns1);
+ CU_ASSERT(g_ns3 > g_ns2);
+
+ /* Strict nsec time */
+ CU_ASSERT(gs_ns2 > gs_ns1);
+ CU_ASSERT(gs_ns3 > gs_ns2);
+
+ /* Strict time stamp order is maintained */
+ CU_ASSERT(gs_ns1 >= s_ns1);
+ CU_ASSERT(gs_ns2 >= s_ns2);
+ CU_ASSERT(gs_ns3 >= s_ns3);
+
+ /* Time in nanoseconds have the same time base. Allow less than 100 msec error
+ * between time stamp converted to nsec and nsec time. */
+ CU_ASSERT((gs_ns1 - s_ns1) < (100 * ODP_TIME_MSEC_IN_NS));
+ CU_ASSERT((gs_ns2 - s_ns2) < (100 * ODP_TIME_MSEC_IN_NS));
+ CU_ASSERT((gs_ns3 - s_ns3) < (100 * ODP_TIME_MSEC_IN_NS));
+
+ /* Tight error margin cannot be used due to possible OS interrupts during the test.
+ * Record all time stamp values into the log to help debugging their relative order and
+ * accuracy. */
+ printf("\n Time stamp values in nsec:\n");
+ printf(" odp_time_local(): %" PRIu64 "\n", odp_time_to_ns(l_t1));
+ printf(" odp_time_local_strict(): %" PRIu64 "\n", odp_time_to_ns(ls_t1));
+ printf(" odp_time_local_ns(): %" PRIu64 "\n", l_ns1);
+ printf(" odp_time_local_strict_ns(): %" PRIu64 "\n", ls_ns1);
+ printf(" odp_time_global(): %" PRIu64 "\n", odp_time_to_ns(g_t1));
+ printf(" odp_time_global_strict(): %" PRIu64 "\n", odp_time_to_ns(gs_t1));
+ printf(" odp_time_global_ns(): %" PRIu64 "\n", g_ns1);
+ printf(" odp_time_global_strict_ns(): %" PRIu64 "\n\n", gs_ns1);
+}
+
+/*
+ * Exercise odp_time_cmp(): ordering of three increasing samples,
+ * reflexivity, interval comparison, and equality with ODP_TIME_NULL.
+ */
+static void time_test_cmp(time_cb time_cur, time_from_ns_cb time_from_ns)
+{
+ /* volatile to stop optimization of busy loop */
+ volatile int count = 0;
+ odp_time_t t1, t2, t3;
+
+ t1 = time_cur();
+
+ while (count < BUSY_LOOP_CNT) {
+ count++;
+ };
+
+ t2 = time_cur();
+
+ while (count < BUSY_LOOP_CNT * 2) {
+ count++;
+ };
+
+ t3 = time_cur();
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+ CU_ASSERT(odp_time_cmp(t3, t2) > 0);
+ CU_ASSERT(odp_time_cmp(t3, t1) > 0);
+ CU_ASSERT(odp_time_cmp(t1, t2) < 0);
+ CU_ASSERT(odp_time_cmp(t2, t3) < 0);
+ CU_ASSERT(odp_time_cmp(t1, t3) < 0);
+ CU_ASSERT(odp_time_cmp(t1, t1) == 0);
+ CU_ASSERT(odp_time_cmp(t2, t2) == 0);
+ CU_ASSERT(odp_time_cmp(t3, t3) == 0);
+
+ t2 = time_from_ns(60 * 10 * ODP_TIME_SEC_IN_NS);
+ t1 = time_from_ns(3);
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+ CU_ASSERT(odp_time_cmp(t1, t2) < 0);
+
+ t1 = time_from_ns(0);
+ CU_ASSERT(odp_time_cmp(t1, ODP_TIME_NULL) == 0);
+}
+
+/* Wrappers binding the generic cmp test to each clock variant */
+static void time_test_local_cmp(void)
+{
+ time_test_cmp(odp_time_local, odp_time_local_from_ns);
+}
+
+static void time_test_global_cmp(void)
+{
+ time_test_cmp(odp_time_global, odp_time_global_from_ns);
+}
+
+static void time_test_local_strict_cmp(void)
+{
+ time_test_cmp(odp_time_local_strict, odp_time_local_from_ns);
+}
+
+static void time_test_global_strict_cmp(void)
+{
+ time_test_cmp(odp_time_global_strict, odp_time_global_from_ns);
+}
+
+/* check that a time difference gives a reasonable result */
+static void time_test_diff(time_cb time_cur,
+ time_from_ns_cb time_from_ns,
+ uint64_t res)
+{
+ /* volatile to stop optimization of busy loop */
+ volatile int count = 0;
+ odp_time_t diff, t1, t2;
+ uint64_t ns1, ns2, ns;
+ uint64_t nsdiff, diff_ns;
+ uint64_t upper_limit, lower_limit;
+
+ /* test timestamp diff */
+ t1 = time_cur();
+
+ while (count < BUSY_LOOP_CNT) {
+ count++;
+ };
+
+ t2 = time_cur();
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+ diff = odp_time_diff(t2, t1);
+ CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+
+ diff_ns = odp_time_diff_ns(t2, t1);
+ CU_ASSERT(diff_ns > 0);
+
+ ns1 = odp_time_to_ns(t1);
+ ns2 = odp_time_to_ns(t2);
+ ns = ns2 - ns1;
+ nsdiff = odp_time_to_ns(diff);
+
+ /* NOTE(review): lower_limit wraps if ns < 2*res; relies on the busy
+ * loop making ns large enough — confirm */
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+ CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+ CU_ASSERT((diff_ns <= upper_limit) && (diff_ns >= lower_limit));
+
+ /* test timestamp and interval diff */
+ ns1 = 54;
+ t1 = time_from_ns(ns1);
+ ns = ns2 - ns1;
+
+ diff = odp_time_diff(t2, t1);
+ CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+
+ diff_ns = odp_time_diff_ns(t2, t1);
+ CU_ASSERT(diff_ns > 0);
+
+ nsdiff = odp_time_to_ns(diff);
+
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+ CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+ CU_ASSERT((diff_ns <= upper_limit) && (diff_ns >= lower_limit));
+
+ /* test interval diff */
+ ns2 = 60 * 10 * ODP_TIME_SEC_IN_NS;
+ ns = ns2 - ns1;
+
+ t2 = time_from_ns(ns2);
+ diff = odp_time_diff(t2, t1);
+ CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+
+ diff_ns = odp_time_diff_ns(t2, t1);
+ CU_ASSERT(diff_ns > 0);
+
+ nsdiff = odp_time_to_ns(diff);
+
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+ CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+ CU_ASSERT((diff_ns <= upper_limit) && (diff_ns >= lower_limit));
+
+ /* same time has to diff to 0 */
+ diff = odp_time_diff(t2, t2);
+ CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) == 0);
+
+ /* diff against NULL acts as identity */
+ diff = odp_time_diff(t2, ODP_TIME_NULL);
+ CU_ASSERT(odp_time_cmp(t2, diff) == 0);
+
+ diff_ns = odp_time_diff_ns(t2, t2);
+ CU_ASSERT(diff_ns == 0);
+}
+
+/* Wrappers binding the generic diff test to each clock variant */
+static void time_test_local_diff(void)
+{
+ time_test_diff(odp_time_local, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_diff(void)
+{
+ time_test_diff(odp_time_global, odp_time_global_from_ns, global_res);
+}
+
+static void time_test_local_strict_diff(void)
+{
+ time_test_diff(odp_time_local_strict, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_strict_diff(void)
+{
+ time_test_diff(odp_time_global_strict, odp_time_global_from_ns, global_res);
+}
+
+/* check that a time sum gives a reasonable result */
+static void time_test_sum(time_cb time_cur,
+ time_from_ns_cb time_from_ns,
+ uint64_t res)
+{
+ odp_time_t sum, t1, t2;
+ uint64_t nssum, ns1, ns2, ns, diff;
+ uint64_t upper_limit, lower_limit;
+
+ /* sum timestamp and interval */
+ t1 = time_cur();
+ ns2 = 103;
+ t2 = time_from_ns(ns2);
+ ns1 = odp_time_to_ns(t1);
+ ns = ns1 + ns2;
+
+ sum = odp_time_sum(t2, t1);
+ CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
+ nssum = odp_time_to_ns(sum);
+
+ /* allow two resolution units of rounding error */
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+ CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
+
+ /* sum intervals */
+ ns1 = 60 * 13 * ODP_TIME_SEC_IN_NS;
+ t1 = time_from_ns(ns1);
+ ns = ns1 + ns2;
+
+ sum = odp_time_sum(t2, t1);
+ CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
+ nssum = odp_time_to_ns(sum);
+
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+ CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
+
+ /* test on 0 */
+ sum = odp_time_sum(t2, ODP_TIME_NULL);
+ CU_ASSERT(odp_time_cmp(t2, sum) == 0);
+
+ /* test add nsec */
+ ns = ODP_TIME_SEC_IN_NS;
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+
+ t1 = time_cur();
+ t2 = odp_time_add_ns(t1, ns);
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+ diff = odp_time_diff_ns(t2, t1);
+ CU_ASSERT((diff <= upper_limit) && (diff >= lower_limit));
+
+ /* add nsec to the zero time */
+ t1 = ODP_TIME_NULL;
+ t2 = odp_time_add_ns(t1, ns);
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+ diff = odp_time_diff_ns(t2, t1);
+ CU_ASSERT((diff <= upper_limit) && (diff >= lower_limit));
+}
+
+/* Wrappers binding the generic sum test to each clock variant */
+static void time_test_local_sum(void)
+{
+ time_test_sum(odp_time_local, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_sum(void)
+{
+ time_test_sum(odp_time_global, odp_time_global_from_ns, global_res);
+}
+
+static void time_test_local_strict_sum(void)
+{
+ time_test_sum(odp_time_local_strict, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_strict_sum(void)
+{
+ time_test_sum(odp_time_global_strict, odp_time_global_from_ns, global_res);
+}
+
+/*
+ * Wait WAIT_SECONDS one-second intervals with odp_time_wait_until() and
+ * verify the total elapsed time stays within DELAY_TOLERANCE of the
+ * requested delay.
+ */
+static void time_test_wait_until(time_cb time_cur, time_from_ns_cb time_from_ns)
+{
+ int i;
+ odp_time_t lower_limit, upper_limit;
+ odp_time_t start_time, end_time, wait;
+ odp_time_t second = time_from_ns(ODP_TIME_SEC_IN_NS);
+
+ start_time = time_cur();
+ wait = start_time;
+ for (i = 0; i < WAIT_SECONDS; i++) {
+ wait = odp_time_sum(wait, second);
+ odp_time_wait_until(wait);
+ }
+ end_time = time_cur();
+
+ wait = odp_time_diff(end_time, start_time);
+ lower_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
+ DELAY_TOLERANCE);
+ upper_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
+ DELAY_TOLERANCE);
+
+ if (odp_time_cmp(wait, lower_limit) < 0) {
+ ODPH_ERR("Exceed lower limit: wait is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+ odp_time_to_ns(wait), odp_time_to_ns(lower_limit));
+ CU_FAIL("Exceed lower limit\n");
+ }
+
+ if (odp_time_cmp(wait, upper_limit) > 0) {
+ /* Bug fix: this message previously printed lower_limit while
+ * claiming to print upper_limit (cf. time_test_wait_ns) */
+ ODPH_ERR("Exceed upper limit: wait is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+ odp_time_to_ns(wait), odp_time_to_ns(upper_limit));
+ CU_FAIL("Exceed upper limit\n");
+ }
+}
+
+/* Wrappers binding the generic wait_until test to each clock */
+static void time_test_local_wait_until(void)
+{
+ time_test_wait_until(odp_time_local, odp_time_local_from_ns);
+}
+
+static void time_test_global_wait_until(void)
+{
+ time_test_wait_until(odp_time_global, odp_time_global_from_ns);
+}
+
+/*
+ * Wait WAIT_SECONDS one-second intervals with odp_time_wait_ns() and
+ * verify the total elapsed local time stays within DELAY_TOLERANCE.
+ */
+static void time_test_wait_ns(void)
+{
+ int i;
+ odp_time_t lower_limit, upper_limit;
+ odp_time_t start_time, end_time, diff;
+
+ start_time = odp_time_local();
+ for (i = 0; i < WAIT_SECONDS; i++)
+ odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
+ end_time = odp_time_local();
+
+ diff = odp_time_diff(end_time, start_time);
+
+ lower_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
+ DELAY_TOLERANCE);
+ upper_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
+ DELAY_TOLERANCE);
+
+ if (odp_time_cmp(diff, lower_limit) < 0) {
+ ODPH_ERR("Exceed lower limit: diff is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+ odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
+ CU_FAIL("Exceed lower limit\n");
+ }
+
+ if (odp_time_cmp(diff, upper_limit) > 0) {
+ ODPH_ERR("Exceed upper limit: diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+ odp_time_to_ns(diff), odp_time_to_ns(upper_limit));
+ CU_FAIL("Exceed upper limit\n");
+ }
+}
+
+/* Check that ODP time is within +-5% of system time */
+static void check_time_diff(double t_odp, double t_system,
+ const char *test, int id)
+{
+ if (t_odp > t_system * 1.05) {
+ CU_FAIL("ODP time too high");
+ ODPH_ERR("ODP time too high (%s/%d): t_odp: %f, t_system: %f\n",
+ test, id, t_odp, t_system);
+ }
+ if (t_odp < t_system * 0.95) {
+ CU_FAIL("ODP time too low");
+ ODPH_ERR("ODP time too low (%s/%d): t_odp: %f, t_system: %f\n",
+ test, id, t_odp, t_system);
+ }
+}
+
+/*
+ * Compare elapsed ODP time (both regular and strict variants) against
+ * CLOCK_MONOTONIC over roughly five seconds; each must stay within the
+ * +-5% window enforced by check_time_diff().
+ */
+static void time_test_accuracy(time_cb time_cur,
+ time_cb time_cur_strict, time_from_ns_cb time_from_ns)
+{
+ int i;
+ odp_time_t t1[2], t2[2], wait;
+ struct timespec ts1, ts2, tsdiff;
+ double sec_c;
+ odp_time_t sec = time_from_ns(ODP_TIME_SEC_IN_NS);
+
+ i = clock_gettime(CLOCK_MONOTONIC, &ts1);
+ CU_ASSERT(i == 0);
+ t1[0] = time_cur_strict();
+ t1[1] = time_cur();
+
+ wait = odp_time_sum(t1[0], sec);
+ for (i = 0; i < 5; i++) {
+ odp_time_wait_until(wait);
+ wait = odp_time_add_ns(wait, ODP_TIME_SEC_IN_NS);
+ }
+
+ i = clock_gettime(CLOCK_MONOTONIC, &ts2);
+ CU_ASSERT(i == 0);
+ t2[0] = time_cur_strict();
+ t2[1] = time_cur();
+
+ /* Manual timespec subtraction with nsec borrow */
+ if (ts2.tv_nsec < ts1.tv_nsec) {
+ tsdiff.tv_nsec = 1000000000L + ts2.tv_nsec - ts1.tv_nsec;
+ tsdiff.tv_sec = ts2.tv_sec - 1 - ts1.tv_sec;
+ } else {
+ tsdiff.tv_nsec = ts2.tv_nsec - ts1.tv_nsec;
+ tsdiff.tv_sec = ts2.tv_sec - ts1.tv_sec;
+ }
+ sec_c = ((double)(tsdiff.tv_nsec) / 1000000000L) + tsdiff.tv_sec;
+
+ for (i = 0; i < 2; i++) {
+ odp_time_t diff = odp_time_diff(t2[i], t1[i]);
+ double sec_t = ((double)odp_time_to_ns(diff)) / ODP_TIME_SEC_IN_NS;
+
+ check_time_diff(sec_t, sec_c, __func__, i);
+ }
+}
+
+/* Wrappers binding the generic accuracy test to each clock */
+static void time_test_local_accuracy(void)
+{
+ time_test_accuracy(odp_time_local, odp_time_local_strict, odp_time_local_from_ns);
+}
+
+static void time_test_global_accuracy(void)
+{
+ time_test_accuracy(odp_time_global, odp_time_global_strict, odp_time_global_from_ns);
+}
+
+/*
+ * Same accuracy check as time_test_accuracy() but for the four direct
+ * nsec APIs (global/local x strict/regular) against CLOCK_MONOTONIC.
+ */
+static void time_test_accuracy_nsec(void)
+{
+ uint64_t t1[4], t2[4];
+ struct timespec ts1, ts2, tsdiff;
+ double sec_c;
+ int i, ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts1);
+ CU_ASSERT(ret == 0);
+ t1[0] = odp_time_global_strict_ns();
+ t1[1] = odp_time_local_strict_ns();
+ t1[2] = odp_time_global_ns();
+ t1[3] = odp_time_local_ns();
+
+ for (i = 0; i < 5; i++)
+ odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts2);
+ CU_ASSERT(ret == 0);
+ t2[0] = odp_time_global_strict_ns();
+ t2[1] = odp_time_local_strict_ns();
+ t2[2] = odp_time_global_ns();
+ t2[3] = odp_time_local_ns();
+
+ /* Manual timespec subtraction with nsec borrow */
+ if (ts2.tv_nsec < ts1.tv_nsec) {
+ tsdiff.tv_nsec = 1000000000L + ts2.tv_nsec - ts1.tv_nsec;
+ tsdiff.tv_sec = ts2.tv_sec - 1 - ts1.tv_sec;
+ } else {
+ tsdiff.tv_nsec = ts2.tv_nsec - ts1.tv_nsec;
+ tsdiff.tv_sec = ts2.tv_sec - ts1.tv_sec;
+ }
+ sec_c = ((double)(tsdiff.tv_nsec) / 1000000000L) + tsdiff.tv_sec;
+
+ for (i = 0; i < 4; i++) {
+ uint64_t diff = t2[i] - t1[i];
+ double sec_t = ((double)diff) / ODP_TIME_SEC_IN_NS;
+
+ check_time_diff(sec_t, sec_c, __func__, i);
+ }
+}
+
+/*
+ * Helper thread body for the global sync test: after each barrier, take a
+ * global time sample into the slot indexed by this thread's ID. Returns
+ * non-zero if the shared memory lookup fails.
+ */
+static int time_test_global_sync_thr(void *arg ODP_UNUSED)
+{
+ int tid = odp_thread_id();
+ odp_shm_t global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_shared_mem_t *global_mem = odp_shm_addr(global_shm);
+
+ if (!global_mem)
+ return 1;
+
+ odp_barrier_wait(&global_mem->test_barrier);
+ global_mem->time[tid][0] = odp_time_global();
+ odp_time_wait_ns(ODP_TIME_MSEC_IN_NS * 100);
+ odp_barrier_wait(&global_mem->test_barrier);
+ global_mem->time[tid][1] = odp_time_global();
+
+ return 0;
+}
+
+/*
+ * Verify that global time is synchronized across threads: launch either
+ * one control + one worker (ctrl != 0) or num workers, have each sample
+ * global time right after a shared barrier, and require the spread of the
+ * samples in each round to stay below the tolerance. Thread creation is
+ * deliberately staggered by more than the tolerance so a per-thread time
+ * base offset would be visible.
+ */
+static void time_test_global_sync(const int ctrl)
+{
+ odp_cpumask_t cpumask;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ uint64_t tolerance = odp_cunit_ci() ? TIME_TOLERANCE_CI_NS : TIME_TOLERANCE_NS;
+ const int num = ctrl ? 2 : global_mem->num_threads;
+
+ if (num < 2) {
+ printf(" number of threads is less than two, test skipped. ");
+ return;
+ }
+
+ odp_barrier_init(&global_mem->test_barrier, num);
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = time_test_global_sync_thr;
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = *instance;
+
+ int thr = 0;
+
+ if (ctrl) {
+ /* Test sync between one control and one worker thread. */
+ int control_cpu;
+ int worker_cpu;
+
+ odp_cpumask_default_control(&cpumask, 1);
+ thr_common.cpumask = &cpumask;
+ thr_param.thr_type = ODP_THREAD_CONTROL;
+ control_cpu = odp_cpumask_first(&cpumask);
+
+ int r = odph_thread_create(&thread_tbl[thr++],
+ &thr_common, &thr_param, 1);
+ CU_ASSERT_FATAL(r == 1);
+ odp_cpumask_default_worker(&cpumask, 1);
+ worker_cpu = odp_cpumask_first(&cpumask);
+ if (control_cpu == worker_cpu) {
+ printf(" single CPU, relaxing tolerance. ");
+ tolerance = TIME_TOLERANCE_1CPU_NS;
+ }
+ } else {
+ /* Test sync between num worker threads. */
+ odp_cpumask_default_worker(&cpumask, num);
+ }
+
+ int cpu = odp_cpumask_first(&cpumask);
+
+ while (cpu >= 0) {
+ odp_cpumask_t cpumask_one;
+
+ /*
+ * Delay for more than the tolerance, so that we notice if the
+ * thread's view of global time is affected.
+ */
+ odp_time_wait_ns(tolerance * 2);
+
+ odp_cpumask_zero(&cpumask_one);
+ odp_cpumask_set(&cpumask_one, cpu);
+ thr_common.cpumask = &cpumask_one;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ int r = odph_thread_create(&thread_tbl[thr++],
+ &thr_common, &thr_param, 1);
+ CU_ASSERT_FATAL(r == 1);
+
+ cpu = odp_cpumask_next(&cpumask, cpu);
+ }
+
+ CU_ASSERT(odph_thread_join(thread_tbl, num) == num);
+
+ /* Samples are indexed by thread ID; IDs 1..num belong to the helpers */
+ for (int s = 0; s < TIME_SAMPLES; s++) {
+ int min_idx = 0, max_idx = 0;
+ uint64_t min = UINT64_MAX, max = 0;
+ double avg = 0;
+
+ for (int i = 1; i < num + 1; i++) {
+ uint64_t t = odp_time_to_ns(global_mem->time[i][s]);
+
+ if (t < min) {
+ min = t;
+ min_idx = i;
+ }
+ }
+
+ printf("\nround %d\nthread time diffs: ", s);
+
+ for (int i = 1; i < num + 1; i++) {
+ uint64_t t = odp_time_to_ns(global_mem->time[i][s]) - min;
+
+ printf("%" PRIu64 " ", t);
+
+ if (t > max) {
+ max = t;
+ max_idx = i;
+ }
+
+ avg += t;
+ }
+
+ /* The min result itself is not included in the average. */
+ avg /= num - 1;
+ printf("\nmin: %" PRIu64 " (tid %d) max diff: %" PRIu64
+ " (tid %d) avg diff: %g", min, min_idx, max, max_idx, avg);
+ CU_ASSERT(max < tolerance);
+ }
+
+ printf("\n");
+}
+
+/* Wrappers selecting the worker-only and control+worker sync variants */
+static void time_test_global_sync_workers(void)
+{
+ time_test_global_sync(0);
+}
+
+static void time_test_global_sync_control(void)
+{
+ time_test_global_sync(1);
+}
+
+/* Test list: resolution tests run before the tests that use the cached
+ * local_res/global_res tolerances */
+odp_testinfo_t time_suite_time[] = {
+ ODP_TEST_INFO(time_test_constants),
+ ODP_TEST_INFO(time_test_startup_time),
+ ODP_TEST_INFO(time_test_local_res),
+ ODP_TEST_INFO(time_test_local_conversion),
+ ODP_TEST_INFO(time_test_local_cmp),
+ ODP_TEST_INFO(time_test_local_diff),
+ ODP_TEST_INFO(time_test_local_sum),
+ ODP_TEST_INFO(time_test_global_res),
+ ODP_TEST_INFO(time_test_global_conversion),
+ ODP_TEST_INFO(time_test_global_cmp),
+ ODP_TEST_INFO(time_test_global_diff),
+ ODP_TEST_INFO(time_test_global_sum),
+ ODP_TEST_INFO(time_test_wait_ns),
+ ODP_TEST_INFO(time_test_monotony),
+ ODP_TEST_INFO(time_test_local_wait_until),
+ ODP_TEST_INFO(time_test_global_wait_until),
+ ODP_TEST_INFO(time_test_local_accuracy),
+ ODP_TEST_INFO(time_test_global_accuracy),
+ ODP_TEST_INFO(time_test_accuracy_nsec),
+ ODP_TEST_INFO(time_test_local_strict_diff),
+ ODP_TEST_INFO(time_test_local_strict_sum),
+ ODP_TEST_INFO(time_test_local_strict_cmp),
+ ODP_TEST_INFO(time_test_global_strict_diff),
+ ODP_TEST_INFO(time_test_global_strict_sum),
+ ODP_TEST_INFO(time_test_global_strict_cmp),
+ ODP_TEST_INFO(time_test_global_sync_workers),
+ ODP_TEST_INFO(time_test_global_sync_control),
+ ODP_TEST_INFO_NULL
+};
+
+/* Suite registry: single "Time" suite, no per-suite init/term hooks */
+odp_suiteinfo_t time_suites[] = {
+ {"Time", NULL, NULL, time_suite_time},
+ ODP_SUITE_INFO_NULL
+};
+
+/* Entry point: parse options, install init/term hooks, register and run. */
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(time_global_init);
+ odp_cunit_register_global_term(time_global_term);
+
+ ret = odp_cunit_register(time_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/timer/.gitignore b/test/validation/api/timer/.gitignore
index 74e8fa992..74e8fa992 100644
--- a/test/common_plat/validation/api/timer/.gitignore
+++ b/test/validation/api/timer/.gitignore
diff --git a/test/validation/api/timer/Makefile.am b/test/validation/api/timer/Makefile.am
new file mode 100644
index 000000000..bc33e731f
--- /dev/null
+++ b/test/validation/api/timer/Makefile.am
@@ -0,0 +1,4 @@
+# Build rules for the timer API validation binary (timer_main from timer.c)
+include ../Makefile.inc
+
+test_PROGRAMS = timer_main
+timer_main_SOURCES = timer.c
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
new file mode 100644
index 000000000..3678d0cb2
--- /dev/null
+++ b/test/validation/api/timer/timer.c
@@ -0,0 +1,3309 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* For rand_r and nanosleep */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <time.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#include <stdint.h>
+
+/* Maximum number of worker threads used by multithreaded test cases */
+#define MAX_WORKERS 32
+
+#define GLOBAL_SHM_NAME "GlobalTimerTest"
+
+/* Upper bound on pools created by the max-pools test case */
+#define MAX_TIMER_POOLS 1024
+
+/* Timeout range in milliseconds (ms) */
+#define RANGE_MS 2000
+
+/* Number of timers per thread */
+#define NTIMERS 2000
+
+/* Number of extra timers per thread */
+#define EXTRA_TIMERS 256
+
+#define NAME "timer_pool"
+#define MSEC ODP_TIME_MSEC_IN_NS
+/* 10/3 msec (~3.33 msec), expressed in nanoseconds */
+#define THREE_POINT_THREE_MSEC (10 * ODP_TIME_MSEC_IN_NS / 3)
+/* Marker passed as the timer user pointer and verified on timeout delivery */
+#define USER_PTR ((void *)0xdead)
+/* Sentinel meaning "no expiration tick recorded" */
+#define TICK_INVALID (~(uint64_t)0)
+#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
+
+/* Test case options */
+#define PRIV 1
+#define EXP_RELAX 1
+#define FIRST_TICK 1
+#define RELATIVE ODP_TIMER_TICK_REL
+#define ABSOLUTE ODP_TIMER_TICK_ABS
+
+/* Whether a single-shot test waits for the timeout or cancels the timer */
+enum {
+ TIMEOUT = 0,
+ CANCEL
+};
+
+/* Whether a single-shot test starts the timer once or restarts it */
+enum {
+ START = 0,
+ RESTART
+};
+
+/* Timer helper structure */
+struct test_timer {
+ odp_timer_t tim; /* Timer handle */
+ odp_event_t ev; /* Timeout event */
+ odp_event_t ev2; /* Copy of event handle */
+ uint64_t tick; /* Expiration tick or TICK_INVALID */
+};
+
+typedef struct {
+ /* Periodic timer support */
+ int periodic_support;
+
+ /* Default resolution / timeout parameters */
+ struct {
+ uint64_t res_ns;
+ uint64_t min_tmo;
+ uint64_t max_tmo;
+ odp_bool_t queue_type_sched;
+ odp_bool_t queue_type_plain;
+ } param;
+
+ /* Timeout pool handle used by all threads */
+ odp_pool_t tbp;
+
+ /* Timer pool handle used by all threads */
+ odp_timer_pool_t tp;
+
+ /* Barrier for thread synchronization */
+ odp_barrier_t test_barrier;
+
+ /* Count of timeouts delivered too late */
+ odp_atomic_u32_t ndelivtoolate;
+
+ /* Sum of all allocated timers from all threads. Thread-local
+ * caches may make this number lower than the capacity of the pool */
+ odp_atomic_u32_t timers_allocated;
+
+ /* Number of timers allocated per thread */
+ uint32_t timers_per_thread;
+
+ /* Queue type to be tested */
+ odp_queue_type_t test_queue_type;
+
+} global_shared_mem_t;
+
+/* Per-run state: clock source under test plus the shared test state */
+typedef struct {
+ odp_timer_clk_src_t clk_src;
+ global_shared_mem_t global_mem;
+
+} test_global_t;
+
+/* Convenience pointers into the shared memory reserved in global_init() */
+static global_shared_mem_t *global_mem;
+static test_global_t *test_global;
+static odp_shm_t global_shm;
+static odp_instance_t inst;
+
+/* Initialize ODP (global + control-thread local), reserve the shared test
+ * state and configure the scheduler. Returns 0 on success, -1 on failure. */
+static int global_init(void)
+{
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("odph_options() failed\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(&inst, &init_param, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return -1;
+ }
+ if (0 != odp_init_local(inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return -1;
+ }
+
+ /* Shared memory so worker threads see the same test state */
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(test_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (global_shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Unable to reserve memory for global_shm\n");
+ return -1;
+ }
+
+ test_global = odp_shm_addr(global_shm);
+ memset(test_global, 0, sizeof(*test_global));
+ global_mem = &test_global->global_mem;
+
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
+ return 0;
+}
+
+/* Release the shared memory and tear down ODP in reverse order of
+ * global_init(). Returns 0 on success, -1 on the first failure. */
+static int global_term(void)
+{
+ if (0 != odp_shm_free(global_shm)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* CUnit per-run init: query timer capabilities for the clock source under
+ * test and derive default resolution / min / max timeout parameters that all
+ * test cases share. Returns 0 on success, -1 if the platform cannot provide
+ * the required timeout range. */
+static int timer_global_init(odp_instance_t *instance)
+{
+ odp_timer_capability_t capa;
+ odp_timer_res_capability_t res_capa;
+ uint64_t res_ns, min_tmo, max_tmo;
+ unsigned int range;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ *instance = inst;
+
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ memset(&capa, 0, sizeof(capa));
+ if (odp_timer_capability(clk_src, &capa)) {
+ ODPH_ERR("Timer capability failed\n");
+ return -1;
+ }
+
+ global_mem->periodic_support = capa.periodic.max_pools > 0;
+
+ /* By default 2 msec resolution */
+ res_ns = 2 * ODP_TIME_MSEC_IN_NS;
+ if (res_ns < capa.max_res.res_ns)
+ res_ns = capa.max_res.res_ns;
+
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.res_ns = res_ns;
+
+ if (odp_timer_res_capability(clk_src, &res_capa)) {
+ ODPH_ERR("Timer resolution capability failed\n");
+ return -1;
+ }
+
+ /* Try to keep min timeout error margin within +-20% */
+ min_tmo = 5 * res_ns;
+ if (min_tmo < res_capa.min_tmo)
+ min_tmo = res_capa.min_tmo;
+
+ /* Max 1 hour */
+ max_tmo = 3600 * ODP_TIME_SEC_IN_NS;
+ if (max_tmo > res_capa.max_tmo)
+ max_tmo = res_capa.max_tmo;
+
+ /* NOTE(review): RANGE_MS * 1000 yields microseconds while min/max_tmo
+ * are nanoseconds, so the effective required range here is ~2 msec,
+ * not the 2000 msec the error message suggests — confirm units. */
+ range = (RANGE_MS * 1000) + THREE_POINT_THREE_MSEC;
+ if ((max_tmo - min_tmo) < range) {
+ ODPH_ERR("Validation test needs %u msec range\n", range);
+ return -1;
+ }
+
+ /* Default parameters for test cases using the default clock source */
+ global_mem->param.res_ns = res_ns;
+ global_mem->param.min_tmo = min_tmo;
+ global_mem->param.max_tmo = max_tmo;
+ global_mem->param.queue_type_plain = capa.queue_type_plain;
+ global_mem->param.queue_type_sched = capa.queue_type_sched;
+
+ return 0;
+}
+
+/* CUnit per-run term hook: nothing to clean up (global_term() handles ODP) */
+static int timer_global_term(odp_instance_t inst)
+{
+ (void)inst;
+
+ return 0;
+}
+
+/* Conditional-run predicates: each returns ODP_TEST_ACTIVE when the
+ * capability needed by the test case is available, ODP_TEST_INACTIVE
+ * otherwise (the case is then skipped). */
+
+/* Timer completion on scheduled queues supported? */
+static int
+check_sched_queue_support(void)
+{
+ if (global_mem->param.queue_type_sched)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Timer completion on plain queues supported? */
+static int
+check_plain_queue_support(void)
+{
+ if (global_mem->param.queue_type_plain)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Periodic timers supported at all? */
+static int check_periodic_support(void)
+{
+ if (global_mem->periodic_support)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Periodic timers with scheduled queues? */
+static int check_periodic_sched_support(void)
+{
+ if (global_mem->periodic_support && global_mem->param.queue_type_sched)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Periodic timers with plain queues? */
+static int check_periodic_plain_support(void)
+{
+ if (global_mem->periodic_support && global_mem->param.queue_type_plain)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+/* Validate odp_timer_capability() and odp_timer_res_capability()
+ * consistency: max resolution (in nsec and Hz) and max timeout limits must
+ * agree between the two APIs, and requests just beyond the limits must be
+ * rejected. */
+static void timer_test_capa(void)
+{
+ odp_timer_capability_t capa;
+ odp_timer_res_capability_t res_capa;
+ int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT(capa.highest_res_ns == capa.max_res.res_ns);
+ /* Assuming max resolution to be 100 msec or better */
+ CU_ASSERT(capa.max_res.res_ns <= 100000000);
+ CU_ASSERT(capa.max_res.res_hz >= 10);
+ CU_ASSERT(capa.max_res.res_ns < capa.max_res.max_tmo);
+ CU_ASSERT(capa.max_res.min_tmo < capa.max_res.max_tmo);
+
+ /* With max timeout, resolution may be low (worse than 1 sec) */
+ CU_ASSERT(capa.max_tmo.res_ns < capa.max_tmo.max_tmo);
+ CU_ASSERT(capa.max_tmo.min_tmo < capa.max_tmo.max_tmo);
+ CU_ASSERT(capa.max_tmo.res_ns != 0 || capa.max_tmo.res_hz != 0);
+ if (capa.max_tmo.res_hz == 0)
+ CU_ASSERT(capa.max_tmo.res_ns > 1000000000);
+
+ /* Set max resolution in nsec */
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.res_ns = capa.max_res.res_ns;
+
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT(res_capa.res_ns == capa.max_res.res_ns);
+ CU_ASSERT(res_capa.min_tmo == capa.max_res.min_tmo);
+ CU_ASSERT(res_capa.max_tmo == capa.max_res.max_tmo);
+
+ /* One nsec better than the best resolution must fail */
+ if (capa.max_res.res_ns > 1) {
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.res_ns = capa.max_res.res_ns - 1;
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT(ret < 0);
+ }
+
+ /* Set max resolution in hz */
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.res_hz = capa.max_res.res_hz;
+
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT(res_capa.res_hz == capa.max_res.res_hz);
+ CU_ASSERT(res_capa.min_tmo == capa.max_res.min_tmo);
+ CU_ASSERT(res_capa.max_tmo == capa.max_res.max_tmo);
+
+ /* One Hz above the best resolution must fail */
+ if (capa.max_res.res_hz < UINT64_MAX) {
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.res_hz = capa.max_res.res_hz + 1;
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT(ret < 0);
+ }
+
+ /* Set max timeout */
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.max_tmo = capa.max_tmo.max_tmo;
+
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT(res_capa.max_tmo == capa.max_tmo.max_tmo);
+ CU_ASSERT(res_capa.min_tmo == capa.max_tmo.min_tmo);
+ CU_ASSERT(res_capa.res_ns == capa.max_tmo.res_ns);
+ CU_ASSERT(res_capa.res_hz == capa.max_tmo.res_hz);
+
+ /* One nsec past the longest timeout must fail */
+ if (capa.max_tmo.max_tmo < UINT64_MAX) {
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.max_tmo = capa.max_tmo.max_tmo + 1;
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT(ret < 0);
+ }
+}
+
+/* Check that all clock source enum values exist and are consecutive, that
+ * the default source always has capabilities, and that every source either
+ * reports capabilities (0) or is cleanly unsupported (-1). */
+static void timer_test_capa_allsrc(void)
+{
+ odp_timer_capability_t capa;
+ odp_timer_clk_src_t clk_src;
+ int i;
+
+ /* Check that all API clock source enumeration values exist */
+ CU_ASSERT_FATAL(ODP_CLOCK_DEFAULT == ODP_CLOCK_SRC_0);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 1 == ODP_CLOCK_SRC_1);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 2 == ODP_CLOCK_SRC_2);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 3 == ODP_CLOCK_SRC_3);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 4 == ODP_CLOCK_SRC_4);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 5 == ODP_CLOCK_SRC_5);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_5 + 1 == ODP_CLOCK_NUM_SRC);
+
+ CU_ASSERT(odp_timer_capability(ODP_CLOCK_DEFAULT, &capa) == 0);
+
+ for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
+ int ret;
+
+ clk_src = ODP_CLOCK_SRC_0 + i;
+
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT(ret == 0 || ret == -1);
+ }
+}
+
+/* Verify odp_timer_pool_param_init() defaults are set regardless of the
+ * prior memory contents (param struct is pre-filled with 'fill'). */
+static void test_param_init(uint8_t fill)
+{
+ odp_timer_pool_param_t tp_param;
+
+ memset(&tp_param, fill, sizeof(tp_param));
+
+ odp_timer_pool_param_init(&tp_param);
+ CU_ASSERT(tp_param.res_ns == 0);
+ CU_ASSERT(tp_param.res_hz == 0);
+ CU_ASSERT(tp_param.min_tmo == 0);
+ CU_ASSERT(tp_param.priv == 0);
+ CU_ASSERT(tp_param.clk_src == ODP_CLOCK_DEFAULT);
+ CU_ASSERT(tp_param.exp_mode == ODP_TIMER_EXP_AFTER);
+ CU_ASSERT(tp_param.timer_type == ODP_TIMER_TYPE_SINGLE);
+ CU_ASSERT(tp_param.periodic.base_freq_hz.integer == 0);
+ CU_ASSERT(tp_param.periodic.base_freq_hz.numer == 0);
+ CU_ASSERT(tp_param.periodic.base_freq_hz.denom == 0);
+}
+
+/* Run the param-init check with both all-zeros and all-ones fill patterns */
+static void timer_test_param_init(void)
+{
+ test_param_init(0);
+ test_param_init(0xff);
+}
+
+/* Allocate all timeouts from a small pool, verify each converts to an event
+ * of type ODP_EVENT_TIMEOUT with no subtype and no source pool, then free
+ * them and destroy the pool. */
+static void timer_test_timeout_pool_alloc(void)
+{
+ odp_pool_t pool;
+ const int num = 3;
+ odp_timeout_t tmo[num];
+ odp_event_t ev;
+ int index;
+ odp_bool_t wrong_type = false, wrong_subtype = false;
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = num;
+
+ pool = odp_pool_create("timeout_pool_alloc", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_pool_print(pool);
+
+ /* Try to allocate num items from the pool */
+ for (index = 0; index < num; index++) {
+ odp_event_subtype_t subtype;
+
+ tmo[index] = odp_timeout_alloc(pool);
+
+ if (tmo[index] == ODP_TIMEOUT_INVALID)
+ break;
+
+ ev = odp_timeout_to_event(tmo[index]);
+ if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
+ wrong_type = true;
+ if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+ if (odp_event_types(ev, &subtype) != ODP_EVENT_TIMEOUT)
+ wrong_type = true;
+ if (subtype != ODP_EVENT_NO_SUBTYPE)
+ wrong_subtype = true;
+
+ /* No source pool for timeout events */
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+ }
+
+ /* Check that the pool had at least num items */
+ CU_ASSERT(index == num);
+ /* index points one past tmo[] or at the first failed allocation;
+ * step back to the last valid entry */
+ index--;
+
+ /* Check that the pool had correct buffers */
+ CU_ASSERT(!wrong_type);
+ CU_ASSERT(!wrong_subtype);
+
+ for (; index >= 0; index--)
+ odp_timeout_free(tmo[index]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Drain a timeout pool with odp_timeout_alloc_multi() in halving batches
+ * (allowing up to 100 empty-return retries), then release everything with
+ * odp_timeout_free_multi() in halving batches. */
+static void timer_test_timeout_pool_alloc_multi(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ uint32_t num_timeouts = 1000;
+ uint32_t num_allocated = 0;
+ uint32_t num_freed = 0;
+ uint32_t num_retries = 0;
+
+ CU_ASSERT_FATAL(!odp_pool_capability(&capa));
+
+ /* Clamp to platform limit (0 means unlimited) */
+ if (capa.tmo.max_num && capa.tmo.max_num < num_timeouts)
+ num_timeouts = capa.tmo.max_num;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = num_timeouts;
+
+ pool = odp_pool_create("timeout_pool_alloc_multi", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_timeout_t tmo[num_timeouts];
+
+ do {
+ int ret;
+ int num = (num_timeouts - num_allocated) / 2;
+
+ if (num < 1)
+ num = 1;
+
+ ret = odp_timeout_alloc_multi(pool, &tmo[num_allocated], num);
+ if (ret < 0) {
+ CU_FAIL("Timeout alloc multi failed");
+ break;
+ }
+ CU_ASSERT_FATAL(ret <= num);
+
+ /* Count consecutive empty returns; bail out after 100 */
+ num_retries = (ret == 0) ? num_retries + 1 : 0;
+ num_allocated += ret;
+ } while (num_allocated < num_timeouts && num_retries < 100);
+ CU_ASSERT(num_allocated == num_timeouts)
+
+ if (num_allocated) {
+ do {
+ int num = num_allocated / 2;
+
+ if (num < 1)
+ num = 1;
+
+ odp_timeout_free_multi(&tmo[num_freed], num);
+
+ num_freed += num;
+ num_allocated -= num;
+ } while (num_allocated);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify event <-> timeout handle round-trips: odp_timeout_from_event() and
+ * odp_timeout_from_event_multi() must return handles equal to the originals
+ * (compared via odp_timeout_to_u64()). */
+static void timer_test_timeout_from_event(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i;
+ const uint32_t num = 10;
+ uint32_t num_alloc = 0;
+ odp_timeout_t tmo_tbl[num];
+ odp_timeout_t tmo2_tbl[num];
+ odp_event_t ev_tbl[num];
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = num;
+
+ pool = odp_pool_create("test_timeout_from_event", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ tmo_tbl[i] = odp_timeout_alloc(pool);
+ if (tmo_tbl[i] == ODP_TIMEOUT_INVALID)
+ break;
+ ev_tbl[i] = odp_timeout_to_event(tmo_tbl[i]);
+ num_alloc++;
+ }
+
+ CU_ASSERT(num_alloc == num);
+
+ /* Single-event conversion */
+ for (i = 0; i < num_alloc; i++) {
+ odp_timeout_t tmo = odp_timeout_from_event(ev_tbl[i]);
+
+ CU_ASSERT(odp_timeout_to_u64(tmo) == odp_timeout_to_u64(tmo_tbl[i]));
+ }
+
+ /* Multi-event conversion */
+ odp_timeout_from_event_multi(tmo2_tbl, ev_tbl, num_alloc);
+ for (i = 0; i < num_alloc; i++)
+ CU_ASSERT(odp_timeout_to_u64(tmo2_tbl[i]) == odp_timeout_to_u64(tmo_tbl[i]));
+
+ for (i = 0; i < num_alloc; i++)
+ odp_timeout_free(tmo_tbl[i]);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that freeing a timeout returns it to its pool: a one-item pool can
+ * be drained, freed, and drained again. */
+static void timer_test_timeout_pool_free(void)
+{
+ odp_pool_t pool;
+ odp_timeout_t tmo;
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = 1;
+
+ pool = odp_pool_create("timeout_pool_free", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ odp_pool_print(pool);
+
+ /* Allocate the only timeout from the pool */
+ tmo = odp_timeout_alloc(pool);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+
+ /* Pool should have only one timeout */
+ CU_ASSERT_FATAL(odp_timeout_alloc(pool) == ODP_TIMEOUT_INVALID)
+
+ odp_timeout_free(tmo);
+
+ /* Check that the timeout was returned back to the pool */
+ tmo = odp_timeout_alloc(pool);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+
+ odp_timeout_free(tmo);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify timeout user areas: each allocated timeout has a distinct, writable
+ * user area, reachable through both timeout and event APIs, and the event
+ * user-area flag is negative (no flag support for timeouts). Skips silently
+ * when the platform reports zero max user-area size. */
+static void timer_test_timeout_user_area(void)
+{
+ odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t param;
+ uint32_t i, max_size;
+ void *addr;
+ void *prev = NULL;
+ const uint32_t num = 10;
+ uint32_t num_alloc = 0;
+ uint32_t size = 1024;
+ odp_timeout_t tmo[num];
+
+ CU_ASSERT_FATAL(!odp_pool_capability(&pool_capa));
+ max_size = pool_capa.tmo.max_uarea_size;
+
+ if (max_size == 0) {
+ ODPH_DBG("Timeout user area not supported\n");
+ return;
+ }
+
+ if (size > max_size)
+ size = max_size;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = num;
+ param.tmo.uarea_size = size;
+
+ pool = odp_pool_create("test_user_area", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ odp_event_t ev;
+ int flag = 0;
+
+ tmo[i] = odp_timeout_alloc(pool);
+
+ if (tmo[i] == ODP_TIMEOUT_INVALID)
+ break;
+
+ num_alloc++;
+
+ addr = odp_timeout_user_area(tmo[i]);
+ CU_ASSERT_FATAL(addr != NULL);
+ /* Each timeout must have its own area */
+ CU_ASSERT(prev != addr);
+
+ ev = odp_timeout_to_event(tmo[i]);
+ CU_ASSERT(odp_event_user_area(ev) == addr);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+ CU_ASSERT(flag < 0);
+
+ prev = addr;
+ /* Touch the whole area to catch bad mappings */
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ for (i = 0; i < num_alloc; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
+/* Exercise timer pool lifecycle: create/start/destroy a single pool, then
+ * two pools started together and destroyed in different orders, checking
+ * that surviving pools keep working (info query, timer alloc/free). */
+static void timer_pool_create_destroy(void)
+{
+ odp_timer_pool_param_t tparam;
+ odp_queue_param_t queue_param;
+ odp_timer_capability_t capa;
+ odp_timer_pool_info_t info;
+ odp_timer_pool_t tp[2];
+ odp_timer_t tim;
+ odp_queue_t queue;
+ int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Prefer a plain queue, fall back to scheduled */
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+ queue = odp_queue_create("timer_queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_timer_pool_param_init(&tparam);
+ tparam.res_ns = global_mem->param.res_ns;
+ tparam.min_tmo = global_mem->param.min_tmo;
+ tparam.max_tmo = global_mem->param.max_tmo;
+ tparam.num_timers = 100;
+ tparam.priv = 0;
+ tparam.clk_src = clk_src;
+
+ /* Single pool: create, start, use, destroy */
+ tp[0] = odp_timer_pool_create("timer_pool_a", &tparam);
+ CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp[0], 1) == 1);
+
+ tim = odp_timer_alloc(tp[0], queue, USER_PTR);
+ CU_ASSERT(tim != ODP_TIMER_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
+
+ odp_timer_pool_destroy(tp[0]);
+
+ /* Two pools started together, first destroyed, a third created */
+ tp[0] = odp_timer_pool_create("timer_pool_b", &tparam);
+ CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
+ tp[1] = odp_timer_pool_create("timer_pool_c", &tparam);
+ CU_ASSERT(tp[1] != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, 2) == 2);
+
+ odp_timer_pool_destroy(tp[0]);
+
+ tp[0] = odp_timer_pool_create("timer_pool_d", &tparam);
+ CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp[0], 1) == 1);
+
+ /* Surviving pool c must still report correct info and serve timers */
+ memset(&info, 0, sizeof(odp_timer_pool_info_t));
+ CU_ASSERT(odp_timer_pool_info(tp[1], &info) == 0);
+ CU_ASSERT(strcmp(info.name, "timer_pool_c") == 0);
+
+ tim = odp_timer_alloc(tp[1], queue, USER_PTR);
+ CU_ASSERT(tim != ODP_TIMER_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
+
+ odp_timer_pool_destroy(tp[1]);
+
+ memset(&info, 0, sizeof(odp_timer_pool_info_t));
+ CU_ASSERT(odp_timer_pool_info(tp[0], &info) == 0);
+ CU_ASSERT(strcmp(info.name, "timer_pool_d") == 0);
+
+ tim = odp_timer_alloc(tp[0], queue, USER_PTR);
+ CU_ASSERT(tim != ODP_TIMER_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
+
+ odp_timer_pool_destroy(tp[0]);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Create the maximum supported number of timer pools (capped at
+ * MAX_TIMER_POOLS), each holding exactly one timer; verify a second alloc
+ * from a one-timer pool fails, then free and destroy everything. */
+static void timer_pool_create_max(void)
+{
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ uint32_t i;
+ int ret;
+ uint64_t tmo_ns = ODP_TIME_SEC_IN_NS;
+ uint64_t res_ns = ODP_TIME_SEC_IN_NS / 10;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ uint32_t num = capa.max_pools;
+
+ if (num > MAX_TIMER_POOLS)
+ num = MAX_TIMER_POOLS;
+
+ odp_timer_pool_t tp[num];
+ odp_timer_t timer[num];
+
+ /* Scale down if the platform cannot do 1 sec timeouts */
+ if (capa.max_tmo.max_tmo < tmo_ns) {
+ tmo_ns = capa.max_tmo.max_tmo;
+ res_ns = capa.max_tmo.res_ns;
+ }
+
+ odp_queue_param_init(&queue_param);
+
+ if (capa.queue_type_sched)
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue = odp_queue_create("timer_queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_timer_pool_param_init(&tp_param);
+
+ tp_param.res_ns = res_ns;
+ tp_param.min_tmo = tmo_ns / 2;
+ tp_param.max_tmo = tmo_ns;
+ tp_param.num_timers = 1;
+ tp_param.clk_src = clk_src;
+
+ for (i = 0; i < num; i++) {
+ tp[i] = odp_timer_pool_create("test_max", &tp_param);
+ if (tp[i] == ODP_TIMER_POOL_INVALID)
+ ODPH_ERR("Timer pool create failed: %u / %u\n", i, num);
+
+ CU_ASSERT_FATAL(tp[i] != ODP_TIMER_POOL_INVALID);
+ }
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, num) == (int)num);
+
+ for (i = 0; i < num; i++) {
+ timer[i] = odp_timer_alloc(tp[i], queue, USER_PTR);
+
+ if (timer[i] == ODP_TIMER_INVALID)
+ ODPH_ERR("Timer alloc failed: %u / %u\n", i, num);
+
+ CU_ASSERT_FATAL(timer[i] != ODP_TIMER_INVALID);
+
+ /* Pool should have only one timer */
+ CU_ASSERT_FATAL(odp_timer_alloc(tp[i], queue, USER_PTR) == ODP_TIMER_INVALID);
+ }
+
+ for (i = 0; i < num; i++)
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
+
+ for (i = 0; i < num; i++)
+ odp_timer_pool_destroy(tp[i]);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+/* Create timer pools at the highest supported resolution — first specified
+ * in nsec, then in Hz — and start/cancel a timer armed with the maximum
+ * timeout length at that resolution. */
+static void timer_pool_max_res(void)
+{
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_queue_param_t queue_param;
+ odp_timer_pool_t tp;
+ odp_timer_t timer;
+ odp_timer_start_t start_param;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ uint64_t tick;
+ int ret, i;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = 10;
+ pool = odp_pool_create("timeout_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Prefer a plain queue, fall back to scheduled */
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+ queue = odp_queue_create("timer_queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ /* Highest resolution: first in nsec, then in hz */
+ for (i = 0; i < 2; i++) {
+ odp_timer_pool_param_init(&tp_param);
+
+ if (i == 0) {
+ printf("\n Highest resolution %" PRIu64 " nsec\n",
+ capa.max_res.res_ns);
+ tp_param.res_ns = capa.max_res.res_ns;
+ } else {
+ printf(" Highest resolution %" PRIu64 " Hz\n",
+ capa.max_res.res_hz);
+ tp_param.res_hz = capa.max_res.res_hz;
+ }
+
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+ tp_param.num_timers = 100;
+ tp_param.priv = 0;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("high_res_tp", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ /* Maximum timeout length with maximum resolution */
+ tick = odp_timer_ns_to_tick(tp, capa.max_res.max_tmo);
+
+ timer = odp_timer_alloc(tp, queue, USER_PTR);
+ CU_ASSERT_FATAL(timer != ODP_TIMER_INVALID);
+
+ tmo = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(tmo);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = tick;
+ start_param.tmo_ev = ev;
+
+ ret = odp_timer_start(timer, &start_param);
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+
+ /* Cancel before expiration; event ownership returns to us */
+ ev = ODP_EVENT_INVALID;
+ ret = odp_timer_cancel(timer, &ev);
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+
+ if (ret == ODP_TIMER_SUCCESS) {
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ odp_event_free(ev);
+ }
+
+ CU_ASSERT(odp_timer_free(timer) == 0);
+ odp_timer_pool_destroy(tp);
+ }
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Busy-poll for an event: schedule (for sched queues, also checking the
+ * source queue) or dequeue (for plain queues) until an event arrives or
+ * wait_ns has elapsed since t1. Returns ODP_EVENT_INVALID on timeout. */
+static odp_event_t wait_event(odp_queue_type_t queue_type, odp_queue_t queue,
+ odp_time_t t1, uint64_t wait_ns)
+{
+ odp_time_t t2;
+ odp_event_t ev;
+ odp_queue_t from = ODP_QUEUE_INVALID;
+
+ while (1) {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ if (ev != ODP_EVENT_INVALID) {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ CU_ASSERT(from == queue);
+
+ return ev;
+ }
+
+ t2 = odp_time_global();
+ if (odp_time_diff_ns(t2, t1) > wait_ns)
+ break;
+ }
+
+ return ODP_EVENT_INVALID;
+}
+
+/* Drain the scheduler to release any held (e.g. atomic) scheduling context.
+ * No events are expected at this point; any that appear are flagged and
+ * freed. No-op for plain queues. */
+static void free_schedule_context(odp_queue_type_t queue_type)
+{
+ if (queue_type != ODP_QUEUE_TYPE_SCHED)
+ return;
+
+ while (1) {
+ odp_event_t ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+}
+
+/* Core single-shot timer scenario, parameterized by:
+ *   queue_type - plain or scheduled completion queue
+ *   tick_type  - relative or absolute expiration tick
+ *   restart    - START (arm once) or RESTART (arm at tmo_ns/2, then restart
+ *                to the full tmo_ns)
+ *   cancel     - CANCEL (cancel before expiry, expect the event back) or
+ *                TIMEOUT (wait for delivery and check timing within
+ *                0.5x..2x of tmo_ns)
+ *   rounds     - number of start/complete iterations reusing one timer/event
+ *   tmo_ns     - requested timeout length, clamped to platform limits */
+static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t tick_type,
+ int restart, int cancel, int rounds, uint64_t tmo_ns)
+{
+ odp_timer_capability_t capa;
+ odp_timer_res_capability_t res_capa;
+ odp_timer_pool_param_t tp_param;
+ odp_queue_param_t queue_param;
+ odp_pool_param_t pool_param;
+ odp_timer_start_t start_param;
+ odp_timer_pool_t tp;
+ odp_timer_t timer;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ odp_time_t t1, t2;
+ uint64_t tick, nsec, res_ns, min_tmo;
+ int ret, i;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT_FATAL(capa.max_tmo.max_tmo > 0);
+
+ /* Use timeout and resolution values that are within capability limits */
+ if (capa.max_tmo.max_tmo < tmo_ns)
+ tmo_ns = capa.max_tmo.max_tmo;
+
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.max_tmo = tmo_ns;
+
+ ret = odp_timer_res_capability(clk_src, &res_capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT_FATAL(res_capa.res_ns > 0);
+
+ res_ns = tmo_ns / 10;
+
+ if (res_ns < res_capa.res_ns)
+ res_ns = res_capa.res_ns;
+
+ /* Test expects better resolution than 0.5x timeout */
+ CU_ASSERT_FATAL(res_ns < tmo_ns / 2);
+
+ min_tmo = tmo_ns / 4;
+ if (min_tmo < res_capa.min_tmo)
+ min_tmo = res_capa.min_tmo;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = 10;
+ pool = odp_pool_create("timeout_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = queue_type;
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ queue = odp_queue_create("timer_queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_timer_pool_param_init(&tp_param);
+
+ tp_param.res_ns = res_ns;
+ tp_param.min_tmo = min_tmo;
+ tp_param.max_tmo = tmo_ns;
+ tp_param.num_timers = 1;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("test_single", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ timer = odp_timer_alloc(tp, queue, USER_PTR);
+ CU_ASSERT_FATAL(timer != ODP_TIMER_INVALID);
+
+ tmo = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(tmo);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+
+ /* In restart mode, first arm to half the timeout, then push it out */
+ nsec = tmo_ns;
+ if (restart)
+ nsec = tmo_ns / 2;
+
+ for (i = 0; i < rounds; i++) {
+ tick = odp_timer_ns_to_tick(tp, nsec);
+ if (tick_type == ODP_TIMER_TICK_ABS)
+ tick += odp_timer_current_tick(tp);
+
+ start_param.tick_type = tick_type;
+ start_param.tick = tick;
+ start_param.tmo_ev = ev;
+
+ ret = odp_timer_start(timer, &start_param);
+ CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
+
+ if (restart) {
+ tick = odp_timer_ns_to_tick(tp, tmo_ns);
+ if (tick_type == ODP_TIMER_TICK_ABS)
+ tick += odp_timer_current_tick(tp);
+
+ start_param.tick = tick;
+ /* Timer keeps the previously given event */
+ start_param.tmo_ev = ODP_EVENT_INVALID;
+
+ ret = odp_timer_restart(timer, &start_param);
+ CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
+ }
+
+ ev = ODP_EVENT_INVALID;
+
+ if (cancel) {
+ ret = odp_timer_cancel(timer, &ev);
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+
+ if (ret == ODP_TIMER_SUCCESS)
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ } else {
+ uint64_t diff_ns;
+
+ t1 = odp_time_global();
+ ev = wait_event(queue_type, queue, t1, 10 * tmo_ns);
+ t2 = odp_time_global();
+ diff_ns = odp_time_diff_ns(t2, t1);
+
+ /* Delivery timing within 0.5x..2x of the request */
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(diff_ns < 2 * tmo_ns);
+ CU_ASSERT((double)diff_ns > 0.5 * tmo_ns);
+ }
+
+ if (ev != ODP_EVENT_INVALID) {
+ CU_ASSERT_FATAL(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+ tmo = odp_timeout_from_event(ev);
+ CU_ASSERT(odp_timeout_user_ptr(tmo) == USER_PTR);
+ CU_ASSERT(odp_timeout_timer(tmo) == timer);
+
+ if (!cancel) {
+ if (tick_type == ODP_TIMER_TICK_ABS) {
+ /* CU_ASSERT needs these extra brackets */
+ CU_ASSERT(odp_timeout_tick(tmo) == tick);
+ } else {
+ CU_ASSERT(odp_timeout_tick(tmo) > tick);
+ }
+ }
+ } else {
+ ODPH_DBG("Event missing\n");
+ break;
+ }
+ }
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ free_schedule_context(queue_type);
+
+ CU_ASSERT(odp_timer_free(timer) == 0);
+ odp_timer_pool_destroy(tp);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Thin test-case wrappers binding timer_single_shot() to each combination
+ * of queue type (plain/sched), tick type (relative/absolute), start mode
+ * (start/restart) and completion mode (wait/cancel), plus a longer ~3 sec
+ * absolute-wait variant (30 rounds x 110 msec). */
+static void timer_plain_rel_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, START, TIMEOUT, 2, 50 * MSEC);
+}
+
+static void timer_plain_abs_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, TIMEOUT, 2, 50 * MSEC);
+}
+
+static void timer_plain_rel_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, START, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_plain_abs_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_plain_rel_restart_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, RESTART, TIMEOUT, 2, 60 * MSEC);
+}
+
+static void timer_plain_abs_restart_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, RESTART, TIMEOUT, 2, 60 * MSEC);
+}
+
+static void timer_plain_rel_restart_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, RESTART, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_plain_abs_restart_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, RESTART, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_plain_abs_wait_3sec(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, TIMEOUT, 30, 110 * MSEC);
+}
+
+static void timer_sched_rel_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, START, TIMEOUT, 2, 50 * MSEC);
+}
+
+static void timer_sched_abs_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, TIMEOUT, 2, 50 * MSEC);
+}
+
+static void timer_sched_rel_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, START, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_sched_abs_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_sched_rel_restart_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, RESTART, TIMEOUT, 2, 60 * MSEC);
+}
+
+static void timer_sched_abs_restart_wait(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, RESTART, TIMEOUT, 2, 60 * MSEC);
+}
+
+static void timer_sched_rel_restart_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, RESTART, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_sched_abs_restart_cancel(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, RESTART, CANCEL, 5, 100 * MSEC);
+}
+
+static void timer_sched_abs_wait_3sec(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, TIMEOUT, 30, 110 * MSEC);
+}
+
+ /* Verify odp_timer_current_tick() on a highest-resolution timer pool:
+  * the tick count must advance monotonically, track a 100 ms wall-clock wait
+  * within +-10%, and neither the tick value nor its nsec conversion may be
+  * close enough to UINT64_MAX to wrap within 10 years. */
+ static void timer_pool_current_tick(void)
+ {
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp;
+ uint64_t t1, t2, ticks, min, max, limit;
+ uint64_t nsec = 100 * ODP_TIME_MSEC_IN_NS;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
+
+ /* Highest resolution */
+ odp_timer_pool_param_init(&tp_param);
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+ tp_param.num_timers = 100;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("cur_tick", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ /* API to be deprecated */
+ odp_timer_pool_start();
+
+ /* Allow +-10% error margin */
+ min = odp_timer_ns_to_tick(tp, 0.9 * nsec);
+ max = odp_timer_ns_to_tick(tp, 1.1 * nsec);
+
+ t1 = odp_timer_current_tick(tp);
+
+ odp_time_wait_ns(nsec);
+
+ t2 = odp_timer_current_tick(tp);
+
+ ticks = t2 - t1;
+
+ CU_ASSERT(t2 >= t1);
+ CU_ASSERT(ticks >= min);
+ CU_ASSERT(ticks <= max);
+
+ /* Timer tick (or tick in nsec) should not wrap in at least 10 years from ODP start.
+ * Ignoring delay from start up and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ nsec = odp_timer_tick_to_ns(tp, t1);
+ CU_ASSERT(UINT64_MAX - nsec > limit);
+ CU_ASSERT(UINT64_MAX - t1 > odp_timer_ns_to_tick(tp, limit));
+
+ printf("\nClock source %i\n", clk_src);
+ printf(" Time nsec: %" PRIu64 "\n", nsec);
+ printf(" Measured ticks: %" PRIu64 "\n", ticks);
+ printf(" Expected ticks: %" PRIu64 "\n", odp_timer_ns_to_tick(tp, nsec));
+
+ odp_timer_pool_destroy(tp);
+ }
+
+ /* Verify odp_timer_sample_ticks() over two timer pools at once: one on the
+  * default clock source and one on the clock source under test. Ticks sampled
+  * before/after a 100 ms wait must advance within +-10% of the expected tick
+  * count for each pool. Also exercises the optional clk_count output. */
+ static void timer_pool_sample_ticks(void)
+ {
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp[2];
+ uint64_t t1[2], t2[2], ticks[2], min[2], max[2];
+ uint64_t clk_count[2] = {0};
+ odp_timer_clk_src_t clk_1 = ODP_CLOCK_DEFAULT;
+ odp_timer_clk_src_t clk_2 = test_global->clk_src;
+ uint64_t nsec = 100 * ODP_TIME_MSEC_IN_NS;
+
+ /* Highest resolution */
+ odp_timer_pool_param_init(&tp_param);
+ tp_param.num_timers = 100;
+
+ /* First timer pool: default clock source */
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_1, &capa) == 0);
+ tp_param.clk_src = clk_1;
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+
+ tp[0] = odp_timer_pool_create("timer_pool_0", &tp_param);
+ CU_ASSERT_FATAL(tp[0] != ODP_TIMER_POOL_INVALID);
+
+ /* Second timer pool: another clock source */
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_2, &capa) == 0);
+ tp_param.clk_src = clk_2;
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+
+ tp[1] = odp_timer_pool_create("timer_pool_1", &tp_param);
+ CU_ASSERT_FATAL(tp[1] != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, 2) == 2);
+
+ /* Allow +-10% error margin */
+ min[0] = odp_timer_ns_to_tick(tp[0], 0.9 * nsec);
+ max[0] = odp_timer_ns_to_tick(tp[0], 1.1 * nsec);
+ min[1] = odp_timer_ns_to_tick(tp[1], 0.9 * nsec);
+ max[1] = odp_timer_ns_to_tick(tp[1], 1.1 * nsec);
+
+ /* First sample without clock counts (NULL), second with */
+ CU_ASSERT_FATAL(odp_timer_sample_ticks(tp, t1, NULL, 2) == 0);
+
+ odp_time_wait_ns(nsec);
+
+ CU_ASSERT_FATAL(odp_timer_sample_ticks(tp, t2, clk_count, 2) == 0);
+
+ ticks[0] = t2[0] - t1[0];
+ ticks[1] = t2[1] - t1[1];
+
+ CU_ASSERT(t2[0] >= t1[0]);
+ CU_ASSERT(t2[1] >= t1[1]);
+ CU_ASSERT(ticks[0] >= min[0]);
+ CU_ASSERT(ticks[1] >= min[1]);
+ CU_ASSERT(ticks[0] <= max[0]);
+ CU_ASSERT(ticks[1] <= max[1]);
+
+ printf("\nClock source: %i, %i\n", clk_1, clk_2);
+ printf(" Time nsec: %" PRIu64 "\n", nsec);
+ printf(" Measured ticks: %" PRIu64 ", %" PRIu64 "\n", ticks[0], ticks[1]);
+ printf(" Expected ticks: %" PRIu64 ", %" PRIu64 "\n",
+ odp_timer_ns_to_tick(tp[0], nsec), odp_timer_ns_to_tick(tp[1], nsec));
+ printf(" T2 tick: %" PRIu64 ", %" PRIu64 "\n", t2[0], t2[1]);
+ printf(" Clk count: %" PRIu64 ", %" PRIu64 "\n", clk_count[0], clk_count[1]);
+
+ odp_timer_pool_destroy(tp[0]);
+ odp_timer_pool_destroy(tp[1]);
+ }
+
+ /* Verify the tick_info fields of odp_timer_pool_info(): the fractional tick
+  * frequency must agree with odp_timer_ns_to_tick() within 1 Hz, the tick
+  * length in nsec must agree with odp_timer_tick_to_ns() within 1 nsec, and
+  * frequency must be at least the configured resolution. The clk_cycle field
+  * is only printed (it may legitimately be zero for some clock sources). */
+ static void timer_pool_tick_info(void)
+ {
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp;
+ odp_timer_pool_info_t info;
+ uint64_t ticks_per_sec;
+ double tick_hz, tick_nsec, tick_to_nsec, tick_low;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
+
+ /* Highest resolution */
+ odp_timer_pool_param_init(&tp_param);
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+ tp_param.num_timers = 100;
+ tp_param.priv = 0;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("tick_info_tp", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ memset(&info, 0, sizeof(odp_timer_pool_info_t));
+ CU_ASSERT_FATAL(odp_timer_pool_info(tp, &info) == 0);
+
+ /* Tick frequency in hertz. Allow 1 hz rounding error between odp_timer_ns_to_tick()
+ * and tick_info. */
+ ticks_per_sec = odp_timer_ns_to_tick(tp, ODP_TIME_SEC_IN_NS);
+ tick_hz = odp_fract_u64_to_dbl(&info.tick_info.freq);
+
+ CU_ASSERT(((double)(ticks_per_sec - 1)) <= tick_hz);
+ CU_ASSERT(((double)(ticks_per_sec + 1)) >= tick_hz);
+
+ /* Tick frequency must be the same or higher that resolution */
+ CU_ASSERT(tick_hz >= tp_param.res_hz);
+
+ printf("\nClock source %i\n", clk_src);
+ printf(" Ticks per second: %" PRIu64 "\n", ticks_per_sec);
+ printf(" Tick info freq: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.freq.integer,
+ info.tick_info.freq.numer,
+ info.tick_info.freq.denom);
+ printf(" Tick info freq dbl: %f\n", tick_hz);
+
+ /* One tick on nsec. For better resolution, convert 1000 ticks (and use double)
+ * instead of one tick. Allow 1 nsec rounding error between odp_timer_tick_to_ns()
+ * and tick_info. */
+ tick_to_nsec = odp_timer_tick_to_ns(tp, 1000) / 1000.0;
+ tick_nsec = odp_fract_u64_to_dbl(&info.tick_info.nsec);
+ tick_low = tick_to_nsec - 1.0;
+ if (tick_to_nsec < 1.0)
+ tick_low = 0.0;
+
+ CU_ASSERT(tick_low <= tick_nsec);
+ CU_ASSERT((tick_to_nsec + 1.0) >= tick_nsec);
+
+ printf(" Tick in nsec: %f\n", tick_to_nsec);
+ printf(" Tick info nsec: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.nsec.integer,
+ info.tick_info.nsec.numer,
+ info.tick_info.nsec.denom);
+ printf(" Tick info nsec dbl: %f\n", tick_nsec);
+
+ /* One tick in source clock cycles. Depending on clock source it may be zero.
+ * Print the values to have a reference to the fields. */
+ printf(" Tick info clk cycles: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.clk_cycle.integer,
+ info.tick_info.clk_cycle.numer,
+ info.tick_info.clk_cycle.denom);
+
+ odp_timer_pool_destroy(tp);
+ }
+
+ /* Start 'num' relative single-shot timers carrying the given event type
+  * (buffer, packet or timeout) to a plain or scheduled queue, then receive all
+  * timeouts. Runs 'rounds' times to exercise re-arming freed timers with
+  * reused events. Reception gives up after 10x the total expected duration. */
+ static void timer_test_event_type(odp_queue_type_t queue_type,
+ odp_event_type_t event_type, int rounds)
+ {
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+ odp_timer_pool_param_t timer_param;
+ odp_timer_pool_t timer_pool;
+ odp_timer_start_t start_param;
+ odp_queue_t queue;
+ odp_timeout_t tmo;
+ odp_buffer_t buf;
+ odp_packet_t pkt;
+ odp_event_t ev;
+ odp_time_t t1, t2;
+ uint64_t period_ns, period_tick, duration_ns;
+ int i, ret, num_tmo;
+ const char *user_ctx = "User context";
+ int test_print = 0;
+ int num = 5;
+ odp_timer_t timer[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ odp_timer_pool_param_init(&timer_param);
+ timer_param.res_ns = global_mem->param.res_ns;
+ timer_param.min_tmo = global_mem->param.min_tmo;
+ period_ns = 2 * global_mem->param.min_tmo;
+ timer_param.max_tmo = global_mem->param.max_tmo;
+ timer_param.num_timers = num;
+ timer_param.clk_src = clk_src;
+
+ timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
+ if (timer_pool == ODP_TIMER_POOL_INVALID)
+ CU_FAIL_FATAL("Timer pool create failed");
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
+
+ odp_pool_param_init(&pool_param);
+
+ /* Event pool type follows the tested event type. Print helpers are only
+  * exercised with timeout events. */
+ if (event_type == ODP_EVENT_BUFFER) {
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = num;
+ } else if (event_type == ODP_EVENT_PACKET) {
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = num;
+ } else if (event_type == ODP_EVENT_TIMEOUT) {
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = num;
+ test_print = 1;
+ } else {
+ CU_FAIL("Bad event_type");
+ return;
+ }
+
+ pool = odp_pool_create("timeout_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+
+ queue = odp_queue_create("timeout_queue", &queue_param);
+ if (queue == ODP_QUEUE_INVALID)
+ CU_FAIL_FATAL("Queue create failed");
+
+ period_tick = odp_timer_ns_to_tick(timer_pool, period_ns);
+ duration_ns = num * period_ns;
+
+ ODPH_DBG("\nTimer pool parameters:\n");
+ ODPH_DBG(" res_ns %" PRIu64 "\n", timer_param.res_ns);
+ ODPH_DBG(" min_tmo %" PRIu64 "\n", timer_param.min_tmo);
+ ODPH_DBG(" max_tmo %" PRIu64 "\n", timer_param.max_tmo);
+ ODPH_DBG(" period_ns %" PRIu64 "\n", period_ns);
+ ODPH_DBG(" period_tick %" PRIu64 "\n", period_tick);
+ ODPH_DBG(" duration_ns %" PRIu64 "\n", duration_ns);
+ ODPH_DBG(" user_ptr %p\n\n", (const void *)user_ctx);
+
+ for (i = 0; i < num; i++) {
+ timer[i] = odp_timer_alloc(timer_pool, queue, user_ctx);
+ CU_ASSERT_FATAL(timer[i] != ODP_TIMER_INVALID);
+ }
+
+ for (int round = 0; round < rounds; round++) {
+ /* Arm timer i to expire after (i + 1) periods */
+ for (i = 0; i < num; i++) {
+ if (event_type == ODP_EVENT_BUFFER) {
+ buf = odp_buffer_alloc(pool);
+ ev = odp_buffer_to_event(buf);
+ } else if (event_type == ODP_EVENT_PACKET) {
+ pkt = odp_packet_alloc(pool, 10);
+ ev = odp_packet_to_event(pkt);
+ } else {
+ tmo = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(tmo);
+ }
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = (i + 1) * period_tick;
+ start_param.tmo_ev = ev;
+
+ ret = odp_timer_start(timer[i], &start_param);
+
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %i.\n", i);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too far %i.\n", i);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %i\n", i);
+
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+ }
+
+ if (test_print) {
+ printf("\n");
+ odp_timer_pool_print(timer_pool);
+ odp_timer_print(timer[0]);
+ }
+
+ ev = ODP_EVENT_INVALID;
+ num_tmo = 0;
+ t1 = odp_time_local();
+
+ /* Wait for timers. Make sure that scheduler context is not held when
+ * exiting the loop. */
+ do {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ t2 = odp_time_local();
+ if (odp_time_diff_ns(t2, t1) > (10 * duration_ns))
+ break;
+
+ continue;
+ }
+
+ CU_ASSERT(odp_event_type(ev) == event_type);
+
+ if (test_print) {
+ test_print = 0;
+ tmo = odp_timeout_from_event(ev);
+ odp_timeout_print(tmo);
+ printf("\n");
+ }
+
+ odp_event_free(ev);
+ num_tmo++;
+
+ } while (num_tmo < num || ev != ODP_EVENT_INVALID);
+
+ CU_ASSERT(num_tmo == num);
+ }
+
+ for (i = 0; i < num; i++)
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
+
+ odp_timer_pool_destroy(timer_pool);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ }
+
+ /* CUnit wrappers: one round per event type / queue type combination, plus
+  * two-round "reuse" variants that re-arm timers with recycled events. */
+ static void timer_test_tmo_event_plain(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_PLAIN, ODP_EVENT_TIMEOUT, 1);
+ }
+
+ static void timer_test_tmo_event_sched(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_TIMEOUT, 1);
+ }
+
+ static void timer_test_buf_event_plain(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_PLAIN, ODP_EVENT_BUFFER, 1);
+ }
+
+ static void timer_test_buf_event_sched(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_BUFFER, 1);
+ }
+
+ static void timer_test_pkt_event_plain(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_PLAIN, ODP_EVENT_PACKET, 1);
+ }
+
+ static void timer_test_pkt_event_sched(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_PACKET, 1);
+ }
+
+ static void timer_test_tmo_event_reuse(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_TIMEOUT, 2);
+ }
+
+ static void timer_test_buf_event_reuse(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_BUFFER, 2);
+ }
+
+ static void timer_test_pkt_event_reuse(void)
+ {
+ timer_test_event_type(ODP_QUEUE_TYPE_SCHED, ODP_EVENT_PACKET, 2);
+ }
+
+ /* Arm 'num' absolute timers at evenly spaced ticks and verify delivered
+  * timeouts: correct timer handle, tick, and user pointer, and arrival time
+  * within resolution-based bounds of the target wall-clock time. With
+  * exp_relax set, ODP_TIMER_EXP_RELAXED mode is used and early delivery is
+  * allowed; otherwise a timeout must not arrive before its target tick.
+  * 'priv' requests a private timer pool. */
+ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv, int exp_relax)
+ {
+ odp_pool_t pool;
+ const int num = 10;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ odp_queue_param_t queue_param;
+ odp_timer_pool_param_t tparam;
+ odp_timer_pool_t tp;
+ odp_timer_start_t start_param;
+ odp_queue_t queue;
+ odp_timer_t tim;
+ int i, ret, num_tmo;
+ uint64_t tick_base, tick, nsec_base, nsec;
+ uint64_t res_ns, period_ns, period_tick, test_period;
+ uint64_t diff_test;
+ odp_pool_param_t params;
+ odp_time_t t0, t1;
+ odp_timer_t timer[num];
+ uint64_t target_tick[num];
+ uint64_t target_nsec[num];
+ void *user_ptr[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = num;
+
+ pool = odp_pool_create("timeout_pool", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ res_ns = global_mem->param.res_ns;
+
+ odp_timer_pool_param_init(&tparam);
+ tparam.res_ns = global_mem->param.res_ns;
+ tparam.min_tmo = global_mem->param.min_tmo;
+ tparam.max_tmo = global_mem->param.max_tmo;
+ tparam.num_timers = num + 1;
+ tparam.priv = priv;
+ tparam.clk_src = clk_src;
+
+ if (exp_relax)
+ tparam.exp_mode = ODP_TIMER_EXP_RELAXED;
+
+ ODPH_DBG("\nTimer pool parameters:\n");
+ ODPH_DBG(" res_ns %" PRIu64 "\n", tparam.res_ns);
+ ODPH_DBG(" min_tmo %" PRIu64 "\n", tparam.min_tmo);
+ ODPH_DBG(" max_tmo %" PRIu64 "\n", tparam.max_tmo);
+
+ tp = odp_timer_pool_create("timer_pool", &tparam);
+ if (tp == ODP_TIMER_POOL_INVALID)
+ CU_FAIL_FATAL("Timer pool create failed");
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ odp_queue_param_init(&queue_param);
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+
+ queue = odp_queue_create("timer_queue", &queue_param);
+ if (queue == ODP_QUEUE_INVALID)
+ CU_FAIL_FATAL("Queue create failed");
+
+ period_ns = 4 * tparam.min_tmo;
+ period_tick = odp_timer_ns_to_tick(tp, period_ns);
+ test_period = num * period_ns;
+
+ ODPH_DBG(" period_ns %" PRIu64 "\n", period_ns);
+ ODPH_DBG(" period_tick %" PRIu64 "\n\n", period_tick);
+
+ /* Record the common baseline: timers target absolute ticks measured from
+  * tick_base, expected arrivals measured from nsec_base wall-clock time */
+ tick_base = odp_timer_current_tick(tp);
+ t0 = odp_time_local();
+ t1 = t0;
+ nsec_base = odp_time_to_ns(t0);
+
+ for (i = 0; i < num; i++) {
+ tmo = odp_timeout_alloc(pool);
+ CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+ ev = odp_timeout_to_event(tmo);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+
+ user_ptr[i] = (void *)(uintptr_t)i;
+ tim = odp_timer_alloc(tp, queue, user_ptr[i]);
+ CU_ASSERT_FATAL(tim != ODP_TIMER_INVALID);
+ timer[i] = tim;
+
+ tick = tick_base + ((i + 1) * period_tick);
+
+ start_param.tick_type = ODP_TIMER_TICK_ABS;
+ start_param.tick = tick;
+ start_param.tmo_ev = ev;
+
+ ret = odp_timer_start(tim, &start_param);
+ target_tick[i] = tick;
+ target_nsec[i] = nsec_base + ((i + 1) * period_ns);
+
+ ODPH_DBG("abs timer tick %" PRIu64 "\n", tick);
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %" PRIu64 ".\n", tick);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too far %" PRIu64 ".\n", tick);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %" PRIu64 "\n", tick);
+
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+ }
+
+ num_tmo = 0;
+
+ /* Timeouts are expected in arming order, so index with num_tmo */
+ do {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ t1 = odp_time_local();
+ nsec = odp_time_to_ns(t1);
+ diff_test = nsec - nsec_base;
+
+ if (ev != ODP_EVENT_INVALID) {
+ uint64_t target;
+
+ tmo = odp_timeout_from_event(ev);
+ tim = odp_timeout_timer(tmo);
+ tick = odp_timeout_tick(tmo);
+ target = target_nsec[num_tmo];
+
+ CU_ASSERT(timer[num_tmo] == tim);
+ CU_ASSERT(target_tick[num_tmo] == tick);
+ CU_ASSERT(user_ptr[num_tmo] == odp_timeout_user_ptr(tmo));
+
+ CU_ASSERT(nsec < (target + (5 * res_ns)));
+
+ if (exp_relax) {
+ CU_ASSERT(nsec > (target - (5 * res_ns)));
+ } else {
+ /* Timeout must not arrive before the current time has passed
+ * the target time (in timer ticks). */
+ CU_ASSERT(target_tick[num_tmo] <= odp_timer_current_tick(tp));
+
+ /* Timeout should not arrive before the target wall clock time.
+ * However, allow small drift or error between wall clock and
+ * timer. */
+ CU_ASSERT(nsec > (target - res_ns));
+ }
+
+ ODPH_DBG("timeout tick %" PRIu64 ", nsec %" PRIu64 ", "
+ "target nsec %" PRIu64 ", diff nsec %" PRIi64 "\n",
+ tick, nsec, target, (int64_t)(nsec - target));
+
+ odp_timeout_free(tmo);
+ CU_ASSERT(odp_timer_free(tim) == 0);
+
+ num_tmo++;
+ }
+
+ } while (diff_test < (2 * test_period) && num_tmo < num);
+
+ ODPH_DBG("test period %" PRIu64 "\n", diff_test);
+
+ CU_ASSERT(num_tmo == num);
+ CU_ASSERT(diff_test > (test_period - period_ns));
+ CU_ASSERT(diff_test < (test_period + period_ns));
+
+ /* Reset scheduler context for the next test case */
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ odp_schedule_pause();
+ while (1) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_FAIL("Drop extra event\n");
+ odp_event_free(ev);
+ }
+ odp_schedule_resume();
+ }
+
+ odp_timer_pool_destroy(tp);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ }
+
+ /* CUnit wrappers: default / private-pool / relaxed-expiration variants of
+  * the queue type test, on plain and scheduled queues. */
+ static void timer_test_plain_queue(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN, 0, 0);
+ }
+
+ static void timer_test_sched_queue(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_SCHED, 0, 0);
+ }
+
+ static void timer_test_plain_queue_priv(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN, PRIV, 0);
+ }
+
+ static void timer_test_sched_queue_priv(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_SCHED, PRIV, 0);
+ }
+
+ static void timer_test_plain_queue_exp_relax(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_PLAIN, 0, EXP_RELAX);
+ }
+
+ static void timer_test_sched_queue_exp_relax(void)
+ {
+ timer_test_queue_type(ODP_QUEUE_TYPE_SCHED, 0, EXP_RELAX);
+ }
+
+ /* Start a single long (max_tmo/2) relative timer and cancel it before
+  * expiration. Cancel must succeed and hand back the original timeout event
+  * with the timer handle and user pointer intact. Queue type is chosen by
+  * capability (plain preferred, scheduled as fallback). */
+ static void timer_test_cancel(void)
+ {
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ odp_timer_pool_param_t tparam;
+ odp_queue_param_t queue_param;
+ odp_timer_capability_t capa;
+ odp_timer_pool_t tp;
+ odp_timer_start_t start_param;
+ odp_queue_t queue;
+ odp_timer_t tim;
+ odp_event_t ev;
+ odp_timeout_t tmo;
+ odp_timer_retval_t rc;
+ int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(clk_src, &capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = 1;
+
+ pool = odp_pool_create("tmo_pool_for_cancel", &params);
+
+ if (pool == ODP_POOL_INVALID)
+ CU_FAIL_FATAL("Timeout pool create failed");
+
+ odp_timer_pool_param_init(&tparam);
+ tparam.res_ns = global_mem->param.res_ns;
+ tparam.min_tmo = global_mem->param.min_tmo;
+ tparam.max_tmo = global_mem->param.max_tmo;
+ tparam.num_timers = 1;
+ tparam.priv = 0;
+ tparam.clk_src = clk_src;
+ tp = odp_timer_pool_create(NULL, &tparam);
+ if (tp == ODP_TIMER_POOL_INVALID)
+ CU_FAIL_FATAL("Timer pool create failed");
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+
+ queue = odp_queue_create("timer_queue", &queue_param);
+ if (queue == ODP_QUEUE_INVALID)
+ CU_FAIL_FATAL("Queue create failed");
+
+ tim = odp_timer_alloc(tp, queue, USER_PTR);
+ if (tim == ODP_TIMER_INVALID)
+ CU_FAIL_FATAL("Failed to allocate timer");
+ ODPH_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));
+
+ ev = odp_timeout_to_event(odp_timeout_alloc(pool));
+ if (ev == ODP_EVENT_INVALID)
+ CU_FAIL_FATAL("Failed to allocate timeout");
+
+ /* Far enough in the future that cancel cannot race with expiry */
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = odp_timer_ns_to_tick(tp, tparam.max_tmo / 2);
+ start_param.tmo_ev = ev;
+
+ rc = odp_timer_start(tim, &start_param);
+ if (rc != ODP_TIMER_SUCCESS)
+ CU_FAIL_FATAL("Failed to set timer (relative time)");
+
+ ev = ODP_EVENT_INVALID;
+ if (odp_timer_cancel(tim, &ev) != ODP_TIMER_SUCCESS)
+ CU_FAIL_FATAL("Failed to cancel timer (relative time)");
+
+ if (ev == ODP_EVENT_INVALID)
+ CU_FAIL_FATAL("Cancel did not return event");
+
+ tmo = odp_timeout_from_event(ev);
+ if (tmo == ODP_TIMEOUT_INVALID)
+ CU_FAIL_FATAL("Cancel did not return timeout");
+ ODPH_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));
+
+ if (odp_timeout_timer(tmo) != tim)
+ CU_FAIL("Cancel invalid tmo.timer");
+
+ if (odp_timeout_user_ptr(tmo) != USER_PTR)
+ CU_FAIL("Cancel invalid tmo.user_ptr");
+
+ odp_timeout_free(tmo);
+
+ CU_ASSERT_FATAL(odp_timer_free(tim) == 0);
+
+ odp_timer_pool_destroy(tp);
+
+ if (odp_queue_destroy(queue) != 0)
+ CU_FAIL_FATAL("Failed to destroy queue");
+
+ if (odp_pool_destroy(pool) != 0)
+ CU_FAIL_FATAL("Failed to destroy pool");
+ }
+
+ /* Exercise timeout length limits. Pool parameters come from either the
+  * max-resolution (max_res) or max-timeout capability set. With 'min' set,
+  * each timer is armed with the minimum timeout and its expiration event is
+  * awaited; otherwise the maximum timeout is armed and immediately cancelled
+  * (waiting for max_tmo could take days). */
+ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
+ int max_res, int min)
+ {
+ odp_timer_capability_t timer_capa;
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+ odp_timer_pool_param_t timer_param;
+ odp_timer_pool_t timer_pool;
+ odp_timer_start_t start_param;
+ odp_queue_t queue;
+ odp_timeout_t tmo;
+ odp_event_t ev;
+ odp_time_t t1, t2;
+ uint64_t res_ns, min_tmo, max_tmo;
+ uint64_t tmo_ns, tmo_tick, diff_ns, max_wait;
+ int i, ret, num_tmo;
+ int num = 5;
+ odp_timer_t timer[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&timer_capa, 0, sizeof(timer_capa));
+ ret = odp_timer_capability(clk_src, &timer_capa);
+ CU_ASSERT_FATAL(ret == 0);
+
+ if (max_res) {
+ /* Maximum resolution parameters */
+ res_ns = timer_capa.max_res.res_ns;
+ min_tmo = timer_capa.max_res.min_tmo;
+ max_tmo = timer_capa.max_res.max_tmo;
+ } else {
+ /* Maximum timeout parameters */
+ res_ns = timer_capa.max_tmo.res_ns;
+ min_tmo = timer_capa.max_tmo.min_tmo;
+ max_tmo = timer_capa.max_tmo.max_tmo;
+ }
+
+ odp_timer_pool_param_init(&timer_param);
+ timer_param.res_ns = res_ns;
+ timer_param.min_tmo = min_tmo;
+ timer_param.max_tmo = max_tmo;
+ timer_param.num_timers = num;
+ timer_param.clk_src = clk_src;
+
+ timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
+ if (timer_pool == ODP_TIMER_POOL_INVALID)
+ CU_FAIL_FATAL("Timer pool create failed");
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = num;
+
+ pool = odp_pool_create("timeout_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+
+ queue = odp_queue_create("timeout_queue", &queue_param);
+ if (queue == ODP_QUEUE_INVALID)
+ CU_FAIL_FATAL("Queue create failed");
+
+ if (min)
+ tmo_ns = min_tmo;
+ else
+ tmo_ns = max_tmo;
+
+ tmo_tick = odp_timer_ns_to_tick(timer_pool, tmo_ns);
+ /* Min_tmo maybe zero. Wait min timeouts at least 20ms + resolution */
+ max_wait = (20 * ODP_TIME_MSEC_IN_NS + res_ns + 10 * tmo_ns);
+
+ ODPH_DBG("\nTimer pool parameters:\n");
+ ODPH_DBG(" res_ns %" PRIu64 "\n", timer_param.res_ns);
+ ODPH_DBG(" min_tmo %" PRIu64 "\n", timer_param.min_tmo);
+ ODPH_DBG(" max_tmo %" PRIu64 "\n", timer_param.max_tmo);
+ ODPH_DBG(" tmo_ns %" PRIu64 "\n", tmo_ns);
+ ODPH_DBG(" tmo_tick %" PRIu64 "\n\n", tmo_tick);
+
+ if (min) {
+ /*
+ * Prevent the test from taking too long by asserting that the
+ * timeout is reasonably short.
+ */
+ CU_ASSERT_FATAL(tmo_ns < 5 * ODP_TIME_SEC_IN_NS);
+ }
+
+ for (i = 0; i < num; i++) {
+ timer[i] = odp_timer_alloc(timer_pool, queue, NULL);
+ CU_ASSERT_FATAL(timer[i] != ODP_TIMER_INVALID);
+ }
+
+ num_tmo = 0;
+
+ for (i = 0; i < num; i++) {
+ tmo = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(tmo);
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+
+ t1 = odp_time_local();
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = tmo_tick;
+ start_param.tmo_ev = ev;
+
+ ret = odp_timer_start(timer[i], &start_param);
+
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %i.\n", i);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too late %i.\n", i);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %i\n", i);
+
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+
+ if (min) {
+ /* Min timeout - wait for events */
+ int break_loop = 0;
+
+ while (1) {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(NULL,
+ ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ t2 = odp_time_local();
+ diff_ns = odp_time_diff_ns(t2, t1);
+
+ if (ev != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+ num_tmo++;
+ break_loop = 1;
+ ODPH_DBG("Timeout [%i]: %" PRIu64 " "
+ "nsec\n", i, diff_ns);
+ continue;
+ }
+
+ /* Ensure that schedule context is free */
+ if (break_loop)
+ break;
+
+ /* Give up after waiting max wait time */
+ if (diff_ns > max_wait)
+ break;
+ }
+ } else {
+ /* Max timeout - cancel events */
+ ev = ODP_EVENT_INVALID;
+
+ ret = odp_timer_cancel(timer[i], &ev);
+ t2 = odp_time_local();
+ diff_ns = odp_time_diff_ns(t2, t1);
+
+ CU_ASSERT(ret == ODP_TIMER_SUCCESS);
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ ODPH_DBG("Cancelled [%i]: %" PRIu64 " nsec\n", i,
+ diff_ns);
+ }
+ }
+
+ if (min)
+ CU_ASSERT(num_tmo == num);
+
+ for (i = 0; i < num; i++)
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
+
+ odp_timer_pool_destroy(timer_pool);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ }
+
+ /* CUnit wrappers: all four (capability set) x (min/max timeout) cases,
+  * each on both plain and scheduled queues. */
+ static void timer_test_max_res_min_tmo_plain(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_PLAIN, 1, 1);
+ }
+
+ static void timer_test_max_res_min_tmo_sched(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_SCHED, 1, 1);
+ }
+
+ static void timer_test_max_res_max_tmo_plain(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_PLAIN, 1, 0);
+ }
+
+ static void timer_test_max_res_max_tmo_sched(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_SCHED, 1, 0);
+ }
+
+ static void timer_test_max_tmo_min_tmo_plain(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_PLAIN, 0, 1);
+ }
+
+ static void timer_test_max_tmo_min_tmo_sched(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_SCHED, 0, 1);
+ }
+
+ static void timer_test_max_tmo_max_tmo_plain(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_PLAIN, 0, 0);
+ }
+
+ static void timer_test_max_tmo_max_tmo_sched(void)
+ {
+ timer_test_tmo_limit(ODP_QUEUE_TYPE_SCHED, 0, 0);
+ }
+
+ /* Handle a received (timeout) event: validate event type/subtype, cross-check
+  * the timeout metadata (timer handle, tick, user pointer) against the
+  * per-timer bookkeeping in struct test_timer, and for non-stale timeouts
+  * verify delivery was neither early (before the armed tick) nor counted-late
+  * (before prev_tick; tallied in ndelivtoolate rather than failed). Stores
+  * the event back into ttp->ev so the owner can reuse or free it. */
+ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
+ {
+ odp_event_subtype_t subtype;
+ odp_timeout_t tmo;
+ odp_timer_t tim;
+ uint64_t tick;
+ struct test_timer *ttp;
+
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
+ if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
+ /* Not a timeout event */
+ CU_FAIL("Unexpected event type received");
+ return;
+ }
+ if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE) {
+ /* Not a timeout event */
+ CU_FAIL("Unexpected event subtype received");
+ return;
+ }
+ if (odp_event_types(ev, &subtype) != ODP_EVENT_TIMEOUT) {
+ /* Not a timeout event */
+ CU_FAIL("Unexpected event type received");
+ return;
+ }
+ if (subtype != ODP_EVENT_NO_SUBTYPE) {
+ /* Not a timeout event */
+ CU_FAIL("Unexpected event subtype received");
+ return;
+ }
+
+ /* Read the metadata from the timeout */
+ tmo = odp_timeout_from_event(ev);
+ tim = odp_timeout_timer(tmo);
+ tick = odp_timeout_tick(tmo);
+ ttp = odp_timeout_user_ptr(tmo);
+
+ if (tim == ODP_TIMER_INVALID)
+ CU_FAIL("odp_timeout_timer() invalid timer");
+
+ if (ttp == NULL) {
+ CU_FAIL("odp_timeout_user_ptr() null user ptr");
+ return;
+ }
+
+ if (ttp->ev2 != ev)
+ CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
+
+ if (ttp->tim != tim)
+ CU_FAIL("odp_timeout_timer() wrong timer");
+
+ if (!stale) {
+ #if ODP_DEPRECATED_API
+ if (!odp_timeout_fresh(tmo))
+ CU_FAIL("Wrong status (stale) for fresh timeout");
+ #endif
+ /* tmo tick cannot be smaller than pre-calculated tick */
+ if (tick < ttp->tick) {
+ ODPH_DBG("Too small tick: pre-calculated %" PRIu64 " "
+ "timeout %" PRIu64 "\n", ttp->tick, tick);
+ CU_FAIL("odp_timeout_tick() too small tick");
+ }
+
+ if (tick > odp_timer_current_tick(global_mem->tp))
+ CU_FAIL("Timeout delivered early in ODP_TIMER_EXP_AFTER mode");
+
+ if (tick < prev_tick) {
+ ODPH_DBG("Too late tick: %" PRIu64 " prev_tick "
+ "%" PRIu64 "\n", tick, prev_tick);
+ /* We don't report late timeouts using CU_FAIL */
+ odp_atomic_inc_u32(&global_mem->ndelivtoolate);
+ }
+ }
+
+ /* Internal error */
+ CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
+ ttp->ev = ev;
+ }
+
+/* Worker thread entrypoint which performs timer alloc/set/cancel/free
+ * tests */
+static int worker_entrypoint(void *arg ODP_UNUSED)
+{
+ int thr = odp_thread_id();
+ uint32_t i, allocated;
+ unsigned seed = thr;
+ odp_queue_t queue;
+ struct test_timer *tt;
+ uint32_t nset;
+ uint64_t tck;
+ uint32_t nrcv;
+ uint32_t nreset;
+ uint32_t ncancel;
+ uint32_t ntoolate;
+ uint32_t ms;
+ uint64_t prev_tick, late_margin, nsec;
+ odp_event_t ev;
+ struct timespec ts;
+ uint32_t nstale;
+ odp_timer_retval_t rc;
+ odp_timer_start_t start_param;
+ odp_timer_pool_t tp = global_mem->tp;
+ odp_pool_t tbp = global_mem->tbp;
+ uint32_t num_timers = global_mem->timers_per_thread;
+ uint64_t min_tmo = global_mem->param.min_tmo;
+ odp_queue_param_t queue_param;
+ odp_thrmask_t thr_mask;
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ uint64_t sched_tmo;
+ uint64_t res_ns = global_mem->param.res_ns;
+ odp_queue_type_t queue_type = global_mem->test_queue_type;
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = queue_type;
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ odp_thrmask_zero(&thr_mask);
+ odp_thrmask_set(&thr_mask, odp_thread_id());
+ group = odp_schedule_group_create(NULL, &thr_mask);
+ if (group == ODP_SCHED_GROUP_INVALID)
+ CU_FAIL_FATAL("Schedule group create failed");
+
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.group = group;
+ }
+
+ queue = odp_queue_create("timer_queue", &queue_param);
+ if (queue == ODP_QUEUE_INVALID)
+ CU_FAIL_FATAL("Queue create failed");
+
+ tt = malloc(sizeof(struct test_timer) * num_timers);
+ if (!tt)
+ CU_FAIL_FATAL("malloc failed");
+
+ /* Prepare all timers */
+ for (i = 0; i < num_timers; i++) {
+ tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
+ if (tt[i].ev == ODP_EVENT_INVALID) {
+ ODPH_DBG("Failed to allocate timeout ("
+ "%" PRIu32 "/%d)\n", i, num_timers);
+ break;
+ }
+ tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
+ if (tt[i].tim == ODP_TIMER_INVALID) {
+ ODPH_DBG("Failed to allocate timer (%" PRIu32 "/%d)\n",
+ i, num_timers);
+ odp_event_free(tt[i].ev);
+ break;
+ }
+ tt[i].ev2 = tt[i].ev;
+ tt[i].tick = TICK_INVALID;
+ }
+ allocated = i;
+ if (allocated == 0)
+ CU_FAIL_FATAL("unable to alloc a timer");
+ odp_atomic_fetch_add_u32(&global_mem->timers_allocated, allocated);
+
+ odp_barrier_wait(&global_mem->test_barrier);
+
+ /* Initial set all timers with a random expiration time */
+ nset = 0;
+ for (i = 0; i < allocated; i++) {
+ nsec = min_tmo + THREE_POINT_THREE_MSEC +
+ (rand_r(&seed) % RANGE_MS) * 1000000ULL;
+ tck = odp_timer_current_tick(tp) +
+ odp_timer_ns_to_tick(tp, nsec);
+
+ start_param.tick_type = ODP_TIMER_TICK_ABS;
+ start_param.tick = tck;
+ start_param.tmo_ev = tt[i].ev;
+ tt[i].ev = ODP_EVENT_INVALID;
+
+ rc = odp_timer_start(tt[i].tim, &start_param);
+ if (rc == ODP_TIMER_TOO_NEAR) {
+ ODPH_ERR("Missed tick, setting timer\n");
+ } else if (rc != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Failed to set timer: %d\n", rc);
+ CU_FAIL("Failed to set timer");
+ } else {
+ tt[i].tick = tck;
+ nset++;
+ }
+ }
+
+ /* Step through wall time, 1ms at a time and check for expired timers */
+ nrcv = 0;
+ nreset = 0;
+ ncancel = 0;
+ ntoolate = 0;
+ late_margin = odp_timer_ns_to_tick(tp, 2 * res_ns);
+ prev_tick = odp_timer_current_tick(tp);
+
+ for (ms = 0; ms < 7 * RANGE_MS / 10 && allocated > 0; ms++) {
+ while ((ev = queue_type == ODP_QUEUE_TYPE_PLAIN ?
+ odp_queue_deq(queue) :
+ odp_schedule(NULL, ODP_SCHED_NO_WAIT))
+ != ODP_EVENT_INVALID) {
+ /* Allow timeouts to be delivered late_margin ticks late */
+ handle_tmo(ev, false, prev_tick - late_margin);
+ nrcv++;
+ }
+ prev_tick = odp_timer_current_tick(tp);
+ i = rand_r(&seed) % allocated;
+ if (tt[i].ev == ODP_EVENT_INVALID &&
+ (rand_r(&seed) % 2 == 0)) {
+ if (odp_timer_current_tick(tp) >= tt[i].tick)
+ /* Timer just expired. */
+ goto sleep;
+ /* Timer active, cancel it */
+ rc = odp_timer_cancel(tt[i].tim, &tt[i].ev);
+
+ if (rc == ODP_TIMER_SUCCESS) {
+ tt[i].tick = TICK_INVALID;
+ ncancel++;
+ } else if (rc == ODP_TIMER_TOO_NEAR) {
+ /* Cancel failed, timer already expired */
+ ntoolate++;
+ ODPH_DBG("Failed to cancel timer, already expired\n");
+ } else {
+ CU_FAIL_FATAL("Cancel failed");
+ }
+ } else {
+ uint64_t cur_tick;
+ int reset_timer = 0;
+
+ if (tt[i].ev != ODP_EVENT_INVALID) {
+ /* Timer inactive => set */
+ nset++;
+ } else if (odp_timer_current_tick(tp) >= tt[i].tick) {
+ /* Timer just expired. */
+ goto sleep;
+ } else {
+ /* Timer active => reset */
+ nreset++;
+ reset_timer = 1;
+ }
+
+ nsec = min_tmo + THREE_POINT_THREE_MSEC +
+ (rand_r(&seed) % RANGE_MS) * 1000000ULL;
+ tck = odp_timer_ns_to_tick(tp, nsec);
+
+ cur_tick = odp_timer_current_tick(tp);
+
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ start_param.tick = tck;
+ start_param.tmo_ev = tt[i].ev;
+
+ if (reset_timer)
+ rc = odp_timer_restart(tt[i].tim, &start_param);
+ else
+ rc = odp_timer_start(tt[i].tim, &start_param);
+
+ if (rc == ODP_TIMER_TOO_NEAR) {
+ CU_FAIL("Failed to set timer: TOO NEAR");
+ } else if (rc == ODP_TIMER_TOO_FAR) {
+ CU_FAIL("Failed to set timer: TOO FAR");
+ } else if (rc == ODP_TIMER_FAIL) {
+ /* Set/reset failed, timer already expired */
+ ntoolate++;
+ } else if (rc == ODP_TIMER_SUCCESS) {
+ /* Save expected expiration tick on success */
+ tt[i].tick = cur_tick + tck;
+ /* ODP timer owns the event now */
+ tt[i].ev = ODP_EVENT_INVALID;
+ } else {
+ CU_FAIL("Failed to set timer: bad return code");
+ }
+ }
+sleep:
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000; /* 1ms */
+ if (nanosleep(&ts, NULL) < 0)
+ CU_FAIL_FATAL("nanosleep failed");
+ }
+
+ /* Try to cancel all active timers */
+ nstale = 0;
+ for (i = 0; i < allocated; i++) {
+ if (tt[i].ev != ODP_EVENT_INVALID)
+ continue;
+
+ rc = odp_timer_cancel(tt[i].tim, &tt[i].ev);
+ tt[i].tick = TICK_INVALID;
+
+ if (rc == ODP_TIMER_TOO_NEAR) {
+ /* Cancel too late, timer already expired and timeout enqueued */
+ nstale++;
+ } else if (rc != ODP_TIMER_SUCCESS) {
+ CU_FAIL("Timer cancel failed");
+ }
+ }
+
+ ODPH_DBG("Thread %u: %" PRIu32 " timers set\n", thr, nset);
+ ODPH_DBG("Thread %u: %" PRIu32 " timers reset\n", thr, nreset);
+ ODPH_DBG("Thread %u: %" PRIu32 " timers cancelled\n", thr, ncancel);
+ ODPH_DBG("Thread %u: %" PRIu32 " timers reset/cancelled too late\n",
+ thr, ntoolate);
+ ODPH_DBG("Thread %u: %" PRIu32 " timeouts received\n", thr, nrcv);
+ ODPH_DBG("Thread %u: %" PRIu32 " "
+ "stale timeout(s) after odp_timer_cancel()\n", thr, nstale);
+
+	/* Delay some more to ensure timeouts for expired timers can be
+	 * received. A busy loop cannot be used here, as the background timer
+	 * threads need time to finish their work. */
+ ts.tv_sec = 0;
+ ts.tv_nsec = (3 * RANGE_MS / 10 + 50) * ODP_TIME_MSEC_IN_NS;
+ if (nanosleep(&ts, NULL) < 0)
+ CU_FAIL_FATAL("nanosleep failed");
+
+ sched_tmo = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * RANGE_MS);
+ while (nstale != 0) {
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN)
+ ev = odp_queue_deq(queue);
+ else
+ ev = odp_schedule(NULL, sched_tmo);
+ if (ev != ODP_EVENT_INVALID) {
+ handle_tmo(ev, true, 0/*Don't care for stale tmo's*/);
+ nstale--;
+ } else {
+ CU_FAIL("Failed to receive stale timeout");
+ break;
+ }
+ }
+
+ for (i = 0; i < allocated; i++) {
+ if (odp_timer_free(tt[i].tim))
+ CU_FAIL("odp_timer_free");
+ }
+
+	/* Check if there are any more (unexpected) events */
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN)
+ ev = odp_queue_deq(queue);
+ else
+ ev = odp_schedule(NULL, sched_tmo);
+ if (ev != ODP_EVENT_INVALID)
+ CU_FAIL("Unexpected event received");
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ for (i = 0; i < allocated; i++) {
+ if (tt[i].ev != ODP_EVENT_INVALID)
+ odp_event_free(tt[i].ev);
+ }
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ CU_ASSERT(odp_schedule_group_destroy(group) == 0);
+
+ free(tt);
+ ODPH_DBG("Thread %u: exiting\n", thr);
+ return CU_get_number_of_failures();
+}
+
+static void timer_test_all(odp_queue_type_t queue_type)
+{
+ int rc;
+ odp_pool_param_t params;
+ odp_timer_pool_param_t tparam;
+ odp_timer_pool_info_t tpinfo;
+ uint64_t ns, tick, ns2;
+ uint64_t res_ns, min_tmo, max_tmo;
+ uint32_t timers_allocated;
+ odp_pool_capability_t pool_capa;
+ odp_timer_capability_t timer_capa;
+ odp_schedule_capability_t sched_capa;
+ odp_pool_t tbp;
+ odp_timer_pool_t tp;
+ uint32_t num_timers;
+ uint32_t num_workers;
+ int timers_per_thread;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
+ /* Reserve at least one core for running other processes so the timer
+ * test hopefully can run undisturbed and thus get better timing
+ * results. */
+ num_workers = odp_cpumask_default_worker(NULL, 0);
+
+ /* force to max CPU count */
+ if (num_workers > MAX_WORKERS)
+ num_workers = MAX_WORKERS;
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED &&
+ num_workers > sched_capa.max_groups)
+ num_workers = sched_capa.max_groups;
+
+ /* On a single-CPU machine run at least one thread */
+ if (num_workers < 1)
+ num_workers = 1;
+
+ num_timers = num_workers * NTIMERS;
+ CU_ASSERT_FATAL(!odp_timer_capability(clk_src, &timer_capa));
+ if (timer_capa.max_timers && timer_capa.max_timers < num_timers)
+ num_timers = timer_capa.max_timers;
+
+ CU_ASSERT_FATAL(!odp_pool_capability(&pool_capa));
+ if (pool_capa.tmo.max_num && num_timers > pool_capa.tmo.max_num)
+ num_timers = pool_capa.tmo.max_num;
+
+ /* Create timeout pools */
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_TIMEOUT;
+ params.tmo.num = num_timers;
+
+ timers_per_thread = (num_timers / num_workers) - EXTRA_TIMERS;
+ global_mem->timers_per_thread = timers_per_thread > 1 ?
+ timers_per_thread : 1;
+
+ global_mem->tbp = odp_pool_create("tmo_pool", &params);
+ if (global_mem->tbp == ODP_POOL_INVALID)
+ CU_FAIL_FATAL("Timeout pool create failed");
+ tbp = global_mem->tbp;
+
+ /* Create a timer pool */
+ res_ns = global_mem->param.res_ns;
+ max_tmo = global_mem->param.max_tmo;
+ min_tmo = global_mem->param.min_tmo;
+
+ odp_timer_pool_param_init(&tparam);
+ tparam.res_ns = res_ns;
+ tparam.min_tmo = min_tmo;
+ tparam.max_tmo = max_tmo;
+ tparam.num_timers = num_timers;
+ tparam.priv = 0;
+ tparam.clk_src = clk_src;
+ global_mem->tp = odp_timer_pool_create(NAME, &tparam);
+ if (global_mem->tp == ODP_TIMER_POOL_INVALID)
+ CU_FAIL_FATAL("Timer pool create failed");
+ tp = global_mem->tp;
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
+
+ if (odp_timer_pool_info(tp, &tpinfo) != 0)
+ CU_FAIL("odp_timer_pool_info");
+ CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
+ CU_ASSERT(tpinfo.param.res_ns == res_ns);
+ CU_ASSERT(tpinfo.param.min_tmo == min_tmo);
+ CU_ASSERT(tpinfo.param.max_tmo == max_tmo);
+ CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
+
+ ODPH_DBG("Timer pool handle: %" PRIu64 "\n", odp_timer_pool_to_u64(tp));
+ ODPH_DBG("Resolution: %" PRIu64 "\n", tparam.res_ns);
+ ODPH_DBG("Min timeout: %" PRIu64 "\n", tparam.min_tmo);
+ ODPH_DBG("Max timeout: %" PRIu64 "\n", tparam.max_tmo);
+ ODPH_DBG("Num timers: %u\n", tparam.num_timers);
+ ODPH_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS,
+ odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
+ ODPH_DBG("Max timers: %" PRIu32 "\n", timer_capa.max_timers);
+ ODPH_DBG("Max timer pools: %" PRIu32 "\n", timer_capa.max_pools);
+ ODPH_DBG("Max timer pools combined: %" PRIu32 "\n",
+ timer_capa.max_pools_combined);
+
+ tick = odp_timer_ns_to_tick(tp, 0);
+ CU_ASSERT(tick == 0);
+ ns2 = odp_timer_tick_to_ns(tp, tick);
+ CU_ASSERT(ns2 == 0);
+
+ for (ns = res_ns; ns < max_tmo; ns += res_ns) {
+ tick = odp_timer_ns_to_tick(tp, ns);
+ ns2 = odp_timer_tick_to_ns(tp, tick);
+
+ if (ns2 < ns - res_ns) {
+ ODPH_DBG("FAIL ns:%" PRIu64 " tick:%" PRIu64 " ns2:"
+ "%" PRIu64 "\n", ns, tick, ns2);
+ CU_FAIL("tick conversion: nsec too small\n");
+ }
+
+ if (ns2 > ns + res_ns) {
+ ODPH_DBG("FAIL ns:%" PRIu64 " tick:%" PRIu64 " ns2:"
+ "%" PRIu64 "\n", ns, tick, ns2);
+ CU_FAIL("tick conversion: nsec too large\n");
+ }
+ }
+
+ /* Initialize barrier used by worker threads for synchronization */
+ odp_barrier_init(&global_mem->test_barrier, num_workers);
+
+ /* Initialize the shared timeout counter */
+ odp_atomic_init_u32(&global_mem->ndelivtoolate, 0);
+
+ /* Initialize the number of finally allocated elements */
+ odp_atomic_init_u32(&global_mem->timers_allocated, 0);
+
+ /* Create and start worker threads */
+ global_mem->test_queue_type = queue_type;
+ odp_cunit_thread_create(num_workers, worker_entrypoint, NULL, 0, 0);
+
+ /* Wait for worker threads to exit */
+ odp_cunit_thread_join(num_workers);
+ ODPH_DBG("Number of timeouts delivered/received too late: "
+ "%" PRIu32 "\n",
+ odp_atomic_load_u32(&global_mem->ndelivtoolate));
+
+ /* Check some statistics after the test */
+ if (odp_timer_pool_info(tp, &tpinfo) != 0)
+ CU_FAIL("odp_timer_pool_info");
+ CU_ASSERT(tpinfo.param.num_timers == num_timers);
+ CU_ASSERT(tpinfo.cur_timers == 0);
+ timers_allocated = odp_atomic_load_u32(&global_mem->timers_allocated);
+ CU_ASSERT(tpinfo.hwm_timers == timers_allocated);
+
+ /* Destroy timer pool, all timers must have been freed */
+ odp_timer_pool_destroy(tp);
+
+ /* Destroy timeout pool, all timeouts must have been freed */
+ rc = odp_pool_destroy(tbp);
+ CU_ASSERT(rc == 0);
+}
+
+static void timer_test_plain_all(void)
+{
+ timer_test_all(ODP_QUEUE_TYPE_PLAIN);
+}
+
+static void timer_test_sched_all(void)
+{
+ timer_test_all(ODP_QUEUE_TYPE_SCHED);
+}
+
+static void timer_test_periodic_capa(void)
+{
+ odp_timer_capability_t timer_capa;
+ odp_timer_periodic_capability_t capa;
+ odp_fract_u64_t min_fract, max_fract, base_freq;
+ uint64_t freq_range, freq_step, first_hz, res_ns, max_multiplier;
+ double freq, min_freq, max_freq;
+ int ret;
+ uint32_t i, j;
+ uint32_t num = 100;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&timer_capa, 0, sizeof(odp_timer_capability_t));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &timer_capa) == 0);
+ CU_ASSERT(timer_capa.periodic.max_pools);
+ CU_ASSERT(timer_capa.periodic.max_timers);
+
+ min_fract = timer_capa.periodic.min_base_freq_hz;
+ max_fract = timer_capa.periodic.max_base_freq_hz;
+
+ CU_ASSERT_FATAL(min_fract.integer || min_fract.numer);
+ CU_ASSERT_FATAL(max_fract.integer || max_fract.numer);
+
+ if (min_fract.numer) {
+ CU_ASSERT_FATAL(min_fract.denom);
+ CU_ASSERT_FATAL(min_fract.numer < min_fract.denom);
+ }
+
+ if (max_fract.numer) {
+ CU_ASSERT_FATAL(max_fract.denom);
+ CU_ASSERT_FATAL(max_fract.numer < max_fract.denom);
+ }
+
+ min_freq = odp_fract_u64_to_dbl(&min_fract);
+ max_freq = odp_fract_u64_to_dbl(&max_fract);
+ CU_ASSERT(min_freq <= max_freq);
+
+ memset(&capa, 0, sizeof(odp_timer_periodic_capability_t));
+
+ /* Min freq, capa fills in resolution */
+ capa.base_freq_hz = min_fract;
+ capa.max_multiplier = 1;
+ capa.res_ns = 0;
+
+ CU_ASSERT(odp_timer_periodic_capability(clk_src, &capa) == 1);
+ CU_ASSERT(capa.base_freq_hz.integer == min_fract.integer);
+ CU_ASSERT(capa.base_freq_hz.numer == min_fract.numer);
+ CU_ASSERT(capa.base_freq_hz.denom == min_fract.denom);
+ CU_ASSERT(capa.max_multiplier >= 1);
+ CU_ASSERT(capa.res_ns > 0);
+
+ /* Max freq, capa fills in resolution */
+ capa.base_freq_hz = max_fract;
+ capa.max_multiplier = 1;
+ capa.res_ns = 0;
+
+ CU_ASSERT(odp_timer_periodic_capability(clk_src, &capa) == 1);
+ CU_ASSERT(capa.base_freq_hz.integer == max_fract.integer);
+ CU_ASSERT(capa.base_freq_hz.numer == max_fract.numer);
+ CU_ASSERT(capa.base_freq_hz.denom == max_fract.denom);
+ CU_ASSERT(capa.max_multiplier >= 1);
+ CU_ASSERT(capa.res_ns > 0);
+
+ freq_range = max_fract.integer - min_fract.integer;
+
+ if (freq_range < 10 * num)
+ num = freq_range / 10;
+
+ /* Too short frequency range */
+ if (num == 0)
+ return;
+
+ freq_step = freq_range / num;
+ first_hz = min_fract.integer + 1;
+
+ ODPH_DBG("min %" PRIu64 ", max %" PRIu64 ", range %" PRIu64 ", step %" PRIu64 "\n",
+ min_fract.integer, max_fract.integer, freq_range, freq_step);
+
+ for (i = 0; i < num; i++) {
+ base_freq.integer = first_hz + i * freq_step;
+ base_freq.numer = 0;
+ base_freq.denom = 0;
+
+ freq = odp_fract_u64_to_dbl(&base_freq);
+
+ if (freq > max_freq)
+ base_freq = max_fract;
+
+ for (j = 0; j < 4; j++) {
+ capa.base_freq_hz = base_freq;
+
+ max_multiplier = 1;
+ res_ns = 0;
+
+ if (j & 0x1)
+ max_multiplier = 2;
+
+ if (j & 0x2)
+ res_ns = 1 + (ODP_TIME_SEC_IN_NS / (10 * base_freq.integer));
+
+ capa.max_multiplier = max_multiplier;
+ capa.res_ns = res_ns;
+
+ ODPH_DBG("freq %" PRIu64 ", multip %" PRIu64 ", res %" PRIu64 ",\n",
+ base_freq.integer, max_multiplier, res_ns);
+
+ ret = odp_timer_periodic_capability(clk_src, &capa);
+
+ if (ret == 1) {
+ CU_ASSERT(capa.base_freq_hz.integer == base_freq.integer);
+ CU_ASSERT(capa.base_freq_hz.numer == base_freq.numer);
+ CU_ASSERT(capa.base_freq_hz.denom == base_freq.denom);
+ } else if (ret == 0) {
+ CU_ASSERT(capa.base_freq_hz.integer != base_freq.integer ||
+ capa.base_freq_hz.numer != base_freq.numer ||
+ capa.base_freq_hz.denom != base_freq.denom)
+
+ if (capa.base_freq_hz.numer) {
+ CU_ASSERT_FATAL(capa.base_freq_hz.denom);
+ CU_ASSERT_FATAL(capa.base_freq_hz.numer <
+ capa.base_freq_hz.denom);
+ }
+
+ CU_ASSERT(odp_fract_u64_to_dbl(&capa.base_freq_hz) >= min_freq);
+ CU_ASSERT(odp_fract_u64_to_dbl(&capa.base_freq_hz) <= max_freq);
+ }
+
+ if (ret >= 0) {
+ CU_ASSERT(capa.max_multiplier >= max_multiplier);
+
+ if (res_ns) {
+ /* Same or better resolution */
+ CU_ASSERT(capa.res_ns <= res_ns);
+ } else {
+ CU_ASSERT(capa.res_ns > 0);
+ }
+ }
+ }
+ }
+}
+
+static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int rounds,
+ int reuse_event)
+{
+ odp_timer_capability_t timer_capa;
+ odp_timer_periodic_capability_t periodic_capa;
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+ odp_timer_pool_param_t timer_param;
+ odp_timer_pool_t timer_pool;
+ odp_timer_periodic_start_t start_param;
+ odp_queue_t queue;
+ odp_timeout_t tmo;
+ odp_event_t ev = ODP_EVENT_INVALID;
+ odp_timer_t timer;
+ odp_time_t t1, t2;
+ uint64_t tick, cur_tick, period_ns, duration_ns, diff_ns, offset_ns;
+ double freq, freq_out, min_freq, max_freq;
+ int ret;
+ const char *user_ctx = "User context";
+ int num_tmo;
+ int done;
+ const int num = 200;
+ /* Test frequency: 1x 1000 Hz, or 1x min/max_base_freq */
+ const uint64_t multiplier = 1;
+ odp_fract_u64_t base_freq = {1000, 0, 0};
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ memset(&timer_capa, 0, sizeof(odp_timer_capability_t));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &timer_capa) == 0);
+
+ CU_ASSERT_FATAL(timer_capa.periodic.max_pools);
+ CU_ASSERT_FATAL(timer_capa.periodic.max_timers);
+ CU_ASSERT_FATAL(timer_capa.periodic.min_base_freq_hz.integer ||
+ timer_capa.periodic.min_base_freq_hz.numer);
+ CU_ASSERT_FATAL(timer_capa.periodic.max_base_freq_hz.integer ||
+ timer_capa.periodic.max_base_freq_hz.numer);
+
+ min_freq = odp_fract_u64_to_dbl(&timer_capa.periodic.min_base_freq_hz);
+ max_freq = odp_fract_u64_to_dbl(&timer_capa.periodic.max_base_freq_hz);
+ CU_ASSERT(min_freq <= max_freq);
+
+ if (odp_fract_u64_to_dbl(&base_freq) < min_freq)
+ base_freq = timer_capa.periodic.min_base_freq_hz;
+ else if (odp_fract_u64_to_dbl(&base_freq) > max_freq)
+ base_freq = timer_capa.periodic.max_base_freq_hz;
+
+ freq = odp_fract_u64_to_dbl(&base_freq);
+
+ /* No resolution requirement */
+ memset(&periodic_capa, 0, sizeof(odp_timer_periodic_capability_t));
+ periodic_capa.base_freq_hz = base_freq;
+ periodic_capa.max_multiplier = multiplier;
+
+ ret = odp_timer_periodic_capability(clk_src, &periodic_capa);
+ CU_ASSERT(ret == 0 || ret == 1);
+
+ if (ret < 0) {
+ ODPH_ERR("Periodic timer does not support tested frequency\n");
+ return;
+ }
+
+ freq_out = odp_fract_u64_to_dbl(&periodic_capa.base_freq_hz);
+
+ if (ret == 0) {
+ /* Allow 10% difference in outputted base frequency */
+ CU_ASSERT((freq_out > (0.9 * freq)) && (freq_out < (1.1 * freq)));
+
+ if (periodic_capa.base_freq_hz.numer) {
+ CU_ASSERT_FATAL(periodic_capa.base_freq_hz.numer <
+ periodic_capa.base_freq_hz.denom);
+ }
+ } else {
+ CU_ASSERT(base_freq.integer == periodic_capa.base_freq_hz.integer);
+ CU_ASSERT(base_freq.numer == periodic_capa.base_freq_hz.numer);
+ CU_ASSERT(base_freq.denom == periodic_capa.base_freq_hz.denom);
+ }
+
+ CU_ASSERT(periodic_capa.res_ns > 0);
+ CU_ASSERT(periodic_capa.max_multiplier >= multiplier);
+
+ base_freq = periodic_capa.base_freq_hz;
+ freq = odp_fract_u64_to_dbl(&base_freq);
+ period_ns = ODP_TIME_SEC_IN_NS / (freq * multiplier);
+ duration_ns = num * period_ns;
+
+ odp_timer_pool_param_init(&timer_param);
+ timer_param.timer_type = ODP_TIMER_TYPE_PERIODIC;
+ timer_param.res_ns = 2 * periodic_capa.res_ns;
+ timer_param.num_timers = 1;
+ timer_param.clk_src = clk_src;
+ timer_param.periodic.base_freq_hz = base_freq;
+ timer_param.periodic.max_multiplier = multiplier;
+
+ ODPH_DBG("\n");
+ ODPH_DBG("Periodic timer pool create params:\n");
+ ODPH_DBG(" Resolution ns: %" PRIu64 "\n", timer_param.res_ns);
+ ODPH_DBG(" Base freq hz: %" PRIu64 " + %" PRIu64 "/%" PRIu64 " (%f)\n",
+ timer_param.periodic.base_freq_hz.integer,
+ timer_param.periodic.base_freq_hz.numer,
+ timer_param.periodic.base_freq_hz.denom, freq);
+ ODPH_DBG(" Max multiplier: %" PRIu64 "\n", timer_param.periodic.max_multiplier);
+ ODPH_DBG("Capabilities:\n");
+ ODPH_DBG(" Max multiplier: %" PRIu64 " (with %f hz)\n",
+ periodic_capa.max_multiplier, freq);
+ ODPH_DBG(" Max resolution: %" PRIu64 " ns (with %f hz)\n", periodic_capa.res_ns, freq);
+ ODPH_DBG(" Min base freq: %f hz\n", min_freq);
+ ODPH_DBG(" Max base freq: %f hz\n", max_freq);
+
+ timer_pool = odp_timer_pool_create("periodic_timer", &timer_param);
+ CU_ASSERT_FATAL(timer_pool != ODP_TIMER_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = 1;
+
+ pool = odp_pool_create("timeout_pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+
+ queue = odp_queue_create("timeout_queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ timer = odp_timer_alloc(timer_pool, queue, user_ctx);
+ CU_ASSERT_FATAL(timer != ODP_TIMER_INVALID);
+
+ /* Pool should have only one timer */
+ CU_ASSERT_FATAL(odp_timer_alloc(timer_pool, queue, user_ctx) == ODP_TIMER_INVALID);
+
+ memset(&start_param, 0, sizeof(odp_timer_periodic_start_t));
+ offset_ns = period_ns / 2;
+
+ if (use_first) {
+ /* First tick moves timer to start before the first period */
+ duration_ns -= (period_ns - offset_ns);
+ }
+
+ for (int round = 0; round < rounds; round++) {
+ num_tmo = 0;
+ done = 0;
+
+ if (!reuse_event || round == 0) {
+ tmo = odp_timeout_alloc(pool);
+ ev = odp_timeout_to_event(tmo);
+ }
+
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ cur_tick = odp_timer_current_tick(timer_pool);
+ tick = cur_tick + odp_timer_ns_to_tick(timer_pool, offset_ns);
+
+ if (use_first)
+ start_param.first_tick = tick;
+
+ start_param.freq_multiplier = multiplier;
+ start_param.tmo_ev = ev;
+
+ ODPH_DBG("Periodic timer start:\n");
+ ODPH_DBG(" Current tick: %" PRIu64 "\n", cur_tick);
+ ODPH_DBG(" First tick: %" PRIu64 "\n", start_param.first_tick);
+ ODPH_DBG(" Multiplier: %" PRIu64 "\n", start_param.freq_multiplier);
+ ODPH_DBG(" Period: %" PRIu64 " nsec\n", period_ns);
+ ODPH_DBG("Expected duration: %" PRIu64 " nsec\n", duration_ns);
+
+ ret = odp_timer_periodic_start(timer, &start_param);
+
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_ERR("First tick too near\n");
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_ERR("First tick too far\n");
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_ERR("Periodic timer start failed\n");
+
+ CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
+
+ t1 = odp_time_local();
+
+ /* Wait for timeouts. Make sure that scheduler context is not held when
+ * exiting the loop. */
+ while (1) {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ t2 = odp_time_local();
+ diff_ns = odp_time_diff_ns(t2, t1);
+ if (diff_ns > (10 * duration_ns))
+ break;
+
+ if (num_tmo >= num)
+ break;
+
+ continue;
+ }
+
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+
+ if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ CU_ASSERT(odp_timer_periodic_ack(timer, ev) == 0);
+ num_tmo++;
+ }
+
+ CU_ASSERT(num_tmo == num);
+
+ /* Allow +-30% error on test duration */
+ CU_ASSERT((diff_ns > 0.7 * duration_ns) && (diff_ns < 1.3 * duration_ns));
+
+ /* Stop periodic timer */
+ ret = odp_timer_periodic_cancel(timer);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ODPH_DBG("Measured duration: %" PRIu64 " nsec\n", diff_ns);
+
+ t1 = odp_time_local();
+ while (1) {
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ else
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ t2 = odp_time_local();
+ diff_ns = odp_time_diff_ns(t2, t1);
+ if (diff_ns > (10 * duration_ns))
+ break;
+
+ if (done)
+ break;
+
+ continue;
+ }
+
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+
+ if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ ret = odp_timer_periodic_ack(timer, ev);
+ CU_ASSERT(ret == 1 || ret == 2);
+
+ if (ret == 2) {
+ done = 1;
+ if (reuse_event && round < rounds - 1)
+ break;
+ odp_event_free(ev);
+ }
+ }
+
+ /* Check that ack() returned 2 on the last event */
+ CU_ASSERT(done);
+ CU_ASSERT(ret == 2);
+ }
+
+ CU_ASSERT(odp_timer_free(timer) == 0);
+ odp_timer_pool_destroy(timer_pool);
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void timer_test_periodic_sched(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0, 1, 0);
+}
+
+static void timer_test_periodic_plain(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, 0, 1, 0);
+}
+
+static void timer_test_periodic_sched_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, FIRST_TICK, 1, 0);
+}
+
+static void timer_test_periodic_plain_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, FIRST_TICK, 1, 0);
+}
+
+static void timer_test_periodic_reuse(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0, 2, 0);
+}
+
+static void timer_test_periodic_event_reuse(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0, 2, 1);
+}
+
+odp_testinfo_t timer_general_suite[] = {
+ ODP_TEST_INFO(timer_test_param_init),
+ ODP_TEST_INFO(timer_test_timeout_pool_alloc),
+ ODP_TEST_INFO(timer_test_timeout_pool_alloc_multi),
+ ODP_TEST_INFO(timer_test_timeout_from_event),
+ ODP_TEST_INFO(timer_test_timeout_pool_free),
+ ODP_TEST_INFO(timer_test_timeout_user_area),
+ ODP_TEST_INFO(timer_test_capa_allsrc),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t timer_general_suites[] = {
+ {"Timer general", NULL, NULL, timer_general_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+odp_testinfo_t timer_suite[] = {
+ ODP_TEST_INFO(timer_test_capa),
+ ODP_TEST_INFO(timer_pool_create_destroy),
+ ODP_TEST_INFO(timer_pool_create_max),
+ ODP_TEST_INFO(timer_pool_max_res),
+ ODP_TEST_INFO(timer_pool_current_tick),
+ ODP_TEST_INFO(timer_pool_sample_ticks),
+ ODP_TEST_INFO(timer_pool_tick_info),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_rel_wait, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_wait, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_rel_cancel, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_cancel, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_rel_restart_wait, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_restart_wait, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_rel_restart_cancel, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_restart_cancel, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_wait_3sec, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_wait, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_wait, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_cancel, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_cancel, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_restart_wait, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_restart_wait, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_restart_cancel, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_restart_cancel, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_wait_3sec, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_buf_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_buf_event_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_pkt_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_pkt_event_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_reuse,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_buf_event_reuse,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_pkt_event_reuse,
+ check_sched_queue_support),
+ ODP_TEST_INFO(timer_test_cancel),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_min_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_min_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_max_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_max_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_min_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_min_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_max_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_max_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_queue,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_queue,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_queue_priv,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_queue_priv,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_queue_exp_relax,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_queue_exp_relax,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_all,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_all,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_capa,
+ check_periodic_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched,
+ check_periodic_sched_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched_first,
+ check_periodic_sched_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain,
+ check_periodic_plain_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain_first,
+ check_periodic_plain_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_reuse,
+ check_periodic_sched_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_event_reuse,
+ check_periodic_sched_support),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t timer_suites[] = {
+ {"Timer", NULL, NULL, timer_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ if (global_init())
+ return -1;
+
+ odp_cunit_register_global_init(timer_global_init);
+ odp_cunit_register_global_term(timer_global_term);
+
+ if (odp_cunit_register(timer_general_suites))
+ goto fail;
+
+ if (odp_cunit_run())
+ goto fail;
+
+ for (int i = ODP_CLOCK_SRC_0; i < ODP_CLOCK_NUM_SRC; i++) {
+ odp_timer_capability_t capa;
+
+ if (odp_timer_capability(i, &capa))
+ continue;
+
+ printf("\n\n"
+ "-------------------------------------------------------------------------------\n"
+ " Running tests with clock source %d\n"
+ "-------------------------------------------------------------------------------\n\n",
+ i);
+
+ test_global->clk_src = i;
+
+ odp_cunit_register_global_init(timer_global_init);
+ odp_cunit_register_global_term(timer_global_term);
+
+ if (odp_cunit_register(timer_suites))
+ goto fail;
+
+ if (odp_cunit_run())
+ ret = -1;
+ }
+
+ global_term();
+ return ret;
+
+fail:
+ global_term();
+ return -1;
+}
diff --git a/test/common_plat/validation/api/traffic_mngr/.gitignore b/test/validation/api/traffic_mngr/.gitignore
index efd07a27d..efd07a27d 100644
--- a/test/common_plat/validation/api/traffic_mngr/.gitignore
+++ b/test/validation/api/traffic_mngr/.gitignore
diff --git a/test/validation/api/traffic_mngr/Makefile.am b/test/validation/api/traffic_mngr/Makefile.am
new file mode 100644
index 000000000..53a00f5e6
--- /dev/null
+++ b/test/validation/api/traffic_mngr/Makefile.am
@@ -0,0 +1,5 @@
+include ../Makefile.inc
+
+test_PROGRAMS = traffic_mngr_main
+traffic_mngr_main_SOURCES = traffic_mngr.c
+LDADD += -lm
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c b/test/validation/api/traffic_mngr/traffic_mngr.c
index 88a7d8c7e..b7f546dcd 100644
--- a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/validation/api/traffic_mngr/traffic_mngr.c
@@ -1,31 +1,33 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Marvell
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
-#include <odp.h>
+
+#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#include <test_debug.h>
#include "odp_cunit_common.h"
-#include "traffic_mngr.h"
#define TM_DEBUG 0
-#define MAX_CAPABILITIES 16
#define MAX_NUM_IFACES 2
#define MAX_TM_SYSTEMS 3
#define NUM_LEVELS 3
#define NUM_PRIORITIES 4
#define NUM_QUEUES_PER_NODE NUM_PRIORITIES
-#define FANIN_RATIO 8
+#define FANIN_RATIO 8u
#define NUM_LEVEL0_TM_NODES 1
#define NUM_LEVEL1_TM_NODES FANIN_RATIO
#define NUM_LEVEL2_TM_NODES (FANIN_RATIO * FANIN_RATIO)
@@ -63,7 +65,7 @@
#define MED_DROP_PROB 4
#define MAX_DROP_PROB 8
-#define MAX_PKTS 1000
+#define MAX_PKTS 1000u
#define PKT_BUF_SIZE 1460
#define MAX_PAYLOAD 1400
#define USE_IPV4 false
@@ -107,18 +109,21 @@
#define MBPS 1000000
#define GBPS 1000000000
-#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
-#define MAX(a, b) (((a) <= (b)) ? (b) : (a))
-
#define TM_PERCENT(percent) ((uint32_t)(100 * percent))
typedef enum {
SHAPER_PROFILE, SCHED_PROFILE, THRESHOLD_PROFILE, WRED_PROFILE
} profile_kind_t;
+typedef enum {
+ THRESHOLD_BYTE,
+ THRESHOLD_PACKET,
+ THRESHOLD_BYTE_AND_PACKET
+} threshold_type_t;
+
typedef struct {
uint32_t num_queues;
- odp_tm_queue_t tm_queues[0];
+ odp_tm_queue_t tm_queues[];
} tm_queue_desc_t;
typedef struct tm_node_desc_s tm_node_desc_t;
@@ -131,7 +136,7 @@ struct tm_node_desc_s {
odp_tm_node_t node;
odp_tm_node_t parent_node;
tm_queue_desc_t *queue_desc;
- tm_node_desc_t *children[0];
+ tm_node_desc_t *children[];
};
typedef struct {
@@ -201,7 +206,7 @@ static const char ALPHABET[] =
/* The following constant table determines the minimum and maximum number of
* pkts that will be received when sending 100 pkts through a system with a
* drop probability of p% (using a uniform probability distribution), with a
- * confidence of 99.9% 99.99% and 99.999%. The confidence is interepreted as
+ * confidence of 99.9% 99.99% and 99.999%. The confidence is interpreted as
* follows: a 99.99% confidence says that receiving LESS pkts than the given
* minimum or receiving MORE pkts than the given maximum (assuming a uniform
* drop percent of p) will happen less than 1 time in 10,000 trials.
@@ -280,6 +285,11 @@ static uint32_t num_odp_tm_systems;
static odp_tm_capabilities_t tm_capabilities;
+static bool dynamic_shaper_update = true;
+static bool dynamic_sched_update = true;
+static bool dynamic_threshold_update = true;
+static bool dynamic_wred_update = true;
+
static odp_tm_shaper_t shaper_profiles[NUM_SHAPER_PROFILES];
static odp_tm_sched_t sched_profiles[NUM_SCHED_PROFILES];
static odp_tm_threshold_t threshold_profiles[NUM_THRESHOLD_PROFILES];
@@ -316,10 +326,12 @@ static uint32_t num_ifaces;
static odp_pool_t pools[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
static odp_pktio_t pktios[MAX_NUM_IFACES];
+static odp_bool_t pktio_started[MAX_NUM_IFACES];
static odp_pktin_queue_t pktins[MAX_NUM_IFACES];
-static odp_pktout_queue_t pktouts[MAX_NUM_IFACES];
static odp_pktin_queue_t rcv_pktin;
static odp_pktio_t xmt_pktio;
+static odp_pktio_capability_t xmt_pktio_capa;
+static odp_lso_profile_t lso_ipv4_profile;
static odph_ethaddr_t src_mac;
static odph_ethaddr_t dst_mac;
@@ -327,6 +339,13 @@ static odph_ethaddr_t dst_mac;
static uint32_t cpu_unique_id;
static uint32_t cpu_tcp_seq_num;
+static int8_t suite_inactive;
+
+static uint64_t tm_shaper_min_rate;
+static uint64_t tm_shaper_max_rate;
+static uint32_t tm_shaper_min_burst;
+static uint32_t tm_shaper_max_burst;
+
static void busy_wait(uint64_t nanoseconds)
{
odp_time_t start_time, end_time;
@@ -373,38 +392,45 @@ static odp_bool_t approx_eq64(uint64_t val, uint64_t correct)
return false;
}
+static uint64_t
+clamp_rate(uint64_t rate)
+{
+ return ODPH_MIN(ODPH_MAX(rate, tm_shaper_min_rate), tm_shaper_max_rate);
+}
+
+static uint32_t
+clamp_burst(uint32_t burst)
+{
+ return ODPH_MIN(ODPH_MAX(burst, tm_shaper_min_burst), tm_shaper_max_burst);
+}
+
static int test_overall_capabilities(void)
{
odp_tm_level_capabilities_t *per_level;
- odp_tm_capabilities_t capabilities_array[MAX_CAPABILITIES];
+ odp_tm_capabilities_t capabilities_array[2];
odp_tm_capabilities_t *cap_ptr;
+ odp_tm_egress_t egress;
+ odp_bool_t *prio_modes;
uint32_t num_records, idx, num_levels, level;
int rc;
- rc = odp_tm_capabilities(capabilities_array, MAX_CAPABILITIES);
- if (rc < 0) {
- CU_ASSERT(rc < 0);
- return -1;
- }
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
+ egress.pktio = xmt_pktio;
- /* Now test the return code (which did not indicate a failure code)
- * to make sure that there is at least ONE capabilities record
- * returned */
- if (rc == 0) {
- CU_ASSERT(rc != 0);
- return -1;
- }
+ rc = odp_tm_egress_capabilities(&capabilities_array[0], &egress);
+ CU_ASSERT_FATAL(rc == 0);
+ num_records = 1;
- /* Now test the return code to see if there were more capabilities
- * records than the call above allowed for. This is not an error,
- * just an interesting fact.
- */
- num_records = MAX_CAPABILITIES;
- if (MAX_CAPABILITIES < rc)
- LOG_DBG("There were more than %u capabilities (%u)\n",
- MAX_CAPABILITIES, rc);
- else
- num_records = rc;
+ /* Get capabilities for egress kind function. */
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_FN;
+ rc = odp_tm_egress_capabilities(&capabilities_array[1], &egress);
+ CU_ASSERT_FATAL(rc == 0);
+
+ /* Validate this record only if egress function is supported */
+ if (capabilities_array[1].max_tm_queues)
+ num_records++;
/* Loop through the returned capabilities (there MUST be at least one)
* and do some basic checks to prove that it isn't just an empty
@@ -439,7 +465,25 @@ static int test_overall_capabilities(void)
CU_ASSERT(per_level->max_priority != 0);
return -1;
}
+
+ if (per_level->tm_node_shaper_supported ||
+ per_level->tm_node_rate_limiter_supported) {
+ CU_ASSERT(per_level->max_burst > 0);
+ CU_ASSERT(per_level->min_rate > 0);
+ CU_ASSERT(per_level->max_rate > 0);
+ }
+
+ if (per_level->tm_node_shaper_packet_mode) {
+ CU_ASSERT(per_level->max_burst_packets > 0);
+ CU_ASSERT(per_level->min_rate_packets > 0);
+ CU_ASSERT(per_level->max_rate_packets > 0);
+ }
}
+
+ /* At least one pkt priority mode needs to be supported */
+ prio_modes = cap_ptr->pkt_prio_modes;
+ CU_ASSERT((prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] != 0) ||
+ (prio_modes[ODP_TM_PKT_PRIO_MODE_OVERWRITE] != 0));
}
return 0;
@@ -455,7 +499,7 @@ static int wait_linkup(odp_pktio_t pktio)
for (i = 0; i < wait_num; i++) {
ret = odp_pktio_link_status(pktio);
- if (ret < 0 || ret == 1)
+ if (ret == ODP_PKTIO_LINK_STATUS_UNKNOWN || ret == ODP_PKTIO_LINK_STATUS_UP)
break;
/* link is down, call status again after delay */
odp_time_wait_ns(wait_ns);
@@ -467,12 +511,15 @@ static int wait_linkup(odp_pktio_t pktio)
static int open_pktios(void)
{
odp_pktio_param_t pktio_param;
+ odp_pktio_config_t pktio_config;
odp_pool_param_t pool_param;
odp_pktio_t pktio;
odp_pool_t pkt_pool;
uint32_t iface;
char pool_name[ODP_POOL_NAME_LEN];
int rc, ret;
+ int pkt_aging = 0;
+ int lso = 0;
odp_pool_param_init(&pool_param);
pool_param.pkt.num = 10 * MAX_PKTS;
@@ -480,7 +527,6 @@ static int open_pktios(void)
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
- pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
for (iface = 0; iface < num_ifaces; iface++) {
snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s",
@@ -493,33 +539,51 @@ static int open_pktios(void)
}
pools[iface] = pkt_pool;
- pktio = odp_pktio_open(iface_name[iface], pkt_pool,
- &pktio_param);
- if (pktio == ODP_PKTIO_INVALID)
- pktio = odp_pktio_lookup(iface_name[iface]);
+
+ /* Zero'th device is always PKTOUT TM as we use it from XMIT */
+ if (iface == 0) {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_TM;
+
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+
+ /* On failure check if pktio can be opened in non-TM mode.
+ * If non-TM mode works, then we can assume that PKTIO
+ * does not support TM
+ */
+ if (pktio == ODP_PKTIO_INVALID) {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+
+ /* Return >0 to indicate no TM support */
+ if (pktio != ODP_PKTIO_INVALID) {
+ odp_pktio_close(pktio);
+ return 1;
+ }
+ }
+ } else {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DISABLED;
+
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+ }
+
+ pktios[iface] = pktio;
if (pktio == ODP_PKTIO_INVALID) {
- LOG_ERR("odp_pktio_open() failed\n");
+ ODPH_ERR("odp_pktio_open() failed\n");
return -1;
}
/* Set defaults for PktIn and PktOut queues */
(void)odp_pktin_queue_config(pktio, NULL);
- (void)odp_pktout_queue_config(pktio, NULL);
rc = odp_pktio_promisc_mode_set(pktio, true);
if (rc != 0)
printf("****** promisc_mode_set failed ******\n");
- pktios[iface] = pktio;
-
if (odp_pktin_queue(pktio, &pktins[iface], 1) != 1) {
odp_pktio_close(pktio);
- LOG_ERR("odp_pktio_open() failed: no pktin queue\n");
- return -1;
- }
-
- if (odp_pktout_queue(pktio, &pktouts[iface], 1) != 1) {
- odp_pktio_close(pktio);
- LOG_ERR("odp_pktio_open() failed: no pktout queue\n");
+ ODPH_ERR("odp_pktio_open() failed: no pktin queue\n");
return -1;
}
@@ -533,7 +597,7 @@ static int open_pktios(void)
ODPH_ETHADDR_LEN);
if (rc != ODPH_ETHADDR_LEN) {
- LOG_ERR("odp_pktio_mac_addr() failed\n");
+ ODPH_ERR("odp_pktio_mac_addr() failed\n");
return -1;
}
}
@@ -543,25 +607,69 @@ static int open_pktios(void)
rcv_pktin = pktins[1];
ret = odp_pktio_start(pktios[1]);
if (ret != 0) {
- LOG_ERR("odp_pktio_start() failed\n");
+ ODPH_ERR("odp_pktio_start() failed\n");
return -1;
}
+ pktio_started[1] = true;
} else {
xmt_pktio = pktios[0];
rcv_pktin = pktins[0];
}
- ret = odp_pktio_start(pktios[0]);
+ if (odp_pktio_capability(xmt_pktio, &xmt_pktio_capa)) {
+ ODPH_ERR("pktio capa failed\n");
+ return -1;
+ }
+
+ odp_pktio_config_init(&pktio_config);
+
+ /* Enable packet aging if supported */
+ if (xmt_pktio_capa.max_tx_aging_tmo_ns) {
+ pkt_aging = 1;
+ pktio_config.pktout.bit.aging_ena = 1;
+ }
+
+ /* Enable LSO if supported */
+ if (xmt_pktio_capa.lso.max_profiles && xmt_pktio_capa.lso.max_profiles_per_pktio) {
+ lso = 1;
+ pktio_config.enable_lso = 1;
+ }
+
+ /* Enable selected features */
+ if (lso || pkt_aging) {
+ if (odp_pktio_config(xmt_pktio, &pktio_config)) {
+ ODPH_ERR("pktio configure failed\n");
+ return -1;
+ }
+ }
+
+ /* Add LSO profiles before start */
+ if (lso) {
+ odp_lso_profile_param_t prof_param;
+
+ if (xmt_pktio_capa.lso.proto.ipv4) {
+ odp_lso_profile_param_init(&prof_param);
+ prof_param.lso_proto = ODP_LSO_PROTO_IPV4;
+
+ lso_ipv4_profile = odp_lso_profile_create(xmt_pktio, &prof_param);
+ if (lso_ipv4_profile == ODP_LSO_PROFILE_INVALID) {
+ ODPH_ERR("Failed to create IPv4 LSO profile\n");
+ return -1;
+ }
+ }
+ }
+
+ ret = odp_pktio_start(xmt_pktio);
if (ret != 0) {
- LOG_ERR("odp_pktio_start() failed\n");
+ ODPH_ERR("odp_pktio_start() failed\n");
return -1;
}
+ pktio_started[0] = true;
/* Now wait until the link or links are up. */
- rc = wait_linkup(pktios[0]);
+ rc = wait_linkup(xmt_pktio);
if (rc != 1) {
- LOG_ERR("link %" PRIX64 " not up\n",
- odp_pktio_to_u64(pktios[0]));
+ ODPH_ERR("link %" PRIX64 " not up\n", odp_pktio_to_u64(xmt_pktio));
return -1;
}
@@ -571,8 +679,7 @@ static int open_pktios(void)
/* Wait for 2nd link to be up */
rc = wait_linkup(pktios[1]);
if (rc != 1) {
- LOG_ERR("link %" PRIX64 " not up\n",
- odp_pktio_to_u64(pktios[0]));
+ ODPH_ERR("link %" PRIX64 " not up\n", odp_pktio_to_u64(pktios[1]));
return -1;
}
@@ -595,15 +702,16 @@ static int get_unique_id(odp_packet_t odp_pkt,
/* For IPv4 pkts use the ident field to store the unique_id. */
ident_offset = l3_offset + offsetof(odph_ipv4hdr_t, id);
- odp_packet_copy_to_mem(odp_pkt, ident_offset, 2, &be_ip_ident);
+ CU_ASSERT_FATAL(odp_packet_copy_to_mem(odp_pkt, ident_offset, 2,
+ &be_ip_ident) == 0);
unique_id = odp_be_to_cpu_16(be_ip_ident);
is_ipv4 = true;
} else if (odp_packet_has_ipv6(odp_pkt)) {
/* For IPv6 pkts use the flow field to store the unique_id. */
flow_offset = l3_offset + offsetof(odph_ipv6hdr_t, ver_tc_flow);
- odp_packet_copy_to_mem(odp_pkt, flow_offset, 4,
- &be_ver_tc_flow);
+ CU_ASSERT_FATAL(odp_packet_copy_to_mem(odp_pkt, flow_offset, 4,
+ &be_ver_tc_flow) == 0);
ver_tc_flow = odp_be_to_cpu_32(be_ver_tc_flow);
unique_id = ver_tc_flow & ODPH_IPV6HDR_FLOW_LABEL_MASK;
is_ipv4 = false;
@@ -645,8 +753,9 @@ static int get_ip_tos(odp_packet_t odp_pkt, uint8_t *tos_ptr)
{
odph_ipv4hdr_t *ipv4_hdr;
odph_ipv6hdr_t *ipv6_hdr;
- uint32_t hdr_len, ver_tc_flow;
+ uint32_t ver_tc_flow;
uint8_t tos, tc;
+ uint32_t hdr_len = 0;
if (odp_packet_has_ipv4(odp_pkt)) {
ipv4_hdr = odp_packet_l3_ptr(odp_pkt, &hdr_len);
@@ -701,7 +810,7 @@ static odp_packet_t make_pkt(odp_pool_t pkt_pool,
l2_len = l2_hdr_len + l3_len;
pkt_len = l2_len;
if (unique_id == 0) {
- LOG_ERR("make_pkt called with invalid unique_id of 0\n");
+ ODPH_ERR("%s called with invalid unique_id of 0\n", __func__);
return ODP_PACKET_INVALID;
}
@@ -808,7 +917,7 @@ static odp_packet_t make_pkt(odp_pool_t pkt_pool,
/* Next the UDP/TCP checksum. */
if (odph_udp_tcp_chksum(odp_pkt, ODPH_CHKSUM_GENERATE, NULL) != 0)
- LOG_ERR("odph_udp_tcp_chksum failed\n");
+ ODPH_ERR("odph_udp_tcp_chksum failed\n");
return odp_pkt;
}
@@ -830,43 +939,58 @@ static xmt_pkt_desc_t *find_matching_xmt_pkt_desc(uint16_t unique_id)
return NULL;
}
-static int receive_pkts(odp_tm_t odp_tm,
- odp_pktin_queue_t pktin,
- uint32_t num_pkts,
- uint64_t rate_bps)
+static int32_t receive_loop(odp_tm_t tm, odp_pktin_queue_t pktin, uint32_t num_pkts,
+ uint64_t timeout_ns)
{
- xmt_pkt_desc_t *xmt_pkt_desc;
- rcv_pkt_desc_t *rcv_pkt_desc;
- odp_packet_t rcv_pkt;
- odp_time_t start_time, current_time, duration, xmt_time;
- odp_time_t rcv_time, delta_time;
- uint64_t temp1, timeout_ns, duration_ns, delta_ns;
- uint32_t pkts_rcvd, rcv_idx, l4_offset, l4_hdr_len, app_offset;
- uint16_t unique_id;
- uint8_t *pkt_class_ptr, pkt_class, is_ipv4_pkt;
- int rc;
-
- temp1 = (1000000ULL * 10000ULL * (uint64_t)num_pkts) / rate_bps;
- timeout_ns = 1000ULL * ((4ULL * temp1) + 10000ULL);
+ odp_time_t start_time, current_time;
+ uint64_t duration_ns;
+ uint32_t pkts_rcvd;
+ int rc;
pkts_rcvd = 0;
start_time = odp_time_local();
duration_ns = 0;
- while ((pkts_rcvd < num_pkts) || (!odp_tm_is_idle(odp_tm))) {
+ while ((pkts_rcvd < num_pkts) || (!odp_tm_is_idle(tm))) {
rc = odp_pktin_recv(pktin, &rcv_pkts[pkts_rcvd], 1);
if (rc < 0)
- return rc;
+ return -1;
current_time = odp_time_local();
- duration = odp_time_diff(current_time, start_time);
- duration_ns = odp_time_to_ns(duration);
+ duration_ns = odp_time_diff_ns(current_time, start_time);
+
if (rc == 1)
rcv_pkt_descs[pkts_rcvd++].rcv_time = current_time;
else if (timeout_ns < duration_ns)
break;
}
+ return pkts_rcvd;
+}
+
+static int receive_pkts(odp_tm_t tm, odp_pktin_queue_t pktin, uint32_t num_pkts,
+ uint64_t rate_bps)
+{
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ rcv_pkt_desc_t *rcv_pkt_desc;
+ odp_packet_t rcv_pkt;
+ odp_time_t xmt_time;
+ odp_time_t rcv_time, delta_time;
+ uint64_t delta_ns, tmp, timeout_ns;
+ uint32_t pkts_rcvd, rcv_idx, l4_offset, l4_hdr_len, app_offset;
+ uint16_t unique_id;
+ uint8_t *pkt_class_ptr, pkt_class, is_ipv4_pkt;
+ int32_t rc;
+
+ tmp = (1000000ULL * 10000ULL * (uint64_t)num_pkts) / rate_bps;
+ timeout_ns = 1000ULL * ((4ULL * tmp) + 10000ULL);
+
+ rc = receive_loop(tm, pktin, num_pkts, timeout_ns);
+ if (rc < 0)
+ return -1;
+
+ pkts_rcvd = rc;
+
/* Now go through matching the rcv pkts to the xmt pkts, determining
* which xmt_pkts were lost and for the ones that did arrive, how
* long did they take. We don't do this work while receiving the pkts
@@ -882,17 +1006,18 @@ static int receive_pkts(odp_tm_t odp_tm,
(odp_packet_has_l3_error(rcv_pkt) << 2) |
(odp_packet_has_l4_error(rcv_pkt) << 3);
- LOG_ERR("received a pkt with the following errors\n");
- LOG_ERR(" l2_err=%u l3_err=%u l4_err=%u. Skipping\n",
- (rcv_pkt_desc->errors >> 1) & 0x1,
- (rcv_pkt_desc->errors >> 2) & 0x1,
- (rcv_pkt_desc->errors >> 3) & 0x1);
+ ODPH_ERR("received a pkt with the following errors\n");
+ ODPH_ERR(" l2_err=%u l3_err=%u l4_err=%u. "
+ "Skipping\n",
+ (rcv_pkt_desc->errors >> 1) & 0x1,
+ (rcv_pkt_desc->errors >> 2) & 0x1,
+ (rcv_pkt_desc->errors >> 3) & 0x1);
}
unique_id = 0;
rc = get_unique_id(rcv_pkt, &unique_id, &is_ipv4_pkt);
if (rc != 0) {
- LOG_ERR("received a non IPv4/IPv6 pkt\n");
+ ODPH_ERR("received a non IPv4/IPv6 pkt\n");
return -1;
}
@@ -1067,6 +1192,45 @@ static int make_pkts(uint32_t num_pkts,
return 0;
}
+static uint32_t send_pkts_multi(odp_tm_queue_t tm_queue, uint32_t num_pkts)
+{
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ odp_packet_t odp_pkt;
+ uint32_t xmt_pkt_idx, pkts_sent;
+ int64_t rc, i = 0;
+
+ /* Now send the pkts as fast as we can. RED drops are internally
+ * consumed by odp_tm_enq_multi().
+ */
+ xmt_pkt_idx = num_pkts_sent;
+ rc = odp_tm_enq_multi(tm_queue, &xmt_pkts[xmt_pkt_idx], num_pkts);
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(rc <= num_pkts);
+
+ /* Record consumed packets */
+ pkts_sent = 0;
+ for (i = 0; i < rc; i++) {
+ xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx + i];
+ xmt_pkt_desc->xmt_idx = xmt_pkt_idx + i;
+ xmt_pkt_desc->xmt_time = odp_time_local();
+ xmt_pkt_desc->tm_queue = tm_queue;
+ pkts_sent++;
+ }
+
+ /* Free rejected pkts */
+ for (; i < num_pkts; i++) {
+ xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx + i];
+ xmt_pkt_desc->xmt_idx = xmt_pkt_idx + i;
+
+ odp_pkt = xmt_pkts[xmt_pkt_idx + i];
+ odp_packet_free(odp_pkt);
+ xmt_pkts[xmt_pkt_idx + i] = ODP_PACKET_INVALID;
+ }
+ num_pkts_sent += num_pkts;
+
+ return pkts_sent;
+}
+
static uint32_t send_pkts(odp_tm_queue_t tm_queue, uint32_t num_pkts)
{
xmt_pkt_desc_t *xmt_pkt_desc;
@@ -1082,10 +1246,12 @@ static uint32_t send_pkts(odp_tm_queue_t tm_queue, uint32_t num_pkts)
xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx];
/* Alternate calling with odp_tm_enq and odp_tm_enq_with_cnt */
- if ((idx & 1) == 0)
+ if ((idx & 1) == 0) {
rc = odp_tm_enq(tm_queue, odp_pkt);
- else
+ CU_ASSERT(rc <= 0);
+ } else {
rc = odp_tm_enq_with_cnt(tm_queue, odp_pkt);
+ }
xmt_pkt_desc->xmt_idx = xmt_pkt_idx;
if (0 <= rc) {
@@ -1103,6 +1269,66 @@ static uint32_t send_pkts(odp_tm_queue_t tm_queue, uint32_t num_pkts)
return pkts_sent;
}
+static uint32_t send_pkts_lso(odp_tm_queue_t tm_queue, uint32_t num_pkts,
+ odp_lso_protocol_t lso_proto, uint32_t max_len)
+{
+ odp_packet_lso_opt_t lso_opt;
+ odp_lso_profile_t profile;
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ odp_packet_t pkt;
+ uint32_t offset, pkts_sent;
+ int64_t rc, i, idx;
+
+ pkt = xmt_pkts[0];
+
+ if (lso_proto == ODP_LSO_PROTO_IPV4) {
+ profile = lso_ipv4_profile;
+ offset = odp_packet_l4_offset(pkt);
+ } else {
+ ODPH_ERR("Bad LSO protocol\n");
+ return 0;
+ }
+
+ lso_opt.lso_profile = profile;
+ lso_opt.payload_offset = offset;
+ lso_opt.max_payload_len = max_len;
+
+ pkts_sent = 0;
+
+ for (i = 0; i < num_pkts; i++) {
+ idx = num_pkts_sent + i;
+ pkt = xmt_pkts[idx];
+ xmt_pkt_desc = &xmt_pkt_descs[idx];
+
+ rc = odp_tm_enq_multi_lso(tm_queue, &pkt, 1, &lso_opt);
+
+ CU_ASSERT(rc == 0 || rc == 1);
+
+ if (rc < 0) {
+ ODPH_ERR("Enqueue LSO failed\n");
+ num_pkts_sent += i;
+ return pkts_sent;
+ }
+
+ xmt_pkt_desc->xmt_idx = idx;
+
+ if (rc > 0) {
+ /* Record consumed packets */
+ xmt_pkt_desc->xmt_time = odp_time_local();
+ xmt_pkt_desc->tm_queue = tm_queue;
+ pkts_sent++;
+ } else {
+ /* Free rejected pkts */
+ odp_packet_free(pkt);
+ xmt_pkts[idx] = ODP_PACKET_INVALID;
+ }
+ }
+
+ num_pkts_sent += num_pkts;
+
+ return pkts_sent;
+}
+
static uint32_t pkts_rcvd_in_send_order(void)
{
xmt_pkt_desc_t *xmt_pkt_desc;
@@ -1148,12 +1374,11 @@ static uint32_t pkts_rcvd_in_given_order(uint32_t unique_id_list[],
{
rcv_pkt_desc_t *rcv_pkt_desc;
odp_bool_t is_match;
- uint32_t rcv_pkt_idx, pkts_in_order, pkts_out_of_order;
+ uint32_t rcv_pkt_idx, pkts_in_order;
uint32_t rcv_unique_id;
int last_pkt_idx, pkt_idx;
pkts_in_order = 1;
- pkts_out_of_order = 0;
last_pkt_idx = -1;
pkt_idx = -1;
@@ -1173,12 +1398,8 @@ static uint32_t pkts_rcvd_in_given_order(uint32_t unique_id_list[],
unique_id_list,
unique_id_list_len);
if (0 <= pkt_idx) {
- if (0 <= last_pkt_idx) {
- if (last_pkt_idx < pkt_idx)
- pkts_in_order++;
- else
- pkts_out_of_order++;
- }
+ if (0 <= last_pkt_idx && last_pkt_idx < pkt_idx)
+ pkts_in_order++;
last_pkt_idx = pkt_idx;
}
@@ -1238,8 +1459,8 @@ static inline void calc_rcv_stats(rcv_stats_t *rcv_stats,
last_rcv_gap_idx = (rcv_gap_cnt * (100 - ending_drop_percent)) / 100;
for (idx = first_rcv_gap_idx; idx <= last_rcv_gap_idx; idx++) {
rcv_gap = rcv_gaps[idx];
- rcv_stats->min_rcv_gap = MIN(rcv_stats->min_rcv_gap, rcv_gap);
- rcv_stats->max_rcv_gap = MAX(rcv_stats->max_rcv_gap, rcv_gap);
+ rcv_stats->min_rcv_gap = ODPH_MIN(rcv_stats->min_rcv_gap, rcv_gap);
+ rcv_stats->max_rcv_gap = ODPH_MAX(rcv_stats->max_rcv_gap, rcv_gap);
rcv_stats->total_rcv_gap += rcv_gap;
rcv_stats->total_rcv_gap_squared += rcv_gap * rcv_gap;
rcv_stats->num_samples++;
@@ -1310,18 +1531,20 @@ static int create_tm_queue(odp_tm_t odp_tm,
queue_params.wred_profile[PKT_GREEN] = green_profile;
queue_params.wred_profile[PKT_YELLOW] = yellow_profile;
queue_params.wred_profile[PKT_RED] = red_profile;
+ queue_params.ordered_enqueue = true;
}
tm_queue = odp_tm_queue_create(odp_tm, &queue_params);
if (tm_queue == ODP_TM_INVALID) {
- LOG_ERR("odp_tm_queue_create() failed\n");
+ ODPH_ERR("odp_tm_queue_create() failed\n");
return -1;
}
queue_desc->tm_queues[priority] = tm_queue;
rc = odp_tm_queue_connect(tm_queue, tm_node);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_connect() failed\n");
+ ODPH_ERR("odp_tm_queue_connect() failed for queue %" PRIx64
+ "\n", odp_tm_queue_to_u64(tm_queue));
odp_tm_queue_destroy(tm_queue);
return -1;
}
@@ -1372,6 +1595,9 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
node_params.max_fanin = FANIN_RATIO;
node_params.level = level;
+ /* This is ignored when pkt priority mode is not overwrite */
+ node_params.priority = 0;
+
if (parent_node_desc == NULL)
snprintf(node_name, sizeof(node_name), "node_%" PRIu32,
node_idx + 1);
@@ -1381,8 +1607,8 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
tm_node = odp_tm_node_create(odp_tm, node_name, &node_params);
if (tm_node == ODP_TM_INVALID) {
- LOG_ERR("odp_tm_node_create() failed @ level=%" PRIu32 "\n",
- level);
+ ODPH_ERR("odp_tm_node_create() failed @ level=%" PRIu32 "\n",
+ level);
return NULL;
}
@@ -1394,8 +1620,8 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
rc = odp_tm_node_connect(tm_node, parent_node);
if (rc != 0) {
- LOG_ERR("odp_tm_node_connect() failed @ level=%" PRIu32 "\n",
- level);
+ ODPH_ERR("odp_tm_node_connect() failed @ level=%" PRIu32 "\n",
+ level);
odp_tm_node_destroy(tm_node);
return NULL;
}
@@ -1428,8 +1654,8 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
rc = create_tm_queue(odp_tm, tm_node, node_idx, queue_desc,
priority);
if (rc != 0) {
- LOG_ERR("create_tm_queue() failed @ "
- "level=%" PRIu32 "\n", level);
+ ODPH_ERR("create_tm_queue() failed @ "
+ "level=%" PRIu32 "\n", level);
while (priority > 0)
(void)destroy_tm_queue
(queue_desc->tm_queues[--priority]);
@@ -1454,7 +1680,8 @@ static tm_node_desc_t *create_tm_subtree(odp_tm_t odp_tm,
node_desc = create_tm_node(odp_tm, level, num_levels,
node_idx, parent_node);
if (node_desc == NULL) {
- LOG_ERR("create_tm_node() failed @ level=%" PRIu32 "\n", level);
+ ODPH_ERR("create_tm_node() failed @ level=%" PRIu32 "\n",
+ level);
return NULL;
}
@@ -1464,8 +1691,8 @@ static tm_node_desc_t *create_tm_subtree(odp_tm_t odp_tm,
num_levels, child_idx,
node_desc);
if (child_desc == NULL) {
- LOG_ERR("create_tm_subtree failed "
- "level=%" PRIu32 "\n", level);
+ ODPH_ERR("%s failed level=%" PRIu32 "\n",
+ __func__, level);
return NULL;
}
@@ -1577,12 +1804,65 @@ static uint32_t find_child_queues(uint8_t tm_system_idx,
return num_queues;
}
+static void
+set_reqs_based_on_capas(odp_tm_requirements_t *req)
+{
+ odp_packet_color_t color;
+ int j;
+
+ /* Use tm capabilities identified based on egress capabilities
+ * to see what can be enabled.
+ */
+ if (tm_capabilities.ecn_marking_supported)
+ req->ecn_marking_needed = true;
+ if (tm_capabilities.drop_prec_marking_supported)
+ req->drop_prec_marking_needed = true;
+ if (tm_capabilities.tm_queue_wred_supported)
+ req->tm_queue_wred_needed = true;
+ if (tm_capabilities.tm_queue_dual_slope_supported)
+ req->tm_queue_dual_slope_needed = true;
+ if (tm_capabilities.vlan_marking_supported)
+ req->vlan_marking_needed = true;
+ if (tm_capabilities.tm_queue_threshold.byte ||
+ tm_capabilities.tm_queue_threshold.packet ||
+ tm_capabilities.tm_queue_threshold.byte_and_packet)
+ req->tm_queue_threshold_needed = true;
+
+ for (j = 0; j < tm_capabilities.max_levels; j++) {
+ if (tm_capabilities.per_level[j].tm_node_threshold.byte ||
+ tm_capabilities.per_level[j].tm_node_threshold.packet ||
+ tm_capabilities.per_level[j].tm_node_threshold.byte_and_packet)
+ req->per_level[j].tm_node_threshold_needed = true;
+ }
+
+ /* Mark colors as needed if at least one of the marking
+ * features is needed.
+ */
+ if (req->ecn_marking_needed || req->drop_prec_marking_needed) {
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
+ req->marking_colors_needed[color] = true;
+ }
+
+ if (tm_capabilities.tm_queue_shaper_supported ||
+ tm_capabilities.tm_queue_rate_limiter_supported)
+ req->tm_queue_shaper_needed = true;
+
+ /* We can use any packet priority mode since it does not affect
+ * our tests. Our scheduler test tests scheduling only in a node
+ * directly connected to TM queues and such nodes see the original
+ * packet priority before it could have been overwritten by any node.
+ */
+ req->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_PRESERVE;
+ if (!tm_capabilities.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE])
+ req->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_OVERWRITE;
+
+}
+
static int create_tm_system(void)
{
odp_tm_level_requirements_t *per_level;
odp_tm_requirements_t requirements;
odp_tm_egress_t egress;
- odp_packet_color_t color;
tm_node_desc_t *root_node_desc;
uint32_t level, max_nodes[ODP_TM_MAX_LEVELS];
odp_tm_t odp_tm, found_odp_tm;
@@ -1592,16 +1872,10 @@ static int create_tm_system(void)
odp_tm_requirements_init(&requirements);
odp_tm_egress_init(&egress);
- requirements.max_tm_queues = NUM_TM_QUEUES + 1;
+ requirements.max_tm_queues = NUM_TM_QUEUES;
requirements.num_levels = NUM_LEVELS;
- requirements.tm_queue_shaper_needed = true;
- requirements.tm_queue_wred_needed = true;
- requirements.tm_queue_dual_slope_needed = true;
- requirements.vlan_marking_needed = false;
- requirements.ecn_marking_needed = true;
- requirements.drop_prec_marking_needed = true;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
- requirements.marking_colors_needed[color] = true;
+
+ set_reqs_based_on_capas(&requirements);
/* Set the max_num_tm_nodes to be double the expected number of nodes
* at that level */
@@ -1629,17 +1903,15 @@ static int create_tm_system(void)
snprintf(tm_name, sizeof(tm_name), "TM_system_%" PRIu32,
num_odp_tm_systems);
odp_tm = odp_tm_create(tm_name, &requirements, &egress);
- if (odp_tm == ODP_TM_INVALID) {
- LOG_ERR("odp_tm_create() failed\n");
- return -1;
- }
+ CU_ASSERT_FATAL(odp_tm != ODP_TM_INVALID);
+
odp_tm_systems[num_odp_tm_systems] = odp_tm;
root_node_desc = create_tm_subtree(odp_tm, 0, NUM_LEVELS, 0, NULL);
root_node_descs[num_odp_tm_systems] = root_node_desc;
if (root_node_desc == NULL) {
- LOG_ERR("create_tm_subtree() failed\n");
+ ODPH_ERR("create_tm_subtree() failed\n");
return -1;
}
@@ -1648,13 +1920,28 @@ static int create_tm_system(void)
/* Test odp_tm_capability and odp_tm_find. */
rc = odp_tm_capability(odp_tm, &tm_capabilities);
if (rc != 0) {
- LOG_ERR("odp_tm_capability() failed\n");
+ ODPH_ERR("odp_tm_capability() failed for tm: %" PRIx64 "\n",
+ odp_tm_to_u64(odp_tm));
return -1;
}
+ /* Update dynamic capability flags from created tm system */
+ dynamic_shaper_update = tm_capabilities.dynamic_shaper_update;
+ dynamic_sched_update = tm_capabilities.dynamic_sched_update;
+ dynamic_threshold_update = tm_capabilities.dynamic_threshold_update;
+ dynamic_wred_update = tm_capabilities.dynamic_wred_update;
+
found_odp_tm = odp_tm_find(tm_name, &requirements, &egress);
if ((found_odp_tm == ODP_TM_INVALID) || (found_odp_tm != odp_tm)) {
- LOG_ERR("odp_tm_find() failed\n");
+ ODPH_ERR("odp_tm_find() failed\n");
+ return -1;
+ }
+
+ /* Start TM system */
+ CU_ASSERT((rc = odp_tm_start(odp_tm)) == 0);
+ if (rc != 0) {
+ ODPH_ERR("odp_tm_start() failed for tm: %" PRIx64 "\n",
+ odp_tm_to_u64(odp_tm));
return -1;
}
@@ -1672,8 +1959,8 @@ static void dump_tm_subtree(tm_node_desc_t *node_desc)
rc = odp_tm_node_info(node_desc->node, &node_info);
if (rc != 0) {
- LOG_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
- node_desc->node);
+ ODPH_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
+ odp_tm_node_to_u64(node_desc->node));
}
num_queues = 0;
@@ -1684,8 +1971,9 @@ static void dump_tm_subtree(tm_node_desc_t *node_desc)
"level=%" PRIu32" parent=0x%" PRIX64 " children=%" PRIu32 " "
"queues=%" PRIu32 " queue_fanin=%" PRIu32 " "
"node_fanin=%" PRIu32 "\n",
- node_desc, node_desc->node_name, node_desc->node,
- node_desc->node_idx, node_desc->level, node_desc->parent_node,
+ (void *)node_desc, node_desc->node_name,
+ odp_tm_node_to_u64(node_desc->node), node_desc->node_idx,
+ node_desc->level, odp_tm_node_to_u64(node_desc->parent_node),
node_desc->num_children, num_queues, node_info.tm_queue_fanin,
node_info.tm_node_fanin);
@@ -1713,15 +2001,15 @@ static int unconfig_tm_queue_profiles(odp_tm_queue_t tm_queue)
rc = odp_tm_queue_info(tm_queue, &queue_info);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_info failed code=%d\n", rc);
+ ODPH_ERR("odp_tm_queue_info failed code=%d\n", rc);
return rc;
}
if (queue_info.shaper_profile != ODP_TM_INVALID) {
rc = odp_tm_queue_shaper_config(tm_queue, ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_shaper_config failed code=%d\n",
- rc);
+ ODPH_ERR("odp_tm_queue_shaper_config failed code=%d\n",
+ rc);
return rc;
}
}
@@ -1729,8 +2017,8 @@ static int unconfig_tm_queue_profiles(odp_tm_queue_t tm_queue)
if (queue_info.threshold_profile != ODP_TM_INVALID) {
rc = odp_tm_queue_threshold_config(tm_queue, ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_threshold_config failed "
- "code=%d\n", rc);
+ ODPH_ERR("odp_tm_queue_threshold_config failed "
+ "code=%d\n", rc);
return rc;
}
}
@@ -1741,8 +2029,8 @@ static int unconfig_tm_queue_profiles(odp_tm_queue_t tm_queue)
rc = odp_tm_queue_wred_config(tm_queue, color,
ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_wred_config failed "
- "color=%" PRIu32 " code=%d\n",
+ ODPH_ERR("odp_tm_queue_wred_config failed "
+ "color=%" PRIu32 " code=%d\n",
color, rc);
return rc;
}
@@ -1764,24 +2052,24 @@ static int destroy_tm_queues(tm_queue_desc_t *queue_desc)
if (tm_queue != ODP_TM_INVALID) {
rc = odp_tm_queue_disconnect(tm_queue);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_disconnect failed "
- "idx=%" PRIu32 " code=%d\n",
+ ODPH_ERR("odp_tm_queue_disconnect failed "
+ "idx=%" PRIu32 " code=%d\n",
queue_idx, rc);
return rc;
}
rc = unconfig_tm_queue_profiles(tm_queue);
if (rc != 0) {
- LOG_ERR("unconfig_tm_queue_profiles failed "
- "idx=%" PRIu32 " code=%d\n",
+ ODPH_ERR("unconfig_tm_queue_profiles failed "
+ "idx=%" PRIu32 " code=%d\n",
queue_idx, rc);
return rc;
}
rc = odp_tm_queue_destroy(tm_queue);
if (rc != 0) {
- LOG_ERR("odp_tm_queue_destroy failed "
- "idx=%" PRIu32 " code=%d\n",
+ ODPH_ERR("odp_tm_queue_destroy failed "
+ "idx=%" PRIu32 " code=%d\n",
queue_idx, rc);
return rc;
}
@@ -1801,15 +2089,15 @@ static int unconfig_tm_node_profiles(odp_tm_node_t tm_node)
rc = odp_tm_node_info(tm_node, &node_info);
if (rc != 0) {
- LOG_ERR("odp_tm_node_info failed code=%d\n", rc);
+ ODPH_ERR("odp_tm_node_info failed code=%d\n", rc);
return rc;
}
if (node_info.shaper_profile != ODP_TM_INVALID) {
rc = odp_tm_node_shaper_config(tm_node, ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_node_shaper_config failed code=%d\n",
- rc);
+ ODPH_ERR("odp_tm_node_shaper_config failed code=%d\n",
+ rc);
return rc;
}
}
@@ -1817,8 +2105,8 @@ static int unconfig_tm_node_profiles(odp_tm_node_t tm_node)
if (node_info.threshold_profile != ODP_TM_INVALID) {
rc = odp_tm_node_threshold_config(tm_node, ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_node_threshold_config failed "
- "code=%d\n", rc);
+ ODPH_ERR("odp_tm_node_threshold_config failed "
+ "code=%d\n", rc);
return rc;
}
}
@@ -1829,9 +2117,9 @@ static int unconfig_tm_node_profiles(odp_tm_node_t tm_node)
rc = odp_tm_node_wred_config(tm_node, color,
ODP_TM_INVALID);
if (rc != 0) {
- LOG_ERR("odp_tm_node_wred_config failed "
- "color=%" PRIu32 " code=%d\n",
- color, rc);
+ ODPH_ERR("odp_tm_node_wred_config failed "
+ "color=%" PRIu32 " code=%d\n",
+ color, rc);
return rc;
}
}
@@ -1854,9 +2142,8 @@ static int destroy_tm_subtree(tm_node_desc_t *node_desc)
if (child_desc != NULL) {
rc = destroy_tm_subtree(child_desc);
if (rc != 0) {
- LOG_ERR("destroy_tm_subtree failed "
- "child_num=%" PRIu32 " code=%d\n",
- child_num, rc);
+ ODPH_ERR("%s failed child_num=%" PRIu32 " "
+ "code=%d\n", __func__, child_num, rc);
return rc;
}
}
@@ -1866,7 +2153,7 @@ static int destroy_tm_subtree(tm_node_desc_t *node_desc)
if (queue_desc != NULL) {
rc = destroy_tm_queues(queue_desc);
if (rc != 0) {
- LOG_ERR("destroy_tm_queues failed code=%d\n", rc);
+ ODPH_ERR("destroy_tm_queues failed code=%d\n", rc);
return rc;
}
}
@@ -1874,19 +2161,19 @@ static int destroy_tm_subtree(tm_node_desc_t *node_desc)
tm_node = node_desc->node;
rc = odp_tm_node_disconnect(tm_node);
if (rc != 0) {
- LOG_ERR("odp_tm_node_disconnect failed code=%d\n", rc);
+ ODPH_ERR("odp_tm_node_disconnect failed code=%d\n", rc);
return rc;
}
rc = unconfig_tm_node_profiles(tm_node);
if (rc != 0) {
- LOG_ERR("unconfig_tm_node_profiles failed code=%d\n", rc);
+ ODPH_ERR("unconfig_tm_node_profiles failed code=%d\n", rc);
return rc;
}
rc = odp_tm_node_destroy(tm_node);
if (rc != 0) {
- LOG_ERR("odp_tm_node_destroy failed code=%d\n", rc);
+ ODPH_ERR("odp_tm_node_destroy failed code=%d\n", rc);
return rc;
}
@@ -1908,8 +2195,11 @@ static int destroy_all_shaper_profiles(void)
if (shaper_profile != ODP_TM_INVALID) {
rc = odp_tm_shaper_destroy(shaper_profile);
if (rc != 0) {
- LOG_ERR("odp_tm_sched_destroy failed "
- "idx=%" PRIu32 " code=%d\n", idx, rc);
+ ODPH_ERR("odp_tm_sched_destroy failed "
+ "node=%" PRIx64 " idx=%" PRIu32
+ " code=%d\n",
+ odp_tm_shaper_to_u64(shaper_profile),
+ idx, rc);
return rc;
}
shaper_profiles[idx] = ODP_TM_INVALID;
@@ -1930,8 +2220,11 @@ static int destroy_all_sched_profiles(void)
if (sched_profile != ODP_TM_INVALID) {
rc = odp_tm_sched_destroy(sched_profile);
if (rc != 0) {
- LOG_ERR("odp_tm_sched_destroy failed "
- "idx=%" PRIu32 " code=%d\n", idx, rc);
+ ODPH_ERR("odp_tm_sched_destroy failed "
+ "node=%" PRIx64 " idx=%" PRIu32
+ " code=%d\n",
+ odp_tm_sched_to_u64(sched_profile),
+ idx, rc);
return rc;
}
sched_profiles[idx] = ODP_TM_INVALID;
@@ -1943,17 +2236,20 @@ static int destroy_all_sched_profiles(void)
static int destroy_all_threshold_profiles(void)
{
- odp_tm_threshold_t threshold_profile;
+ odp_tm_threshold_t thr_profile;
uint32_t idx;
int rc;
for (idx = 0; idx < NUM_THRESHOLD_PROFILES; idx++) {
- threshold_profile = threshold_profiles[idx];
- if (threshold_profile != ODP_TM_INVALID) {
- rc = odp_tm_threshold_destroy(threshold_profile);
+ thr_profile = threshold_profiles[idx];
+ if (thr_profile != ODP_TM_INVALID) {
+ rc = odp_tm_threshold_destroy(thr_profile);
if (rc != 0) {
- LOG_ERR("odp_tm_threshold_destroy failed "
- "idx=%" PRIu32 " code=%d\n", idx, rc);
+ ODPH_ERR("odp_tm_threshold_destroy failed "
+ "node=%" PRIx64 " idx=%" PRIu32
+ " code=%d\n",
+ odp_tm_threshold_to_u64(thr_profile),
+ idx, rc);
return rc;
}
threshold_profiles[idx] = ODP_TM_INVALID;
@@ -1965,20 +2261,21 @@ static int destroy_all_threshold_profiles(void)
static int destroy_all_wred_profiles(void)
{
- odp_tm_wred_t wred_profile;
+ odp_tm_wred_t wred_prof;
uint32_t idx, color;
int rc;
for (idx = 0; idx < NUM_WRED_PROFILES; idx++) {
for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
- wred_profile = wred_profiles[idx][color];
- if (wred_profile != ODP_TM_INVALID) {
- rc = odp_tm_wred_destroy(wred_profile);
+ wred_prof = wred_profiles[idx][color];
+ if (wred_prof != ODP_TM_INVALID) {
+ rc = odp_tm_wred_destroy(wred_prof);
if (rc != 0) {
- LOG_ERR("odp_tm_wred_destroy failed "
- "idx=%" PRIu32 " "
- "color=%" PRIu32 " code=%d\n",
- idx, color, rc);
+ ODPH_ERR("odp_tm_wred_destroy failed "
+ "node=%" PRIx64 " idx=%" PRIu32
+ " color=%" PRIu32 " code=%d\n",
+ odp_tm_wred_to_u64(wred_prof),
+ idx, color, rc);
return rc;
}
wred_profiles[idx][color] = ODP_TM_INVALID;
@@ -1995,25 +2292,25 @@ static int destroy_all_profiles(void)
rc = destroy_all_shaper_profiles();
if (rc != 0) {
- LOG_ERR("destroy_all_shaper_profiles failed code=%d\n", rc);
+ ODPH_ERR("destroy_all_shaper_profiles failed code=%d\n", rc);
return rc;
}
rc = destroy_all_sched_profiles();
if (rc != 0) {
- LOG_ERR("destroy_all_sched_profiles failed code=%d\n", rc);
+ ODPH_ERR("destroy_all_sched_profiles failed code=%d\n", rc);
return rc;
}
rc = destroy_all_threshold_profiles();
if (rc != 0) {
- LOG_ERR("destroy_all_threshold_profiles failed code=%d\n", rc);
+ ODPH_ERR("destroy_all_threshold_profiles failed code=%d\n", rc);
return rc;
}
rc = destroy_all_wred_profiles();
if (rc != 0) {
- LOG_ERR("destroy_all_wred_profiles failed code=%d\n", rc);
+ ODPH_ERR("destroy_all_wred_profiles failed code=%d\n", rc);
return rc;
}
@@ -2026,11 +2323,16 @@ static int destroy_tm_systems(void)
/* Close/free the TM systems. */
for (idx = 0; idx < num_odp_tm_systems; idx++) {
+ if (odp_tm_stop(odp_tm_systems[idx]) != 0)
+ return -1;
+
if (destroy_tm_subtree(root_node_descs[idx]) != 0)
return -1;
if (odp_tm_destroy(odp_tm_systems[idx]) != 0)
return -1;
+
+ odp_tm_systems[idx] = ODP_TM_INVALID;
}
/* Close/free the TM profiles. */
@@ -2040,9 +2342,12 @@ static int destroy_tm_systems(void)
return 0;
}
-int traffic_mngr_suite_init(void)
+static int traffic_mngr_suite_init(void)
{
+ odp_tm_capabilities_t egress_capa;
uint32_t payload_len, copy_len;
+ odp_tm_egress_t egress;
+ int j, ret;
/* Initialize some global variables. */
num_pkts_made = 0;
@@ -2055,7 +2360,7 @@ int traffic_mngr_suite_init(void)
payload_len = 0;
while (payload_len < MAX_PAYLOAD) {
- copy_len = MIN(MAX_PAYLOAD - payload_len, sizeof(ALPHABET));
+ copy_len = ODPH_MIN(MAX_PAYLOAD - payload_len, sizeof(ALPHABET));
memcpy(&payload_data[payload_len], ALPHABET, copy_len);
payload_len += copy_len;
}
@@ -2078,29 +2383,133 @@ int traffic_mngr_suite_init(void)
iface_name[0], iface_name[1]);
}
- if (open_pktios() != 0)
+ pktios[0] = ODP_PKTIO_INVALID;
+ pktios[1] = ODP_PKTIO_INVALID;
+
+ ret = open_pktios();
+ if (ret < 0)
return -1;
+ /* Positive return indicates, that pktio open failed with out mode as TM
+ * but succeeded with direct mode.
+ */
+ if (ret > 0)
+ goto skip_tests;
+
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
+ egress.pktio = xmt_pktio;
+
+ /* Get TM capabilities */
+ ret = odp_tm_egress_capabilities(&egress_capa, &egress);
+ if (ret) {
+ ODPH_ERR("Failed to retrieve tm capabilities");
+ return ret;
+ }
+
+ /* Check for sufficient TM queues */
+ if (egress_capa.max_tm_queues < NUM_TM_QUEUES)
+ goto skip_tests;
+
+ /* Check for sufficient TM levels */
+ if (egress_capa.max_levels < NUM_LEVELS)
+ goto skip_tests;
+
+ tm_shaper_min_rate = egress_capa.per_level[0].min_rate;
+ tm_shaper_max_rate = egress_capa.per_level[0].max_rate;
+ tm_shaper_min_burst = egress_capa.per_level[0].min_burst;
+ tm_shaper_max_burst = egress_capa.per_level[0].max_burst;
+
+ for (j = 0; j < NUM_LEVELS; j++) {
+ odp_tm_level_capabilities_t *per_level =
+ &egress_capa.per_level[j];
+
+ /* Per node fanin */
+ if (per_level->max_fanin_per_node < FANIN_RATIO)
+ break;
+
+ if (j == 0)
+ continue;
+
+ if (per_level->min_rate > tm_shaper_min_rate)
+ tm_shaper_min_rate = per_level->min_rate;
+
+ if (per_level->min_burst > tm_shaper_min_burst)
+ tm_shaper_min_burst = per_level->min_burst;
+
+ if (per_level->max_rate < tm_shaper_max_rate)
+ tm_shaper_max_rate = per_level->max_rate;
+
+ if (per_level->max_burst < tm_shaper_max_burst)
+ tm_shaper_max_burst = per_level->max_burst;
+ }
+
+ if (tm_shaper_min_rate > tm_shaper_max_rate ||
+ tm_shaper_min_burst > tm_shaper_max_burst)
+ goto skip_tests;
+
+ if (j != NUM_LEVELS)
+ goto skip_tests;
+
+ if (egress_capa.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] &&
+ egress_capa.max_schedulers_per_node < NUM_QUEUES_PER_NODE)
+ goto skip_tests;
+
+ if (!egress_capa.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] &&
+ egress_capa.max_schedulers_per_node < 1)
+ goto skip_tests;
+
+ /* Init tm capabilities with matching egress capa until tm is created */
+ tm_capabilities = egress_capa;
+
+ if (!tm_capabilities.dynamic_shaper_update)
+ dynamic_shaper_update = false;
+
+ if (!tm_capabilities.dynamic_sched_update)
+ dynamic_sched_update = false;
+
+ if (!tm_capabilities.dynamic_threshold_update)
+ dynamic_threshold_update = false;
+
+ if (!tm_capabilities.dynamic_wred_update)
+ dynamic_wred_update = false;
+
+ return 0;
+skip_tests:
+ /* Mark all tests as inactive under this suite */
+ odp_cunit_set_inactive();
+ suite_inactive++;
return 0;
}
-int traffic_mngr_suite_term(void)
+static int traffic_mngr_suite_term(void)
{
uint32_t iface;
/* Close the pktios and associated packet pools. */
free_rcvd_pkts();
for (iface = 0; iface < num_ifaces; iface++) {
- if (odp_pktio_stop(pktios[iface]) != 0)
- return -1;
+ /* Skip pktios not initialized */
+ if (pktios[iface] != ODP_PKTIO_INVALID) {
+ if (pktio_started[iface] &&
+ odp_pktio_stop(pktios[iface]) != 0)
+ return -1;
- if (odp_pktio_close(pktios[iface]) != 0)
- return -1;
+ if (odp_pktio_close(pktios[iface]) != 0)
+ return -1;
+ pktios[iface] = ODP_PKTIO_INVALID;
+ pktio_started[iface] = false;
+ }
if (odp_pool_destroy(pools[iface]) != 0)
return -1;
+
+ pools[iface] = ODP_POOL_INVALID;
}
+ if (odp_cunit_print_inactive())
+ return -1;
+
return 0;
}
@@ -2108,6 +2517,7 @@ static void check_shaper_profile(char *shaper_name, uint32_t shaper_idx)
{
odp_tm_shaper_params_t shaper_params;
odp_tm_shaper_t profile;
+ int rc;
profile = odp_tm_shaper_lookup(shaper_name);
CU_ASSERT(profile != ODP_TM_INVALID);
@@ -2115,21 +2525,23 @@ static void check_shaper_profile(char *shaper_name, uint32_t shaper_idx)
if (profile != shaper_profiles[shaper_idx - 1])
return;
- odp_tm_shaper_params_read(profile, &shaper_params);
- CU_ASSERT(approx_eq64(shaper_params.commit_bps,
- shaper_idx * MIN_COMMIT_BW));
- CU_ASSERT(approx_eq64(shaper_params.peak_bps,
- shaper_idx * MIN_PEAK_BW));
+ memset(&shaper_params, 0, sizeof(shaper_params));
+ rc = odp_tm_shaper_params_read(profile, &shaper_params);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(approx_eq64(shaper_params.commit_rate,
+ clamp_rate(shaper_idx * MIN_COMMIT_BW)));
+ CU_ASSERT(approx_eq64(shaper_params.peak_rate,
+ clamp_rate(shaper_idx * MIN_PEAK_BW)));
CU_ASSERT(approx_eq32(shaper_params.commit_burst,
- shaper_idx * MIN_COMMIT_BURST));
+ clamp_burst(shaper_idx * MIN_COMMIT_BURST)));
CU_ASSERT(approx_eq32(shaper_params.peak_burst,
- shaper_idx * MIN_PEAK_BURST));
+ clamp_burst(shaper_idx * MIN_PEAK_BURST)));
CU_ASSERT(shaper_params.shaper_len_adjust == SHAPER_LEN_ADJ);
- CU_ASSERT(shaper_params.dual_rate == 0);
+ CU_ASSERT(shaper_params.dual_rate == true);
}
-void traffic_mngr_test_shaper_profile(void)
+static void traffic_mngr_test_shaper_profile(void)
{
odp_tm_shaper_params_t shaper_params;
odp_tm_shaper_t profile;
@@ -2138,15 +2550,15 @@ void traffic_mngr_test_shaper_profile(void)
odp_tm_shaper_params_init(&shaper_params);
shaper_params.shaper_len_adjust = SHAPER_LEN_ADJ;
- shaper_params.dual_rate = 0;
+ shaper_params.dual_rate = true;
for (idx = 1; idx <= NUM_SHAPER_TEST_PROFILES; idx++) {
snprintf(shaper_name, sizeof(shaper_name),
"shaper_profile_%" PRIu32, idx);
- shaper_params.commit_bps = idx * MIN_COMMIT_BW;
- shaper_params.peak_bps = idx * MIN_PEAK_BW;
- shaper_params.commit_burst = idx * MIN_COMMIT_BURST;
- shaper_params.peak_burst = idx * MIN_PEAK_BURST;
+ shaper_params.commit_rate = clamp_rate(idx * MIN_COMMIT_BW);
+ shaper_params.peak_rate = clamp_rate(idx * MIN_PEAK_BW);
+ shaper_params.commit_burst = clamp_burst(idx * MIN_COMMIT_BURST);
+ shaper_params.peak_burst = clamp_burst(idx * MIN_PEAK_BURST);
profile = odp_tm_shaper_create(shaper_name, &shaper_params);
CU_ASSERT_FATAL(profile != ODP_TM_INVALID);
@@ -2193,7 +2605,7 @@ static void check_sched_profile(char *sched_name, uint32_t sched_idx)
}
}
-void traffic_mngr_test_sched_profile(void)
+static void traffic_mngr_test_sched_profile(void)
{
odp_tm_sched_params_t sched_params;
odp_tm_sched_t profile;
@@ -2205,7 +2617,7 @@ void traffic_mngr_test_sched_profile(void)
for (idx = 1; idx <= NUM_SCHED_TEST_PROFILES; idx++) {
snprintf(sched_name, sizeof(sched_name),
"sched_profile_%" PRIu32, idx);
- for (priority = 0; priority < 16; priority++) {
+ for (priority = 0; priority < ODP_TM_MAX_PRIORITIES; priority++) {
sched_params.sched_modes[priority] =
ODP_TM_BYTE_BASED_WEIGHTS;
sched_params.sched_weights[priority] = 8 + idx +
@@ -2236,10 +2648,12 @@ void traffic_mngr_test_sched_profile(void)
}
static void check_threshold_profile(char *threshold_name,
- uint32_t threshold_idx)
+ uint32_t threshold_idx,
+ threshold_type_t threshold)
{
odp_tm_threshold_params_t threshold_params;
odp_tm_threshold_t profile;
+ int ret;
profile = odp_tm_thresholds_lookup(threshold_name);
CU_ASSERT(profile != ODP_TM_INVALID);
@@ -2248,16 +2662,23 @@ static void check_threshold_profile(char *threshold_name,
if (profile == threshold_profiles[threshold_idx - 1])
return;
- odp_tm_thresholds_params_read(profile, &threshold_params);
- CU_ASSERT(threshold_params.max_pkts ==
- threshold_idx * MIN_PKT_THRESHOLD);
- CU_ASSERT(threshold_params.max_bytes ==
- threshold_idx * MIN_BYTE_THRESHOLD);
- CU_ASSERT(threshold_params.enable_max_pkts == 1);
- CU_ASSERT(threshold_params.enable_max_bytes == 1);
+ ret = odp_tm_thresholds_params_read(profile, &threshold_params);
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ return;
+
+ if (threshold == THRESHOLD_PACKET || threshold == THRESHOLD_BYTE_AND_PACKET) {
+ CU_ASSERT(threshold_params.enable_max_pkts == 1);
+ CU_ASSERT(threshold_params.max_pkts == threshold_idx * MIN_PKT_THRESHOLD);
+ }
+ if (threshold == THRESHOLD_BYTE || threshold == THRESHOLD_BYTE_AND_PACKET) {
+ CU_ASSERT(threshold_params.enable_max_bytes == 1);
+ CU_ASSERT(threshold_params.max_bytes == threshold_idx * MIN_BYTE_THRESHOLD);
+ }
}
-void traffic_mngr_test_threshold_profile(void)
+static void traffic_mngr_test_threshold_profile(threshold_type_t threshold)
{
odp_tm_threshold_params_t threshold_params;
odp_tm_threshold_t profile;
@@ -2265,14 +2686,19 @@ void traffic_mngr_test_threshold_profile(void)
char threshold_name[TM_NAME_LEN];
odp_tm_threshold_params_init(&threshold_params);
- threshold_params.enable_max_pkts = 1;
- threshold_params.enable_max_bytes = 1;
+
+ if (threshold == THRESHOLD_PACKET || threshold == THRESHOLD_BYTE_AND_PACKET)
+ threshold_params.enable_max_pkts = 1;
+ if (threshold == THRESHOLD_BYTE || threshold == THRESHOLD_BYTE_AND_PACKET)
+ threshold_params.enable_max_bytes = 1;
for (idx = 1; idx <= NUM_THRESH_TEST_PROFILES; idx++) {
snprintf(threshold_name, sizeof(threshold_name),
"threshold_profile_%" PRIu32, idx);
- threshold_params.max_pkts = idx * MIN_PKT_THRESHOLD;
- threshold_params.max_bytes = idx * MIN_BYTE_THRESHOLD;
+ if (threshold == THRESHOLD_PACKET || threshold == THRESHOLD_BYTE_AND_PACKET)
+ threshold_params.max_pkts = idx * MIN_PKT_THRESHOLD;
+ if (threshold == THRESHOLD_BYTE || threshold == THRESHOLD_BYTE_AND_PACKET)
+ threshold_params.max_bytes = idx * MIN_BYTE_THRESHOLD;
profile = odp_tm_threshold_create(threshold_name,
&threshold_params);
@@ -2294,16 +2720,37 @@ void traffic_mngr_test_threshold_profile(void)
threshold_idx = ((3 + 7 * idx) % NUM_THRESH_TEST_PROFILES) + 1;
snprintf(threshold_name, sizeof(threshold_name),
"threshold_profile_%" PRIu32, threshold_idx);
- check_threshold_profile(threshold_name, threshold_idx);
+ check_threshold_profile(threshold_name, threshold_idx, threshold);
+ }
+
+ for (i = 0; i < NUM_THRESH_TEST_PROFILES; i++) {
+ CU_ASSERT(odp_tm_threshold_destroy(threshold_profiles[i]) == 0);
+ num_threshold_profiles--;
}
}
+static void traffic_mngr_test_threshold_profile_byte(void)
+{
+ traffic_mngr_test_threshold_profile(THRESHOLD_BYTE);
+}
+
+static void traffic_mngr_test_threshold_profile_packet(void)
+{
+ traffic_mngr_test_threshold_profile(THRESHOLD_PACKET);
+}
+
+static void traffic_mngr_test_threshold_profile_byte_and_packet(void)
+{
+ traffic_mngr_test_threshold_profile(THRESHOLD_BYTE_AND_PACKET);
+}
+
static void check_wred_profile(char *wred_name,
uint32_t wred_idx,
uint32_t color)
{
odp_tm_wred_params_t wred_params;
odp_tm_wred_t profile;
+ int ret;
profile = odp_tm_wred_lookup(wred_name);
CU_ASSERT(profile != ODP_TM_INVALID);
@@ -2311,7 +2758,12 @@ static void check_wred_profile(char *wred_name,
if (profile != wred_profiles[wred_idx - 1][color])
return;
- odp_tm_wred_params_read(profile, &wred_params);
+ ret = odp_tm_wred_params_read(profile, &wred_params);
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ return;
+
CU_ASSERT(wred_params.min_threshold == wred_idx * MIN_WRED_THRESH);
CU_ASSERT(wred_params.med_threshold == wred_idx * MED_WRED_THRESH);
CU_ASSERT(wred_params.med_drop_prob == wred_idx * MED_DROP_PROB);
@@ -2321,7 +2773,7 @@ static void check_wred_profile(char *wred_name,
CU_ASSERT(wred_params.use_byte_fullness == 0);
}
-void traffic_mngr_test_wred_profile(void)
+static void traffic_mngr_test_wred_profile(void)
{
odp_tm_wred_params_t wred_params;
odp_tm_wred_t profile;
@@ -2375,28 +2827,44 @@ void traffic_mngr_test_wred_profile(void)
static int set_shaper(const char *node_name,
const char *shaper_name,
- const uint64_t commit_bps,
- const uint64_t commit_burst_in_bits)
+ uint64_t commit_bps,
+ uint64_t commit_burst_in_bits)
{
odp_tm_shaper_params_t shaper_params;
odp_tm_shaper_t shaper_profile;
odp_tm_node_t tm_node;
+ int rc;
+
+ commit_bps = clamp_rate(commit_bps);
+ commit_burst_in_bits = clamp_burst(commit_burst_in_bits);
tm_node = find_tm_node(0, node_name);
if (tm_node == ODP_TM_INVALID) {
- LOG_ERR("find_tm_node(%s) failed\n", node_name);
+ ODPH_ERR("find_tm_node(%s) failed\n", node_name);
CU_ASSERT_FATAL(tm_node != ODP_TM_INVALID);
return -1;
}
odp_tm_shaper_params_init(&shaper_params);
- shaper_params.commit_bps = commit_bps;
- shaper_params.peak_bps = 0;
+ shaper_params.commit_rate = commit_bps;
+ shaper_params.peak_rate = 0;
shaper_params.commit_burst = commit_burst_in_bits;
shaper_params.peak_burst = 0;
shaper_params.shaper_len_adjust = 0;
shaper_params.dual_rate = 0;
+ if (!dynamic_shaper_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
+ if (!shaper_name) {
+ shaper_profile = ODP_TM_INVALID;
+ goto skip_profile;
+ }
+
/* First see if a shaper profile already exists with this name, in
* which case we use that profile, else create a new one. */
shaper_profile = odp_tm_shaper_lookup(shaper_name);
@@ -2409,36 +2877,68 @@ static int set_shaper(const char *node_name,
num_shaper_profiles++;
}
- return odp_tm_node_shaper_config(tm_node, shaper_profile);
+skip_profile:
+ rc = odp_tm_node_shaper_config(tm_node, shaper_profile);
+
+ if (!dynamic_shaper_update) {
+ /* Start TM system, post update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
}
-int traffic_mngr_check_shaper(void)
+static int traffic_mngr_check_shaper(void)
{
odp_cpumask_t cpumask;
int cpucount = odp_cpumask_all_available(&cpumask);
+/* Skip the shaper test on arm64 systems */
+#if defined(__aarch64__)
+ printf("\nTemporarily skip shaper test which intermittently "
+ "fails on arm64 systems. Will be activated when issue "
+ "is resolved\n");
+ return ODP_TEST_INACTIVE;
+#endif
+
if (cpucount < 2) {
- LOG_DBG("\nSkipping shaper test because cpucount = %d "
- "is less then min number 2 required\n", cpucount);
- LOG_DBG("Rerun with more cpu resources\n");
+ ODPH_DBG("\nSkipping shaper test because cpucount = %d "
+ "is less then min number 2 required\n", cpucount);
+ ODPH_DBG("Rerun with more cpu resources\n");
return ODP_TEST_INACTIVE;
}
+ /* This test needs 1 Mbps, 4 Mbps, 10 Mpbs, 40 Mbps, 100 Mbps */
+ if ((tm_shaper_min_rate > 100 * MBPS) || (tm_shaper_max_rate < 1 * MBPS))
+ return ODP_TEST_INACTIVE;
+
+ /* All the subtests run with burst of 10000 bits */
+ if ((tm_shaper_min_burst > 10000) || tm_shaper_max_burst < 10000)
+ return ODP_TEST_INACTIVE;
+
return ODP_TEST_ACTIVE;
}
-int traffic_mngr_check_scheduler(void)
+static int traffic_mngr_check_scheduler(void)
{
odp_cpumask_t cpumask;
int cpucount = odp_cpumask_all_available(&cpumask);
if (cpucount < 2) {
- LOG_DBG("\nSkipping scheduler test because cpucount = %d "
- "is less then min number 2 required\n", cpucount);
- LOG_DBG("Rerun with more cpu resources\n");
+ ODPH_DBG("\nSkipping scheduler test because cpucount = %d "
+ "is less then min number 2 required\n", cpucount);
+ ODPH_DBG("Rerun with more cpu resources\n");
return ODP_TEST_INACTIVE;
}
+ /* Scheduler test test_sched_queue_priority() depends on rate of
+ * 64 Kbps and burst of 5600.
+ */
+ if ((tm_shaper_min_rate > 64 * 1000) ||
+ (tm_shaper_max_rate < 64 * 1000) ||
+ (tm_shaper_min_burst > 5600) ||
+ (tm_shaper_max_burst < 5600))
+ return ODP_TEST_INACTIVE;
+
return ODP_TEST_ACTIVE;
}
@@ -2495,19 +2995,19 @@ static int test_shaper_bw(const char *shaper_name,
/* This is fairly major failure in that most of the pkts didn't
* even get received, regardless of rate or order. Log the error
* to assist with debugging */
- LOG_ERR("Sent %" PRIu32 " pkts but only %" PRIu32 " "
- "came back\n", pkts_sent, num_rcv_pkts);
+ ODPH_ERR("Sent %" PRIu32 " pkts but only %" PRIu32 " "
+ "came back\n", pkts_sent, num_rcv_pkts);
CU_ASSERT(num_rcv_pkts <= (pkts_sent / 2));
} else if (pkts_rcvd_in_order <= 32) {
- LOG_ERR("Sent %" PRIu32 " pkts but only %" PRIu32 " "
- "came back (%" PRIu32 " in order)\n",
- pkts_sent, num_rcv_pkts, pkts_rcvd_in_order);
+ ODPH_ERR("Sent %" PRIu32 " pkts but only %" PRIu32 " "
+ "came back (%" PRIu32 " in order)\n",
+ pkts_sent, num_rcv_pkts, pkts_rcvd_in_order);
CU_ASSERT(pkts_rcvd_in_order <= 32);
} else {
if (pkts_rcvd_in_order < pkts_sent)
- LOG_DBG("Info: of %" PRIu32 " pkts sent %" PRIu32 " "
- "came back (%" PRIu32 " in order)\n",
- pkts_sent, num_rcv_pkts, pkts_rcvd_in_order);
+ ODPH_DBG("Info: of %" PRIu32 " pkts sent %" PRIu32 " "
+ "came back (%" PRIu32 " in order)\n",
+ pkts_sent, num_rcv_pkts, pkts_rcvd_in_order);
/* Next determine the inter arrival receive pkt statistics. */
rc = rcv_rate_stats(&rcv_stats, pkt_info.pkt_class);
@@ -2522,33 +3022,36 @@ static int test_shaper_bw(const char *shaper_name,
100) + 2;
if ((avg_rcv_gap < min_rcv_gap) ||
(max_rcv_gap < avg_rcv_gap)) {
- LOG_ERR("min=%" PRIu32 " avg_rcv_gap=%" PRIu32 " "
- "max=%" PRIu32 " std_dev_gap=%" PRIu32 "\n",
- rcv_stats.min_rcv_gap, avg_rcv_gap,
- rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
- LOG_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
- "rcv_gap range=%" PRIu32 "..%" PRIu32 "\n",
- expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
+ ODPH_ERR("min=%" PRIu32 " avg_rcv_gap=%" PRIu32 " "
+ "max=%" PRIu32 " std_dev_gap=%" PRIu32 "\n",
+ rcv_stats.min_rcv_gap, avg_rcv_gap,
+ rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
+ ODPH_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
+ "rcv_gap range=%" PRIu32 "..%" PRIu32 "\n",
+ expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
+ ODPH_ERR("agv_rcv_gap=%" PRIu32 " acceptable "
+ "rcv_gap range=%" PRIu32 "..%" PRIu32 "\n",
+ avg_rcv_gap, min_rcv_gap, max_rcv_gap);
+ ret_code = -1;
} else if (expected_rcv_gap_us < rcv_stats.std_dev_gap) {
- LOG_ERR("min=%" PRIu32 " avg_rcv_gap=%" PRIu32 " "
- "max=%" PRIu32 " std_dev_gap=%" PRIu32 "\n",
- rcv_stats.min_rcv_gap, avg_rcv_gap,
- rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
- LOG_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
- "rcv_gap range=%" PRIu32 "..%" PRIu32 "\n",
- expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
- ret_code = 0;
+ ODPH_ERR("min=%" PRIu32 " avg_rcv_gap=%" PRIu32 " "
+ "max=%" PRIu32 " std_dev_gap=%" PRIu32 "\n",
+ rcv_stats.min_rcv_gap, avg_rcv_gap,
+ rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
+ ODPH_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
+ "rcv_gap range=%" PRIu32 "..%" PRIu32 "\n",
+ expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
+ ODPH_ERR("std_dev_gap=%" PRIu32 " > "
+ "expected_rcv_gap_us=%" PRIu64 "\n",
+ rcv_stats.std_dev_gap, expected_rcv_gap_us);
+ ret_code = -1;
} else {
ret_code = 0;
}
-
- CU_ASSERT((min_rcv_gap <= avg_rcv_gap) &&
- (avg_rcv_gap <= max_rcv_gap));
- CU_ASSERT(rcv_stats.std_dev_gap <= expected_rcv_gap_us);
}
/* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return ret_code;
@@ -2572,7 +3075,14 @@ static int set_sched_fanin(const char *node_name,
if (node_desc == NULL)
return -1;
- fanin_cnt = MIN(node_desc->num_children, FANIN_RATIO);
+ if (!dynamic_sched_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
+ fanin_cnt = ODPH_MIN(node_desc->num_children, FANIN_RATIO);
for (fanin = 0; fanin < fanin_cnt; fanin++) {
odp_tm_sched_params_init(&sched_params);
sched_weight = sched_weights[fanin];
@@ -2608,10 +3118,15 @@ static int set_sched_fanin(const char *node_name,
rc = odp_tm_node_sched_config(tm_node, fanin_node,
sched_profile);
if (rc != 0)
- return -1;
+ goto exit;
}
- return 0;
+exit:
+ if (!dynamic_sched_update) {
+ /* Start TM system, post update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
}
static int test_sched_queue_priority(const char *shaper_name,
@@ -2656,18 +3171,23 @@ static int test_sched_queue_priority(const char *shaper_name,
/* Send the low priority dummy pkts first. The arrival order of
* these pkts will be ignored. */
- pkts_sent = send_pkts(tm_queues[NUM_PRIORITIES - 1], 4);
+ pkts_sent = send_pkts_multi(tm_queues[NUM_PRIORITIES - 1], 4);
/* Now send "num_pkts" first at the lowest priority, then "num_pkts"
* at the second lowest priority, etc until "num_pkts" are sent last
* at the highest priority. */
for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--)
- pkts_sent += send_pkts(tm_queues[priority], num_pkts);
+ pkts_sent += send_pkts_multi(tm_queues[priority], num_pkts);
- busy_wait(1000000); /* wait 1 millisecond */
+ busy_wait(100 * ODP_TIME_MSEC_IN_NS);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, NULL, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkt_cnt + 4, 64 * 1000);
@@ -2678,13 +3198,15 @@ static int test_sched_queue_priority(const char *shaper_name,
pkts_in_order = pkts_rcvd_in_given_order(unique_id_list, pkt_cnt, 0,
false, false);
if (pkts_in_order != pkt_cnt) {
- LOG_ERR("pkts_sent=%" PRIu32 " pkt_cnt=%" PRIu32 " "
- "num_rcv_pkts=%" PRIu32 " rcvd_in_order=%" PRIu32 "\n",
- pkts_sent, pkt_cnt, num_rcv_pkts, pkts_in_order);
+ ODPH_ERR("pkts_sent=%" PRIu32 " pkt_cnt=%" PRIu32 " "
+ "num_rcv_pkts=%" PRIu32 " rcvd_in_order=%" PRIu32 "\n",
+ pkts_sent, pkt_cnt, num_rcv_pkts, pkts_in_order);
}
CU_ASSERT(pkts_in_order == pkt_cnt);
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2770,10 +3292,15 @@ static int test_sched_node_priority(const char *shaper_name,
}
}
- busy_wait(1000000); /* wait 1 millisecond */
+ busy_wait(100 * ODP_TIME_MSEC_IN_NS);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, NULL, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkts_sent, 64 * 1000);
@@ -2785,6 +3312,8 @@ static int test_sched_node_priority(const char *shaper_name,
0, false, false);
CU_ASSERT(pkts_in_order == total_pkt_cnt);
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2818,7 +3347,7 @@ static int test_sched_wfq(const char *sched_base_name,
/* Now determine at least one tm_queue that feeds into each fanin/
* child node. */
priority = 0;
- fanin_cnt = MIN(node_desc->num_children, FANIN_RATIO);
+ fanin_cnt = ODPH_MIN(node_desc->num_children, FANIN_RATIO);
for (fanin = 0; fanin < fanin_cnt; fanin++) {
child_desc = node_desc->children[fanin];
num_queues = find_child_queues(0, child_desc, priority,
@@ -2862,11 +3391,17 @@ static int test_sched_wfq(const char *sched_base_name,
if (FANIN_RATIO <= fanin)
fanin = 0;
}
+ CU_ASSERT(pkts_sent == pkt_cnt + 4);
busy_wait(1000000); /* wait 1 millisecond */
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, NULL, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkt_cnt + 4, 64 * 1000);
@@ -2878,6 +3413,8 @@ static int test_sched_wfq(const char *sched_base_name,
CU_ASSERT(rcv_rate_stats(&rcv_stats[fanin], pkt_class) == 0);
}
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2888,21 +3425,41 @@ static int set_queue_thresholds(odp_tm_queue_t tm_queue,
odp_tm_threshold_params_t *threshold_params)
{
odp_tm_threshold_t threshold_profile;
+ int ret;
+
+ if (!dynamic_threshold_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
/* First see if a threshold profile already exists with this name, in
* which case we use that profile, else create a new one. */
threshold_profile = odp_tm_thresholds_lookup(threshold_name);
if (threshold_profile != ODP_TM_INVALID) {
- odp_tm_thresholds_params_update(threshold_profile,
- threshold_params);
+ ret = odp_tm_thresholds_params_update(threshold_profile,
+ threshold_params);
+ if (ret)
+ goto exit;
} else {
threshold_profile = odp_tm_threshold_create(threshold_name,
threshold_params);
+ if (threshold_profile == ODP_TM_INVALID) {
+ ret = -1;
+ goto exit;
+ }
threshold_profiles[num_threshold_profiles] = threshold_profile;
num_threshold_profiles++;
}
- return odp_tm_queue_threshold_config(tm_queue, threshold_profile);
+ ret = odp_tm_queue_threshold_config(tm_queue, threshold_profile);
+exit:
+ if (!dynamic_threshold_update) {
+ /* Start TM system, post update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return ret;
}
static int test_threshold(const char *threshold_name,
@@ -2915,32 +3472,36 @@ static int test_threshold(const char *threshold_name,
odp_tm_threshold_params_t threshold_params;
odp_tm_queue_t tm_queue;
pkt_info_t pkt_info;
- uint32_t num_pkts, pkt_len, pkts_sent;
+ uint32_t pkt_len, pkts_sent;
+ uint32_t num_pkts = 0;
odp_tm_threshold_params_init(&threshold_params);
if (max_pkts != 0) {
- max_pkts = MIN(max_pkts, MAX_PKTS / 3);
+ max_pkts = ODPH_MIN(max_pkts, MAX_PKTS / 3);
threshold_params.max_pkts = max_pkts;
threshold_params.enable_max_pkts = true;
num_pkts = 2 * max_pkts;
pkt_len = 256;
- } else if (max_bytes != 0) {
- max_bytes = MIN(max_bytes, MAX_PKTS * MAX_PAYLOAD / 3);
+ }
+
+ if (max_bytes != 0) {
+ max_bytes = ODPH_MIN(max_bytes, MAX_PKTS * MAX_PAYLOAD / 3);
threshold_params.max_bytes = max_bytes;
threshold_params.enable_max_bytes = true;
num_pkts = 2 * max_bytes / MAX_PAYLOAD;
pkt_len = MAX_PAYLOAD;
- } else {
- return -1;
}
+ if (max_pkts == 0 && max_bytes == 0)
+ return -1;
+
/* Pick a tm_queue and set the tm_queue's threshold profile and then
* send in twice the amount of traffic as suggested by the thresholds
* and make sure at least SOME pkts get dropped. */
tm_queue = find_tm_queue(0, node_name, priority);
if (set_queue_thresholds(tm_queue, threshold_name,
&threshold_params) != 0) {
- LOG_ERR("set_queue_thresholds failed\n");
+ ODPH_ERR("set_queue_thresholds failed\n");
return -1;
}
@@ -2951,7 +3512,7 @@ static int test_threshold(const char *threshold_name,
pkt_info.drop_eligible = true;
pkt_info.pkt_class = 1;
if (make_pkts(num_pkts, pkt_len, &pkt_info) != 0) {
- LOG_ERR("make_pkts failed\n");
+ ODPH_ERR("make_pkts failed\n");
return -1;
}
@@ -2961,7 +3522,7 @@ static int test_threshold(const char *threshold_name,
1 * GBPS);
/* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
@@ -2979,7 +3540,7 @@ static wred_pkt_cnts_t *search_expected_pkt_rcv_tbl(odp_tm_percent_t confidence,
uint32_t idx, table_size;
/* Search the EXPECTED_PKT_RCVD table to find a matching entry */
- table_size = sizeof(EXPECTED_PKT_RCVD) / sizeof(wred_pkt_cnts_t);
+ table_size = ODPH_ARRAY_SIZE(EXPECTED_PKT_RCVD);
for (idx = 0; idx < table_size; idx++) {
wred_pkt_cnts = &EXPECTED_PKT_RCVD[idx];
if ((wred_pkt_cnts->confidence_percent == confidence) &&
@@ -2999,6 +3560,7 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
{
odp_tm_wred_params_t wred_params;
odp_tm_wred_t wred_profile;
+ int rc;
odp_tm_wred_params_init(&wred_params);
if (use_dual_slope) {
@@ -3016,6 +3578,13 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
wred_params.enable_wred = true;
wred_params.use_byte_fullness = use_byte_fullness;
+ if (!dynamic_wred_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
/* First see if a wred profile already exists with this name, in
* which case we use that profile, else create a new one. */
wred_profile = odp_tm_wred_lookup(wred_name);
@@ -3034,7 +3603,14 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
}
}
- return odp_tm_queue_wred_config(tm_queue, pkt_color, wred_profile);
+ rc = odp_tm_queue_wred_config(tm_queue, pkt_color, wred_profile);
+
+ if (!dynamic_wred_update) {
+ /* Start TM system, post update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
+
}
static int test_byte_wred(const char *wred_name,
@@ -3051,6 +3627,7 @@ static int test_byte_wred(const char *wred_name,
odp_tm_queue_t tm_queue;
pkt_info_t pkt_info;
uint32_t num_fill_pkts, num_test_pkts, pkts_sent;
+ int ret;
/* Pick the tm_queue and set the tm_queue's wred profile to drop the
* given percentage of traffic, then send 100 pkts and see how many
@@ -3070,7 +3647,7 @@ static int test_byte_wred(const char *wred_name,
threshold_params.enable_max_bytes = true;
if (set_queue_thresholds(tm_queue, threshold_name,
&threshold_params) != 0) {
- LOG_ERR("set_queue_thresholds failed\n");
+ ODPH_ERR("set_queue_thresholds failed\n");
return -1;
}
@@ -3098,8 +3675,14 @@ static int test_byte_wred(const char *wred_name,
pkts_sent = send_pkts(tm_queue, num_test_pkts);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, NULL, 0, 0);
+
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
num_fill_pkts + pkts_sent, 64 * 1000);
@@ -3109,16 +3692,18 @@ static int test_byte_wred(const char *wred_name,
if (wred_pkt_cnts == NULL)
return -1;
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
- if ((wred_pkt_cnts->min_cnt <= pkts_sent) &&
- (pkts_sent <= wred_pkt_cnts->max_cnt))
- return 0;
-
- CU_ASSERT((wred_pkt_cnts->min_cnt <= pkts_sent) &&
- (pkts_sent <= wred_pkt_cnts->max_cnt));
- return 0;
+ ret = !((wred_pkt_cnts->min_cnt <= pkts_sent) &&
+ (pkts_sent <= wred_pkt_cnts->max_cnt));
+ if (ret)
+ ODPH_DBG("min %" PRIu32 " pkts %" PRIu32 " max %" PRIu32 "\n",
+ wred_pkt_cnts->min_cnt, pkts_sent,
+ wred_pkt_cnts->max_cnt);
+ return odp_cunit_ret(ret);
}
static int test_pkt_wred(const char *wred_name,
@@ -3135,6 +3720,7 @@ static int test_pkt_wred(const char *wred_name,
odp_tm_queue_t tm_queue;
pkt_info_t pkt_info;
uint32_t num_fill_pkts, num_test_pkts, pkts_sent;
+ int ret;
/* Pick the tm_queue and set the tm_queue's wred profile to drop the
* given percentage of traffic, then send 100 pkts and see how many
@@ -3151,9 +3737,11 @@ static int test_pkt_wred(const char *wred_name,
odp_tm_threshold_params_init(&threshold_params);
threshold_params.max_pkts = 1000;
threshold_params.enable_max_pkts = true;
- if (set_queue_thresholds(tm_queue, threshold_name,
- &threshold_params) != 0) {
- LOG_ERR("set_queue_thresholds failed\n");
+
+ ret = set_queue_thresholds(tm_queue, threshold_name,
+ &threshold_params);
+ if (ret) {
+ ODPH_ERR("set_queue_thresholds failed\n");
return -1;
}
@@ -3180,10 +3768,20 @@ static int test_pkt_wred(const char *wred_name,
pkts_sent = send_pkts(tm_queue, num_test_pkts);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
- num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
- num_fill_pkts + pkts_sent, 64 * 1000);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, NULL, 0, 0);
+
+ ret = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ num_fill_pkts + pkts_sent, 64 * 1000);
+ if (ret < 0)
+ return -1;
+
+ num_rcv_pkts = ret;
/* Search the EXPECTED_PKT_RCVD table to find a matching entry */
wred_pkt_cnts = search_expected_pkt_rcv_tbl(TM_PERCENT(99.9),
@@ -3191,15 +3789,19 @@ static int test_pkt_wred(const char *wred_name,
if (wred_pkt_cnts == NULL)
return -1;
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, NULL, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
- if ((wred_pkt_cnts->min_cnt <= pkts_sent) &&
- (pkts_sent <= wred_pkt_cnts->max_cnt))
- return 0;
+ if ((pkts_sent < wred_pkt_cnts->min_cnt) ||
+ (pkts_sent > wred_pkt_cnts->max_cnt)) {
+ ODPH_ERR("min_cnt %d <= pkts_sent %d <= max_cnt %d\n",
+ wred_pkt_cnts->min_cnt, pkts_sent,
+ wred_pkt_cnts->max_cnt);
+ return -1;
+ }
- CU_ASSERT((wred_pkt_cnts->min_cnt <= pkts_sent) &&
- (pkts_sent <= wred_pkt_cnts->max_cnt));
return 0;
}
@@ -3232,6 +3834,7 @@ static int test_query_functions(const char *shaper_name,
expected_pkt_cnt = num_pkts - 2;
expected_byte_cnt = expected_pkt_cnt * PKT_BUF_SIZE;
+ memset(&query_info, 0, sizeof(query_info));
rc = odp_tm_queue_query(tm_queue,
ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
&query_info);
@@ -3241,6 +3844,7 @@ static int test_query_functions(const char *shaper_name,
CU_ASSERT(query_info.total_byte_cnt_valid);
CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
+ memset(&query_info, 0, sizeof(query_info));
rc = odp_tm_priority_query(odp_tm_systems[0], priority,
ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
&query_info);
@@ -3250,6 +3854,7 @@ static int test_query_functions(const char *shaper_name,
CU_ASSERT(query_info.total_byte_cnt_valid);
CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
+ memset(&query_info, 0, sizeof(query_info));
rc = odp_tm_total_query(odp_tm_systems[0],
ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
&query_info);
@@ -3260,7 +3865,7 @@ static int test_query_functions(const char *shaper_name,
CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
/* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ set_shaper(node_name, NULL, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, num_pkts,
commit_bps);
@@ -3290,7 +3895,7 @@ static int check_vlan_marking_pkts(void)
/* Make sure no VLAN header. */
if (odp_packet_has_vlan(rcv_pkt)) {
err_cnt++;
- LOG_ERR("VLAN incorrectly added\n");
+ ODPH_ERR("VLAN incorrectly added\n");
CU_ASSERT(odp_packet_has_vlan(rcv_pkt));
}
break;
@@ -3300,7 +3905,7 @@ static int check_vlan_marking_pkts(void)
/* Make sure it does have a VLAN header */
if (!odp_packet_has_vlan(rcv_pkt)) {
err_cnt++;
- LOG_ERR("VLAN header missing\n");
+ ODPH_ERR("VLAN header missing\n");
CU_ASSERT(!odp_packet_has_vlan(rcv_pkt));
break;
}
@@ -3309,7 +3914,7 @@ static int check_vlan_marking_pkts(void)
* pkt_class == 3. */
if (get_vlan_tci(rcv_pkt, &tci) != 0) {
err_cnt++;
- LOG_ERR("VLAN header missing\n");
+ ODPH_ERR("VLAN header missing\n");
CU_ASSERT(!odp_packet_has_vlan(rcv_pkt));
break;
}
@@ -3317,8 +3922,8 @@ static int check_vlan_marking_pkts(void)
dei = (tci >> ODPH_VLANHDR_DEI_SHIFT) & 1;
expected_dei = (pkt_class == 2) ? 0 : 1;
if (dei != expected_dei) {
- LOG_ERR("expected_dei=%u rcvd dei=%u\n",
- expected_dei, dei);
+ ODPH_ERR("expected_dei=%u rcvd dei=%u\n",
+ expected_dei, dei);
err_cnt++;
CU_ASSERT(dei == expected_dei);
}
@@ -3327,7 +3932,7 @@ static int check_vlan_marking_pkts(void)
default:
/* Log error but otherwise ignore, since it is
* probably a stray pkt from a previous test. */
- LOG_ERR("Pkt rcvd with invalid pkt class\n");
+ ODPH_ERR("Pkt rcvd with invalid pkt class\n");
}
}
@@ -3350,7 +3955,7 @@ static int test_vlan_marking(const char *node_name,
for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
rc = odp_tm_vlan_marking(odp_tm, color, false);
if (rc != 0) {
- LOG_ERR("disabling odp_tm_vlan_marking() failed\n");
+ ODPH_ERR("disabling odp_tm_vlan_marking() failed\n");
return -1;
}
}
@@ -3360,7 +3965,7 @@ static int test_vlan_marking(const char *node_name,
tm_queue = find_tm_queue(0, node_name, 0);
if (tm_queue == ODP_TM_INVALID) {
- LOG_ERR("No tm_queue found for node_name='%s'\n", node_name);
+ ODPH_ERR("No tm_queue found for node_name='%s'\n", node_name);
return -1;
}
@@ -3393,11 +3998,11 @@ static int test_vlan_marking(const char *node_name,
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
1000 * 1000);
if (num_rcv_pkts == 0) {
- LOG_ERR("No pkts received\n");
+ ODPH_ERR("No pkts received\n");
rc = -1;
} else if (num_rcv_pkts != pkts_sent) {
- LOG_ERR("pkts_sent=%" PRIu32 " but num_rcv_pkts=%" PRIu32 "\n",
- pkts_sent, num_rcv_pkts);
+ ODPH_ERR("pkts_sent=%" PRIu32 " but num_rcv_pkts=%" PRIu32 "\n",
+ pkts_sent, num_rcv_pkts);
dump_rcvd_pkts(0, num_rcv_pkts - 1);
CU_ASSERT(num_rcv_pkts == pkts_sent);
} else {
@@ -3459,9 +4064,9 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
if (rc != 1) {
if (use_ipv6)
- LOG_ERR("Expected IPv6 pkt but got IPv4");
+ ODPH_ERR("Expected IPv6 pkt but got IPv4");
else
- LOG_ERR("Expected IPv4 pkt but got IPv6");
+ ODPH_ERR("Expected IPv4 pkt but got IPv6");
return -1;
}
@@ -3474,9 +4079,9 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
if (rc != 1) {
if (use_tcp)
- LOG_ERR("Expected TCP pkt but got UDP");
+ ODPH_ERR("Expected TCP pkt but got UDP");
else
- LOG_ERR("Expected UDP pkt but got TCP");
+ ODPH_ERR("Expected UDP pkt but got TCP");
return -1;
}
@@ -3484,7 +4089,7 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
/* Now get the tos field to see if it was changed */
rc = get_ip_tos(rcv_pkt, &tos);
if (rc != 0) {
- LOG_ERR("get_ip_tos failed\n");
+ ODPH_ERR("get_ip_tos failed\n");
return -1;
}
@@ -3492,8 +4097,8 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
case 2:
/* Tos field must be unchanged. */
if (unmarked_tos != tos) {
- LOG_ERR("Tos was changed from 0x%X to 0x%X\n",
- unmarked_tos, tos);
+ ODPH_ERR("Tos was changed from 0x%X to 0x%X\n",
+ unmarked_tos, tos);
return -1;
}
break;
@@ -3501,8 +4106,8 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
case 3:
/* Tos field must be changed. */
if (tos != expected_tos) {
- LOG_ERR("tos=0x%X instead of expected 0x%X\n",
- tos, expected_tos);
+ ODPH_ERR("tos=0x%X instead of expected 0x%X\n",
+ tos, expected_tos);
CU_ASSERT(tos == expected_tos);
}
break;
@@ -3510,8 +4115,8 @@ static int check_tos_marking_pkts(odp_bool_t use_ipv6,
default:
/* Log error but otherwise ignore, since it is
* probably a stray pkt from a previous test. */
- LOG_ERR("Pkt rcvd with invalid pkt class=%u\n",
- pkt_class);
+ ODPH_ERR("Pkt rcvd with invalid pkt class=%u\n",
+ pkt_class);
}
}
@@ -3540,13 +4145,13 @@ static int test_ip_marking(const char *node_name,
for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
rc = odp_tm_ecn_marking(odp_tm, color, false);
if (rc != 0) {
- LOG_ERR("disabling odp_tm_ecn_marking() failed\n");
+ ODPH_ERR("disabling odp_tm_ecn_marking() failed\n");
return -1;
}
rc = odp_tm_drop_prec_marking(odp_tm, color, false);
if (rc != 0) {
- LOG_ERR("disabling odp_tm_drop_prec_marking failed\n");
+ ODPH_ERR("disabling odp_tm_drop_prec_marking failed\n");
return -1;
}
}
@@ -3558,7 +4163,7 @@ static int test_ip_marking(const char *node_name,
if (test_ecn) {
rc = odp_tm_ecn_marking(odp_tm, pkt_color, true);
if (rc != 0) {
- LOG_ERR("odp_tm_ecn_marking() call failed\n");
+ ODPH_ERR("odp_tm_ecn_marking() call failed\n");
return -1;
}
}
@@ -3566,14 +4171,14 @@ static int test_ip_marking(const char *node_name,
if (test_drop_prec) {
rc = odp_tm_drop_prec_marking(odp_tm, pkt_color, true);
if (rc != 0) {
- LOG_ERR("odp_tm_drop_prec_marking() call failed\n");
+ ODPH_ERR("odp_tm_drop_prec_marking() call failed\n");
return -1;
}
}
tm_queue = find_tm_queue(0, node_name, 0);
if (tm_queue == ODP_TM_INVALID) {
- LOG_ERR("No tm_queue found for node_name='%s'\n", node_name);
+ ODPH_ERR("No tm_queue found for node_name='%s'\n", node_name);
return -1;
}
@@ -3594,7 +4199,7 @@ static int test_ip_marking(const char *node_name,
pkt_info.pkt_class = 2;
if (make_pkts(pkt_cnt, pkt_len, &pkt_info) != 0) {
- LOG_ERR("make_pkts failed\n");
+ ODPH_ERR("make_pkts failed\n");
return -1;
}
}
@@ -3605,12 +4210,12 @@ static int test_ip_marking(const char *node_name,
ret_code = -1;
if (num_rcv_pkts == 0) {
- LOG_ERR("No pkts received\n");
+ ODPH_ERR("No pkts received\n");
CU_ASSERT(num_rcv_pkts != 0);
ret_code = -1;
} else if (num_rcv_pkts != pkts_sent) {
- LOG_ERR("pkts_sent=%" PRIu32 " but num_rcv_pkts=%" PRIu32 "\n",
- pkts_sent, num_rcv_pkts);
+ ODPH_ERR("pkts_sent=%" PRIu32 " but num_rcv_pkts=%" PRIu32 "\n",
+ pkts_sent, num_rcv_pkts);
dump_rcvd_pkts(0, num_rcv_pkts - 1);
CU_ASSERT(num_rcv_pkts == pkts_sent);
ret_code = -1;
@@ -3643,8 +4248,8 @@ static int test_protocol_marking(const char *node_name,
test_ecn, test_drop_prec, new_dscp, dscp_mask);
CU_ASSERT(rc == 0);
if (rc != 0) {
- LOG_ERR("test_ip_marking failed using IPV4/UDP pkts color=%u "
- "test_ecn=%u test_drop_prec=%u\n",
+ ODPH_ERR("test_ip_marking failed using IPV4/UDP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
pkt_color, test_ecn, test_drop_prec);
errs++;
}
@@ -3653,8 +4258,8 @@ static int test_protocol_marking(const char *node_name,
test_ecn, test_drop_prec, new_dscp, dscp_mask);
CU_ASSERT(rc == 0);
if (rc != 0) {
- LOG_ERR("test_ip_marking failed using IPV6/UDP pkts color=%u "
- "test_ecn=%u test_drop_prec=%u\n",
+ ODPH_ERR("test_ip_marking failed using IPV6/UDP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
pkt_color, test_ecn, test_drop_prec);
errs++;
}
@@ -3663,8 +4268,8 @@ static int test_protocol_marking(const char *node_name,
test_ecn, test_drop_prec, new_dscp, dscp_mask);
CU_ASSERT(rc == 0);
if (rc != 0) {
- LOG_ERR("test_ip_marking failed using IPV4/TCP pkts color=%u "
- "test_ecn=%u test_drop_prec=%u\n",
+ ODPH_ERR("test_ip_marking failed using IPV4/TCP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
pkt_color, test_ecn, test_drop_prec);
errs++;
}
@@ -3673,9 +4278,9 @@ static int test_protocol_marking(const char *node_name,
test_ecn, test_drop_prec, new_dscp, dscp_mask);
CU_ASSERT(rc == 0);
if (rc != 0) {
- LOG_ERR("test_ip_marking failed using IPV6/TCP pkts color=%u "
- "test_ecn=%u test_drop_prec=%u\n",
- pkt_color, test_ecn, test_drop_prec);
+ ODPH_ERR("test_ip_marking failed using IPV6/TCP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
+ pkt_color, test_ecn, test_drop_prec);
errs++;
}
@@ -3726,14 +4331,14 @@ static int walk_tree_backwards(odp_tm_node_t tm_node)
* and active tm_queue is reached. */
rc = odp_tm_node_info(tm_node, &node_info);
if (rc != 0) {
- LOG_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
- tm_node);
+ ODPH_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
+ odp_tm_node_to_u64(tm_node));
return rc;
}
if ((node_info.tm_queue_fanin == 0) &&
(node_info.tm_node_fanin == 0)) {
- LOG_ERR("odp_tm_node_info showed no fanin for this node\n");
+ ODPH_ERR("odp_tm_node_info showed no fanin for this node\n");
return -1;
}
@@ -3755,7 +4360,7 @@ static int walk_tree_backwards(odp_tm_node_t tm_node)
if ((fanin_info.tm_queue != ODP_TM_INVALID) &&
(fanin_info.tm_node != ODP_TM_INVALID)) {
- LOG_ERR("Both tm_queue and tm_node are set\n");
+ ODPH_ERR("Both tm_queue and tm_node are set\n");
return -1;
} else if (fanin_info.tm_queue != ODP_TM_INVALID) {
tm_queue_fanin++;
@@ -3766,15 +4371,15 @@ static int walk_tree_backwards(odp_tm_node_t tm_node)
if (first_tm_node == ODP_TM_INVALID)
first_tm_node = fanin_info.tm_node;
} else {
- LOG_ERR("both tm_queue and tm_node are INVALID\n");
+ ODPH_ERR("both tm_queue and tm_node are INVALID\n");
return -1;
}
}
if (tm_queue_fanin != node_info.tm_queue_fanin)
- LOG_ERR("tm_queue_fanin count error\n");
+ ODPH_ERR("tm_queue_fanin count error\n");
else if (tm_node_fanin != node_info.tm_node_fanin)
- LOG_ERR("tm_node_fanin count error\n");
+ ODPH_ERR("tm_node_fanin count error\n");
/* If we have found a tm_queue then we are successfully done. */
if (first_tm_queue != ODP_TM_INVALID)
@@ -3791,41 +4396,189 @@ static int test_fanin_info(const char *node_name)
node_desc = find_node_desc(0, node_name);
if (node_desc == NULL) {
- LOG_ERR("node_name %s not found\n", node_name);
+ ODPH_ERR("node_name %s not found\n", node_name);
return -1;
}
tm_node = node_desc->node;
if (tm_node == ODP_TM_INVALID) {
- LOG_ERR("tm_node is ODP_TM_INVALID\n");
+ ODPH_ERR("tm_node is ODP_TM_INVALID\n");
return -1;
}
return walk_tree_backwards(node_desc->node);
}
-void traffic_mngr_test_capabilities(void)
+static void test_packet_aging(uint64_t tmo_ns, uint32_t pkt_len, odp_bool_t is_dropping)
+{
+ odp_tm_queue_t tm_queue;
+ const char *node_name = "node_1_1_1";
+ const char *shaper_name = "test_shaper";
+ const uint64_t rate = 256 * 1000;
+ pkt_info_t pkt_info;
+ const uint16_t num_pkts = 4;
+ int recv_pkts;
+
+ tm_queue = find_tm_queue(0, node_name, 0);
+ CU_ASSERT_FATAL(tm_queue != ODP_TM_INVALID);
+ init_xmt_pkts(&pkt_info);
+ pkt_info.drop_eligible = false;
+ pkt_info.pkt_class = 1;
+ CU_ASSERT_FATAL(make_pkts(num_pkts, pkt_len, &pkt_info) == 0);
+
+ for (int i = 0; i < num_pkts; i++)
+ odp_packet_aging_tmo_set(xmt_pkts[i], tmo_ns);
+
+ CU_ASSERT_FATAL(set_shaper(node_name, shaper_name, rate, rate) == 0);
+ CU_ASSERT(send_pkts(tm_queue, num_pkts) == num_pkts);
+ recv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, num_pkts, MBPS);
+
+ if (is_dropping)
+ CU_ASSERT(recv_pkts < num_pkts)
+ else
+ CU_ASSERT(recv_pkts == num_pkts);
+
+ set_shaper(node_name, NULL, 0, 0);
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+}
+
+static void test_defaults(uint8_t fill)
+{
+ odp_tm_requirements_t req;
+ odp_tm_shaper_params_t shaper;
+ odp_tm_sched_params_t sched;
+ odp_tm_threshold_params_t threshold;
+ odp_tm_wred_params_t wred;
+ odp_tm_node_params_t node;
+ odp_tm_queue_params_t queue;
+ int n;
+
+ memset(&req, fill, sizeof(req));
+ odp_tm_requirements_init(&req);
+ CU_ASSERT_EQUAL(req.num_levels, 0);
+ CU_ASSERT(!req.tm_queue_shaper_needed);
+ CU_ASSERT(!req.tm_queue_wred_needed);
+ CU_ASSERT(!req.tm_queue_dual_slope_needed);
+ CU_ASSERT(!req.tm_queue_threshold_needed);
+ CU_ASSERT(!req.vlan_marking_needed);
+ CU_ASSERT(!req.ecn_marking_needed);
+ CU_ASSERT(!req.drop_prec_marking_needed);
+ for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
+ CU_ASSERT(!req.marking_colors_needed[n]);
+ CU_ASSERT_EQUAL(req.pkt_prio_mode, ODP_TM_PKT_PRIO_MODE_PRESERVE);
+ for (n = 0; n < ODP_TM_MAX_LEVELS; n++) {
+ odp_tm_level_requirements_t *l_req = &req.per_level[n];
+
+ CU_ASSERT(!l_req->tm_node_shaper_needed);
+ CU_ASSERT(!l_req->tm_node_wred_needed);
+ CU_ASSERT(!l_req->tm_node_dual_slope_needed);
+ CU_ASSERT(!l_req->fair_queuing_needed);
+ CU_ASSERT(!l_req->weights_needed);
+ CU_ASSERT(!l_req->tm_node_threshold_needed);
+ }
+
+ memset(&shaper, fill, sizeof(shaper));
+ odp_tm_shaper_params_init(&shaper);
+ CU_ASSERT(shaper.packet_mode == ODP_TM_SHAPER_RATE_SHAPE);
+ CU_ASSERT_EQUAL(shaper.shaper_len_adjust, 0);
+ CU_ASSERT(!shaper.dual_rate);
+ CU_ASSERT(!shaper.packet_mode);
+
+ memset(&sched, 0xff, sizeof(sched));
+ odp_tm_sched_params_init(&sched);
+ for (n = 0; n < ODP_TM_MAX_PRIORITIES; n++)
+ CU_ASSERT_EQUAL(sched.sched_modes[n], ODP_TM_BYTE_BASED_WEIGHTS);
+
+ memset(&threshold, fill, sizeof(threshold));
+ odp_tm_threshold_params_init(&threshold);
+ CU_ASSERT(!threshold.enable_max_pkts);
+ CU_ASSERT(!threshold.enable_max_bytes);
+
+ memset(&wred, fill, sizeof(wred));
+ odp_tm_wred_params_init(&wred);
+ CU_ASSERT(!wred.enable_wred);
+ CU_ASSERT(!wred.use_byte_fullness);
+
+ memset(&node, fill, sizeof(node));
+ odp_tm_node_params_init(&node);
+ CU_ASSERT_EQUAL(node.shaper_profile, ODP_TM_INVALID);
+ CU_ASSERT_EQUAL(node.threshold_profile, ODP_TM_INVALID);
+ for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
+ CU_ASSERT_EQUAL(node.wred_profile[n], ODP_TM_INVALID);
+
+ memset(&queue, fill, sizeof(queue));
+ odp_tm_queue_params_init(&queue);
+ CU_ASSERT_EQUAL(queue.shaper_profile, ODP_TM_INVALID);
+ CU_ASSERT_EQUAL(queue.threshold_profile, ODP_TM_INVALID);
+ for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
+ CU_ASSERT_EQUAL(queue.wred_profile[n], ODP_TM_INVALID);
+ CU_ASSERT_EQUAL(queue.priority, 0);
+ CU_ASSERT(queue.ordered_enqueue);
+}
+
/* Run the params-init default checks with both poison patterns. */
static void traffic_mngr_test_default_values(void)
{
	const uint8_t fill_patterns[] = {0x00, 0xff};
	size_t i;

	for (i = 0; i < ODPH_ARRAY_SIZE(fill_patterns); i++)
		test_defaults(fill_patterns[i]);
}
+
/* Sanity check the reported TM capabilities. */
static void traffic_mngr_test_capabilities(void)
{
	int rc = test_overall_capabilities();

	CU_ASSERT(rc == 0);
}
-void traffic_mngr_test_tm_create(void)
/* Create the first/primary TM system; later tests depend on it, so
 * failure here is fatal. */
static void traffic_mngr_test_tm_create(void)
{
	int rc = create_tm_system();

	CU_ASSERT_FATAL(rc == 0);
	dump_tm_tree(0);
}
-void traffic_mngr_test_shaper(void)
+static void traffic_mngr_test_shaper(void)
{
- CU_ASSERT(test_shaper_bw("bw1", "node_1_1_1", 0, 1 * MBPS) == 0);
- CU_ASSERT(test_shaper_bw("bw4", "node_1_1_1", 1, 4 * MBPS) == 0);
- CU_ASSERT(test_shaper_bw("bw10", "node_1_1_1", 2, 10 * MBPS) == 0);
- CU_ASSERT(test_shaper_bw("bw40", "node_1_1_1", 3, 40 * MBPS) == 0);
- CU_ASSERT(test_shaper_bw("bw100", "node_1_1_2", 0, 100 * MBPS) == 0);
+ if ((tm_shaper_min_rate <= 1 * MBPS) &&
+ (tm_shaper_max_rate >= 1 * MBPS)) {
+ CU_ASSERT(!odp_cunit_ret(test_shaper_bw("bw1",
+ "node_1_1_1",
+ 0,
+ MBPS * 1)));
+ }
+
+ if ((tm_shaper_min_rate <= 4 * MBPS) &&
+ (tm_shaper_max_rate >= 4 * MBPS)) {
+ CU_ASSERT(!odp_cunit_ret(test_shaper_bw("bw4",
+ "node_1_1_1",
+ 1,
+ 4 * MBPS)));
+ }
+
+ if ((tm_shaper_min_rate <= 10 * MBPS) &&
+ (tm_shaper_max_rate >= 10 * MBPS)) {
+ CU_ASSERT(!odp_cunit_ret(test_shaper_bw("bw10",
+ "node_1_1_1",
+ 2,
+ 10 * MBPS)));
+ }
+
+ if ((tm_shaper_min_rate <= 40 * MBPS) &&
+ (tm_shaper_max_rate >= 40 * MBPS)) {
+ CU_ASSERT(!odp_cunit_ret(test_shaper_bw("bw40",
+ "node_1_1_1",
+ 3,
+ 40 * MBPS)));
+ }
+
+ if ((tm_shaper_min_rate <= 100 * MBPS) &&
+ (tm_shaper_max_rate >= 100 * MBPS)) {
+ CU_ASSERT(!odp_cunit_ret(test_shaper_bw("bw100",
+ "node_1_1_2",
+ 0,
+ 100 * MBPS)));
+ }
}
-void traffic_mngr_test_scheduler(void)
+static void traffic_mngr_test_scheduler(void)
{
CU_ASSERT(test_sched_queue_priority("que_prio", "node_1_1_3", 10) == 0);
return;
@@ -3844,22 +4597,163 @@ void traffic_mngr_test_scheduler(void)
INCREASING_WEIGHTS) == 0);
}
-void traffic_mngr_test_thresholds(void)
+static int traffic_mngr_check_thresholds_byte(void)
+{
+ /* Check only for TM queue threshold support as we only test queue threshold. */
+ if (!tm_capabilities.tm_queue_threshold.byte)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_thresholds_packet(void)
+{
+ /* Check only for TM queue threshold support as we only test queue threshold. */
+ if (!tm_capabilities.tm_queue_threshold.packet)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_thresholds_byte_and_packet(void)
{
- CU_ASSERT(test_threshold("thresh_A", "shaper_A", "node_1_2_1", 0,
- 16, 0) == 0);
- CU_ASSERT(test_threshold("thresh_B", "shaper_B", "node_1_2_1", 1,
+ /* Check only for TM queue threshold support as we only test queue threshold. */
+ if (!tm_capabilities.tm_queue_threshold.byte_and_packet)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
/* Byte based queue threshold: 6400 byte limit, no packet limit. */
static void traffic_mngr_test_thresholds_byte(void)
{
	int rc = test_threshold("thresh_byte", "shaper_B", "node_1_2_1", 1,
				0, 6400);

	CU_ASSERT(rc == 0);
}
-void traffic_mngr_test_byte_wred(void)
/* Packet based queue threshold: 16 packet limit, no byte limit. */
static void traffic_mngr_test_thresholds_packet(void)
{
	int rc = test_threshold("thresh_packet", "shaper_A", "node_1_2_1", 0,
				16, 0);

	CU_ASSERT(rc == 0);
}
+
/* Combined threshold: 16 packet and 6400 byte limits together. */
static void traffic_mngr_test_thresholds_byte_and_packet(void)
{
	int rc = test_threshold("thresh_byte_and_packet", "shaper_A",
				"node_1_2_1", 0, 16, 6400);

	CU_ASSERT(rc == 0);
}
+
+static int traffic_mngr_check_queue_stats(void)
+{
+ if (tm_capabilities.queue_stats.all_counters == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
/* Verify TM queue statistics counters against the queue_stats capability:
 * send a burst through a queue, check that supported counters advance by
 * at least the traffic volume and that unsupported counters stay zero. */
static void traffic_mngr_test_queue_stats(void)
{
	odp_tm_queue_stats_t stats_start, stats_stop;
	odp_tm_queue_t tm_queue;
	odp_tm_capabilities_t capa;
	pkt_info_t pkt_info;
	uint32_t pkts_sent;
	uint32_t num_pkts = ODPH_MIN(50u, MAX_PKTS);
	uint32_t pkt_len = 256;

	CU_ASSERT_FATAL(odp_tm_capability(odp_tm_systems[0], &capa) == 0);

	/* Reuse threshold test node */
	tm_queue = find_tm_queue(0, "node_1_2_1", 0);
	CU_ASSERT_FATAL(tm_queue != ODP_TM_INVALID);
	init_xmt_pkts(&pkt_info);
	/* Not drop eligible: no discards are expected during this run */
	pkt_info.drop_eligible = false;
	pkt_info.pkt_class = 1;
	CU_ASSERT_FATAL(make_pkts(num_pkts, pkt_len, &pkt_info) == 0);

	/* Snapshot counters before sending so deltas can be checked */
	CU_ASSERT(odp_tm_queue_stats(tm_queue, &stats_start) == 0);

	pkts_sent = send_pkts(tm_queue, num_pkts);

	num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
				    1 * GBPS);

	odp_tm_stats_print(odp_tm_systems[0]);

	/* Snapshot again after all sent pkts have been received */
	CU_ASSERT(odp_tm_queue_stats(tm_queue, &stats_stop) == 0);

	/* ">=" because counters may also include pkts from earlier tests */
	if (capa.queue_stats.counter.packets)
		CU_ASSERT(stats_stop.packets >= stats_start.packets + num_rcv_pkts);
	if (capa.queue_stats.counter.octets)
		CU_ASSERT(stats_stop.octets >= stats_start.octets + (num_rcv_pkts * pkt_len));
	/* No drops or errors expected for this traffic */
	CU_ASSERT((stats_stop.discards - stats_start.discards) == 0);
	CU_ASSERT((stats_stop.discard_octets - stats_start.discard_octets) == 0);
	CU_ASSERT((stats_stop.errors - stats_start.errors) == 0);

	printf("\nTM queue statistics\n-------------------\n");
	printf(" discards: %" PRIu64 "\n", stats_stop.discards);
	printf(" discard octets: %" PRIu64 "\n", stats_stop.discard_octets);
	printf(" errors: %" PRIu64 "\n", stats_stop.errors);
	printf(" octets: %" PRIu64 "\n", stats_stop.octets);
	printf(" packets: %" PRIu64 "\n", stats_stop.packets);

	/* Check that all unsupported counters are still zero */
	if (!capa.queue_stats.counter.discards)
		CU_ASSERT(stats_stop.discards == 0);
	if (!capa.queue_stats.counter.discard_octets)
		CU_ASSERT(stats_stop.discard_octets == 0);
	if (!capa.queue_stats.counter.errors)
		CU_ASSERT(stats_stop.errors == 0);
	if (!capa.queue_stats.counter.octets)
		CU_ASSERT(stats_stop.octets == 0);
	if (!capa.queue_stats.counter.packets)
		CU_ASSERT(stats_stop.packets == 0);

	flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
	CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
}
+
+static int traffic_mngr_check_wred(void)
+{
+ /* Check if wred is part of created odp_tm_t capabilities */
+ if (!tm_capabilities.tm_queue_wred_supported)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_byte_wred(void)
+{
+ /* Check if wred is part of created odp_tm_t capabilities */
+ if (!tm_capabilities.tm_queue_wred_supported ||
+ !tm_capabilities.tm_queue_threshold.byte)
+ return ODP_TEST_INACTIVE;
+
+ if ((tm_shaper_min_rate > 64 * 1000) ||
+ (tm_shaper_max_rate < 64 * 1000) ||
+ (tm_shaper_min_burst > 8 * PKT_BUF_SIZE) ||
+ (tm_shaper_max_burst < 8 * PKT_BUF_SIZE))
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_pkt_wred(void)
+{
+ /* Check if wred is part of created odp_tm_t capabilities */
+ if (!tm_capabilities.tm_queue_wred_supported ||
+ !tm_capabilities.tm_queue_threshold.packet)
+ return ODP_TEST_INACTIVE;
+
+ if ((tm_shaper_min_rate > 64 * 1000) ||
+ (tm_shaper_max_rate < 64 * 1000) ||
+ (tm_shaper_min_burst > 1000) ||
+ (tm_shaper_max_burst < 1000))
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static void traffic_mngr_test_byte_wred(void)
+{
CU_ASSERT(test_byte_wred("byte_wred_30G", "byte_bw_30G",
"byte_thresh_30G", "node_1_3_1", 1,
ODP_PACKET_GREEN, TM_PERCENT(30), true) == 0);
@@ -3875,123 +4769,249 @@ void traffic_mngr_test_byte_wred(void)
ODP_PACKET_GREEN, TM_PERCENT(30), false) == 0);
}
-void traffic_mngr_test_pkt_wred(void)
+static void traffic_mngr_test_pkt_wred(void)
{
int rc;
- if (!tm_capabilities.tm_queue_wred_supported) {
- LOG_DBG("\ntest_pkt_wred was not run because tm_capabilities "
- "indicates no WRED support\n");
- return;
- }
-
- CU_ASSERT(test_pkt_wred("pkt_wred_40G", "pkt_bw_40G",
- "pkt_thresh_40G", "node_1_3_2", 1,
- ODP_PACKET_GREEN, TM_PERCENT(30), false) == 0);
+ rc = test_pkt_wred("pkt_wred_40G", "pkt_bw_40G",
+ "pkt_thresh_40G", "node_1_3_2", 1,
+ ODP_PACKET_GREEN, TM_PERCENT(30), false);
+ if (odp_cunit_ret(rc) != 0)
+ CU_FAIL("40G test failed\n");
if (!tm_capabilities.tm_queue_dual_slope_supported) {
- LOG_DBG("since tm_capabilities indicates no dual slope "
- "WRED support these tests are skipped.\n");
+ ODPH_DBG("since tm_capabilities indicates no dual slope "
+ "WRED support these tests are skipped.\n");
return;
}
rc = test_pkt_wred("pkt_wred_30G", "pkt_bw_30G",
"pkt_thresh_30G", "node_1_3_2", 1,
ODP_PACKET_GREEN, TM_PERCENT(30), true);
- CU_ASSERT(rc == 0);
+ if (odp_cunit_ret(rc) != 0)
+ CU_FAIL("30G test failed\n");
+
+ rc = test_pkt_wred("pkt_wred_50Y", "pkt_bw_50Y",
+ "pkt_thresh_50Y", "node_1_3_2", 2,
+ ODP_PACKET_YELLOW, TM_PERCENT(50), true);
+ if (odp_cunit_ret(rc) != 0)
+ CU_FAIL("50Y test failed\n");
+
+ rc = test_pkt_wred("pkt_wred_70R", "pkt_bw_70R",
+ "pkt_thresh_70R", "node_1_3_2", 3,
+ ODP_PACKET_RED, TM_PERCENT(70), true);
+ if (odp_cunit_ret(rc) != 0)
+ CU_FAIL("70R test failed\n");
+}
+
+static int traffic_mngr_check_query(void)
+{
+ uint32_t query_flags = (ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT);
+
+ /* We need both pkt count and byte count query support */
+ if ((tm_capabilities.tm_queue_query_flags & query_flags) != query_flags)
+ return ODP_TEST_INACTIVE;
- CU_ASSERT(test_pkt_wred("pkt_wred_50Y", "pkt_bw_50Y",
- "pkt_thresh_50Y", "node_1_3_2", 2,
- ODP_PACKET_YELLOW, TM_PERCENT(50), true) == 0);
- CU_ASSERT(test_pkt_wred("pkt_wred_70R", "pkt_bw_70R",
- "pkt_thresh_70R", "node_1_3_2", 3,
- ODP_PACKET_RED, TM_PERCENT(70), true) == 0);
+ /* This test uses 64 Kbps rate and a 1000 bit burst size */
+ if (tm_shaper_min_rate > 64 * 1000 ||
+ tm_shaper_max_rate < 64 * 1000 ||
+ tm_shaper_min_burst > 1000 ||
+ tm_shaper_max_burst < 1000)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
}
-void traffic_mngr_test_query(void)
+static void traffic_mngr_test_query(void)
{
CU_ASSERT(test_query_functions("query_shaper", "node_1_3_3", 3, 10)
== 0);
}
-void traffic_mngr_test_marking(void)
+static int traffic_mngr_check_vlan_marking(void)
+{
+ if (!tm_capabilities.vlan_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_ecn_marking(void)
+{
+ if (!tm_capabilities.ecn_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_drop_prec_marking(void)
+{
+ if (!tm_capabilities.drop_prec_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static int traffic_mngr_check_ecn_drop_prec_marking(void)
+{
+ if (!tm_capabilities.ecn_marking_supported ||
+ !tm_capabilities.drop_prec_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
+
+static void traffic_mngr_test_vlan_marking(void)
{
odp_packet_color_t color;
- odp_bool_t test_ecn, test_drop_prec;
- int rc;
- if (tm_capabilities.vlan_marking_supported) {
- for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
- rc = test_vlan_marking("node_1_3_1", color);
- CU_ASSERT(rc == 0);
- }
- } else {
- LOG_DBG("\ntest_vlan_marking was not run because "
- "tm_capabilities indicates no vlan marking support\n");
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ /* Tree is 3 level */
+ CU_ASSERT(test_vlan_marking("node_1_3_1", color) == 0);
}
+}
- if (tm_capabilities.ecn_marking_supported) {
- test_ecn = true;
- test_drop_prec = false;
+static void traffic_mngr_test_ecn_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_3_2", true, false) == 0);
+}
- rc = ip_marking_tests("node_1_3_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- } else {
- LOG_DBG("\necn_marking tests were not run because "
- "tm_capabilities indicates no ecn marking support\n");
- }
+static void traffic_mngr_test_drop_prec_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_4_2", false, true) == 0);
+}
- if (tm_capabilities.drop_prec_marking_supported) {
- test_ecn = false;
- test_drop_prec = true;
+static void traffic_mngr_test_ecn_drop_prec_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_4_2", true, true) == 0);
+}
- rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- } else {
- LOG_DBG("\ndrop_prec marking tests were not run because "
- "tm_capabilities indicates no drop precedence "
- "marking support\n");
- }
+static int traffic_mngr_check_tx_aging(void)
+{
+ return xmt_pktio_capa.max_tx_aging_tmo_ns ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
- if (tm_capabilities.ecn_marking_supported &&
- tm_capabilities.drop_prec_marking_supported) {
- test_ecn = true;
- test_drop_prec = true;
+static void traffic_mngr_test_tx_aging_no_drop(void)
+{
+ /* Set very long aging tmo, packets should not be dropped due to aging */
+ test_packet_aging(60000000000, 128, false);
+}
- rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- }
+static void traffic_mngr_test_tx_aging_drop(void)
+{
+ /* Set very short aging tmo, there should be drops due to aging */
+ test_packet_aging(10, MAX_PAYLOAD, true);
}
-void traffic_mngr_test_fanin_info(void)
+static void traffic_mngr_test_fanin_info(void)
{
CU_ASSERT(test_fanin_info("node_1") == 0);
CU_ASSERT(test_fanin_info("node_1_2") == 0);
CU_ASSERT(test_fanin_info("node_1_3_7") == 0);
}
-void traffic_mngr_test_destroy(void)
+static int traffic_mngr_check_lso_ipv4(void)
+{
+ if (xmt_pktio_capa.lso.max_profiles == 0 || xmt_pktio_capa.lso.max_profiles_per_pktio == 0)
+ return ODP_TEST_INACTIVE;
+
+ if (xmt_pktio_capa.lso.proto.ipv4 == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void traffic_mngr_test_lso_ipv4(void)
+{
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ uint32_t pkts_sent;
+ int32_t ret, expect;
+ uint32_t num_pkts = 20;
+ uint32_t pkt_len = 256;
+
+ /* Reuse shaper test node */
+ tm_queue = find_tm_queue(0, "node_1_1_1", 0);
+ CU_ASSERT_FATAL(tm_queue != ODP_TM_INVALID);
+
+ /* IPv4 / UDP packets */
+ init_xmt_pkts(&pkt_info);
+ pkt_info.drop_eligible = false;
+ pkt_info.pkt_class = 1;
+ pkt_info.use_ipv6 = 0;
+ pkt_info.use_tcp = 0;
+ CU_ASSERT_FATAL(make_pkts(num_pkts, pkt_len, &pkt_info) == 0);
+
+ pkts_sent = send_pkts_lso(tm_queue, num_pkts, ODP_LSO_PROTO_IPV4, pkt_len / 2);
+ CU_ASSERT(pkts_sent == num_pkts);
+
+ expect = 2 * pkts_sent;
+ ret = receive_loop(odp_tm_systems[0], rcv_pktin, expect, 1000 * ODP_TIME_MSEC_IN_NS);
+ CU_ASSERT(ret > 0);
+
+ num_rcv_pkts = ret;
+
+ /* As packet size is small, there should not be a reason to
+ * split each packet into more than two segments. */
+ CU_ASSERT(ret == expect);
+ if (ret != expect) {
+ ODPH_ERR("\nReceived %i packets, expected %i\n", ret, expect);
+
+ if (ret < 0)
+ num_rcv_pkts = 0;
+ }
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+}
+
+static void traffic_mngr_test_destroy(void)
{
CU_ASSERT(destroy_tm_systems() == 0);
}
odp_testinfo_t traffic_mngr_suite[] = {
+ ODP_TEST_INFO(traffic_mngr_test_default_values),
ODP_TEST_INFO(traffic_mngr_test_capabilities),
ODP_TEST_INFO(traffic_mngr_test_tm_create),
ODP_TEST_INFO(traffic_mngr_test_shaper_profile),
ODP_TEST_INFO(traffic_mngr_test_sched_profile),
- ODP_TEST_INFO(traffic_mngr_test_threshold_profile),
- ODP_TEST_INFO(traffic_mngr_test_wred_profile),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_threshold_profile_byte,
+ traffic_mngr_check_thresholds_byte),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_threshold_profile_packet,
+ traffic_mngr_check_thresholds_packet),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_threshold_profile_byte_and_packet,
+ traffic_mngr_check_thresholds_byte_and_packet),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_wred_profile,
+ traffic_mngr_check_wred),
ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_shaper,
traffic_mngr_check_shaper),
ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_scheduler,
traffic_mngr_check_scheduler),
- ODP_TEST_INFO(traffic_mngr_test_thresholds),
- ODP_TEST_INFO(traffic_mngr_test_byte_wred),
- ODP_TEST_INFO(traffic_mngr_test_pkt_wred),
- ODP_TEST_INFO(traffic_mngr_test_query),
- ODP_TEST_INFO(traffic_mngr_test_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_thresholds_byte,
+ traffic_mngr_check_thresholds_byte),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_thresholds_packet,
+ traffic_mngr_check_thresholds_packet),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_thresholds_byte_and_packet,
+ traffic_mngr_check_thresholds_byte_and_packet),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_byte_wred,
+ traffic_mngr_check_byte_wred),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_pkt_wred,
+ traffic_mngr_check_pkt_wred),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_query,
+ traffic_mngr_check_query),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_queue_stats,
+ traffic_mngr_check_queue_stats),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_vlan_marking,
+ traffic_mngr_check_vlan_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_ecn_marking,
+ traffic_mngr_check_ecn_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_drop_prec_marking,
+ traffic_mngr_check_drop_prec_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_ecn_drop_prec_marking,
+ traffic_mngr_check_ecn_drop_prec_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_tx_aging_no_drop,
+ traffic_mngr_check_tx_aging),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_tx_aging_drop,
+ traffic_mngr_check_tx_aging),
ODP_TEST_INFO(traffic_mngr_test_fanin_info),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_lso_ipv4, traffic_mngr_check_lso_ipv4),
ODP_TEST_INFO(traffic_mngr_test_destroy),
ODP_TEST_INFO_NULL,
};
@@ -4002,10 +5022,10 @@ odp_suiteinfo_t traffic_mngr_suites[] = {
ODP_SUITE_INFO_NULL
};
-int traffic_mngr_main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
/* parse common options: */
- if (odp_cunit_parse_options(argc, argv))
+ if (odp_cunit_parse_options(&argc, argv))
return -1;
int ret = odp_cunit_register(traffic_mngr_suites);
@@ -4013,5 +5033,8 @@ int traffic_mngr_main(int argc, char *argv[])
if (ret == 0)
ret = odp_cunit_run();
+ /* Exit with 77 in order to indicate that test is skipped completely */
+ if (!ret && suite_inactive == (ODPH_ARRAY_SIZE(traffic_mngr_suites) - 1))
+ return 77;
return ret;
}